diff --git a/bundle/schema/README.md b/bundle/schema/README.md
new file mode 100644
index 00000000..cd849be0
--- /dev/null
+++ b/bundle/schema/README.md
@@ -0,0 +1,26 @@
+### Overview
+
+`docs/bundle_descriptions.json` contains both autogenerated and manually written
+descriptions for the JSON schema. Specifically:
+1. `resources`: almost all descriptions are autogenerated from the OpenAPI spec
+2. `environments`: almost all descriptions are copied over from the root-level entities (e.g. `bundle`, `artifacts`)
+3. `bundle`: manually edited
+4. `include`: manually edited
+5. `workspace`: manually edited
+6. `artifacts`: manually edited
+
+These descriptions are rendered as inline documentation in an IDE.
+
+### SOP: Add schema descriptions for new fields in bundle config
+
+1. Autogenerate empty descriptions for the new fields by running
+`bricks bundle schema --only-docs > ~/bricks/bundle/schema/docs/bundle_descriptions.json`
+2. Manually edit `bundle_descriptions.json` to add your descriptions
+3. Run `bricks bundle schema --only-docs > ~/bricks/bundle/schema/docs/bundle_descriptions.json` again to copy any applicable descriptions over to `environments`
+4. Push to the repo
+
+
+### SOP: Update descriptions in resources from a newer OpenAPI spec
+
+1. Run `bricks bundle schema --only-docs --openapi PATH_TO_SPEC > ~/bricks/bundle/schema/docs/bundle_descriptions.json`
+2. Push to the repo
diff --git a/bundle/schema/bundle_config_docs.yml b/bundle/schema/bundle_config_docs.yml
deleted file mode 100644
index eeb102fc..00000000
--- a/bundle/schema/bundle_config_docs.yml
+++ /dev/null
@@ -1,12 +0,0 @@
-documentation: Root of the bundle config
-children:
-  bundle:
-    documentation: |
-      Bundle contains details about this bundle, such as its name,
-      version of the spec (TODO), default cluster, default warehouse, etc.
-    children:
-      environment:
-        documentation: Environment is set by the mutator that selects the environment.
-
-  artifacts:
-    documentation: Artifacts contains a description of all code artifacts in this bundle.
\ No newline at end of file
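To make step 2 of the first SOP less manual, here is a minimal standalone sketch (not part of this patch, illustration only) that walks `docs/bundle_descriptions.json` and prints every config path whose description is still empty. The local `node` type simply mirrors the `Docs` struct introduced in `docs.go` below; the `$`/`.*`/`[]` path notation and the hard-coded file path are assumptions made for the example.

```go
// finddocs: illustrative helper, not part of this change.
// It lists config paths in bundle_descriptions.json that still lack a description.
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

// node mirrors the shape of the Docs type in bundle/schema/docs.go.
type node struct {
	Description          string           `json:"description"`
	Properties           map[string]*node `json:"properties,omitempty"`
	Items                *node            `json:"items,omitempty"`
	AdditionalProperties *node            `json:"additionalproperties,omitempty"`
}

// walk prints the path of every node whose description is empty.
func walk(path string, n *node) {
	if n == nil {
		return
	}
	if n.Description == "" {
		fmt.Println(path)
	}
	for k, v := range n.Properties {
		walk(path+"."+k, v)
	}
	// ".*" marks map values, "[]" marks list items (notation chosen for this sketch).
	walk(path+".*", n.AdditionalProperties)
	walk(path+"[]", n.Items)
}

func main() {
	raw, err := os.ReadFile("bundle/schema/docs/bundle_descriptions.json")
	if err != nil {
		panic(err)
	}
	root := &node{}
	if err := json.Unmarshal(raw, root); err != nil {
		panic(err)
	}
	walk("$", root)
}
```

Run against the regenerated file, any path it prints still needs a hand-written description before pushing.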
diff --git a/bundle/schema/docs.go b/bundle/schema/docs.go
index 8522b17b..c8ad15a3 100644
--- a/bundle/schema/docs.go
+++ b/bundle/schema/docs.go
@@ -2,37 +2,112 @@ package schema
 
 import (
 	_ "embed"
+	"encoding/json"
+	"fmt"
 	"os"
+	"reflect"
 
-	"gopkg.in/yaml.v3"
+	"github.com/databricks/bricks/bundle/config"
+	"github.com/databricks/databricks-sdk-go/openapi"
 )
 
+// Docs is a subset of the Schema struct.
 type Docs struct {
-	Documentation string          `json:"documentation"`
-	Children      map[string]Docs `json:"children"`
+	Description          string           `json:"description"`
+	Properties           map[string]*Docs `json:"properties,omitempty"`
+	Items                *Docs            `json:"items,omitempty"`
+	AdditionalProperties *Docs            `json:"additionalproperties,omitempty"`
 }
 
-func LoadDocs(path string) (*Docs, error) {
-	bytes, err := os.ReadFile(path)
-	if err != nil {
-		return nil, err
-	}
-	docs := Docs{}
-	err = yaml.Unmarshal(bytes, &docs)
-	if err != nil {
-		return nil, err
-	}
-	return &docs, nil
-}
-
-//go:embed bundle_config_docs.yml
+//go:embed docs/bundle_descriptions.json
 var bundleDocs []byte
 
-func GetBundleDocs() (*Docs, error) {
-	docs := Docs{}
-	err := yaml.Unmarshal(bundleDocs, &docs)
+func BundleDocs(openapiSpecPath string) (*Docs, error) {
+	docs, err := initializeBundleDocs()
 	if err != nil {
 		return nil, err
 	}
-	return &docs, nil
+	if openapiSpecPath != "" {
+		openapiSpec, err := os.ReadFile(openapiSpecPath)
+		if err != nil {
+			return nil, err
+		}
+		spec := &openapi.Specification{}
+		err = json.Unmarshal(openapiSpec, spec)
+		if err != nil {
+			return nil, err
+		}
+		openapiReader := &OpenapiReader{
+			OpenapiSpec: spec,
+			Memo:        make(map[string]*Schema),
+		}
+		resourcesDocs, err := openapiReader.ResourcesDocs()
+		if err != nil {
+			return nil, err
+		}
+		resourceSchema, err := New(reflect.TypeOf(config.Resources{}), resourcesDocs)
+		if err != nil {
+			return nil, err
+		}
+		docs.Properties["resources"] = schemaToDocs(resourceSchema)
+	}
+	err = docs.refreshEnvironmentsDocs()
+	if err != nil {
+		return nil, err
+	}
+	return docs, nil
+}
+
+func (docs *Docs) refreshEnvironmentsDocs() error {
+	environmentsDocs, ok := docs.Properties["environments"]
+	if !ok || environmentsDocs.AdditionalProperties == nil ||
+		environmentsDocs.AdditionalProperties.Properties == nil {
+		return fmt.Errorf("invalid environments descriptions")
+	}
+	environmentProperties := environmentsDocs.AdditionalProperties.Properties
+	propertiesToCopy := []string{"artifacts", "bundle", "resources", "workspace"}
+	for _, p := range propertiesToCopy {
+		environmentProperties[p] = docs.Properties[p]
+	}
+	return nil
+}
+
+func initializeBundleDocs() (*Docs, error) {
+	// load embedded descriptions
+	embedded := Docs{}
+	err := json.Unmarshal(bundleDocs, &embedded)
+	if err != nil {
+		return nil, err
+	}
+	// generate schema with the embedded descriptions
+	schema, err := New(reflect.TypeOf(config.Root{}), &embedded)
+	if err != nil {
+		return nil, err
+	}
+	// convert the schema back to docs.
This creates empty descriptions + // for any properties that were missing in the embedded descriptions + docs := schemaToDocs(schema) + return docs, nil +} + +// *Docs are a subset of *Schema, this function selects that subset +func schemaToDocs(schema *Schema) *Docs { + // terminate recursion if schema is nil + if schema == nil { + return nil + } + docs := &Docs{ + Description: schema.Description, + } + if len(schema.Properties) > 0 { + docs.Properties = make(map[string]*Docs) + } + for k, v := range schema.Properties { + docs.Properties[k] = schemaToDocs(v) + } + docs.Items = schemaToDocs(schema.Items) + if additionalProperties, ok := schema.AdditionalProperties.(*Schema); ok { + docs.AdditionalProperties = schemaToDocs(additionalProperties) + } + return docs } diff --git a/bundle/schema/docs/bundle_descriptions.json b/bundle/schema/docs/bundle_descriptions.json new file mode 100644 index 00000000..ed2cb72b --- /dev/null +++ b/bundle/schema/docs/bundle_descriptions.json @@ -0,0 +1,2862 @@ +{ + "description": "Root of the bundle config", + "properties": { + "artifacts": { + "description": "Artifacts contains a description of all code artifacts in this bundle.", + "additionalproperties": { + "description": "", + "properties": { + "notebook": { + "description": "", + "properties": { + "language": { + "description": "" + }, + "local_path": { + "description": "" + }, + "path": { + "description": "" + }, + "remote_path": { + "description": "" + } + } + } + } + } + }, + "bundle": { + "description": "Bundle contains details about this bundle, such as its name,\nversion of the spec (TODO), default cluster, default warehouse, etc.\n", + "properties": { + "environment": { + "description": "Environment is set by the mutator that selects the environment." + }, + "name": { + "description": "" + }, + "terraform": { + "description": "", + "properties": { + "exec_path": { + "description": "" + } + } + } + } + }, + "environments": { + "description": "", + "additionalproperties": { + "description": "", + "properties": { + "artifacts": { + "description": "Artifacts contains a description of all code artifacts in this bundle.", + "additionalproperties": { + "description": "", + "properties": { + "notebook": { + "description": "", + "properties": { + "language": { + "description": "" + }, + "local_path": { + "description": "" + }, + "path": { + "description": "" + }, + "remote_path": { + "description": "" + } + } + } + } + } + }, + "bundle": { + "description": "Bundle contains details about this bundle, such as its name,\nversion of the spec (TODO), default cluster, default warehouse, etc.\n", + "properties": { + "environment": { + "description": "Environment is set by the mutator that selects the environment." + }, + "name": { + "description": "" + }, + "terraform": { + "description": "", + "properties": { + "exec_path": { + "description": "" + } + } + } + } + }, + "default": { + "description": "" + }, + "resources": { + "description": "Specification of databricks resources to instantiate", + "properties": { + "jobs": { + "description": "List of job definations", + "additionalproperties": { + "description": "", + "properties": { + "email_notifications": { + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. The default behavior is to not send any emails.", + "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." 
+ }, + "on_failure": { + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `SKIPPED`, `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_start": { + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_success": { + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESSFUL` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + } + } + }, + "format": { + "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." + }, + "git_source": { + "description": "An optional specification for a remote repository containing the notebooks used by this job's notebook tasks.", + "properties": { + "git_branch": { + "description": "Name of the branch to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_tag or git_commit.\n\nThe maximum length is 255 characters.\n" + }, + "git_commit": { + "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag.\nThe maximum length is 64 characters." + }, + "git_provider": { + "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive." + }, + "git_snapshot": { + "description": "", + "properties": { + "used_commit": { + "description": "Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to." + } + } + }, + "git_tag": { + "description": "Name of the tag to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_branch or git_commit.\n\nThe maximum length is 255 characters.\n" + }, + "git_url": { + "description": "URL of the repository to be cloned by this job.\nThe maximum length is 300 characters." + } + } + }, + "id": { + "description": "" + }, + "job_clusters": { + "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", + "items": { + "description": "", + "properties": { + "job_cluster_key": { + "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." 
+ }, + "new_cluster": { + "description": "If new_cluster, a description of a cluster that is created for each task.", + "properties": { + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "autotermination_minutes": { + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." + }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." 
+ }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." 
+ }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either region or endpoint should also be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." 
+ } + } + } + } + }, + "cluster_name": { + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" + }, + "cluster_source": { + "description": "" + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" + }, + "enable_elastic_disk": { + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." + }, + "enable_local_disk_encryption": { + "description": "Whether to enable LUKS on cluster VMs' local disks" + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." 
+ }, + "runtime_engine": { + "description": "" + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "spark_version": { + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + }, + "workload_type": { + "description": "", + "properties": { + "clients": { + "description": " defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "properties": { + "jobs": { + "description": "With jobs set, the cluster can be used for jobs" + }, + "notebooks": { + "description": "With notebooks set, this cluster can be used for notebooks" + } + } + } + } + } + } + } + } + } + }, + "max_concurrent_runs": { + "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000\\. Setting this value to 0 causes all new runs to be skipped. The default behavior is to allow only 1 concurrent run." + }, + "name": { + "description": "An optional name for the job." + }, + "schedule": { + "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "properties": { + "pause_status": { + "description": "Indicate whether this schedule is paused or not." 
+ }, + "quartz_cron_expression": { + "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + }, + "timezone_id": { + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + } + } + }, + "tags": { + "description": "A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.", + "additionalproperties": { + "description": "" + } + }, + "tasks": { + "description": "A list of task specifications to be executed by this job.", + "items": { + "description": "", + "properties": { + "dbt_task": { + "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "properties": { + "catalog": { + "description": "Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks \u003e= 1.1.1." + }, + "commands": { + "description": "A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided.", + "items": { + "description": "" + } + }, + "profiles_directory": { + "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." + }, + "project_directory": { + "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + }, + "schema": { + "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." + }, + "warehouse_id": { + "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." + } + } + }, + "depends_on": { + "description": "", + "items": { + "description": "", + "properties": { + "task_key": { + "description": "" + } + } + } + }, + "description": { + "description": "An optional description for this task.\nThe maximum length is 4096 bytes." + }, + "email_notifications": { + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. The default behavior is to not send any emails.", + "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." + }, + "on_failure": { + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `SKIPPED`, `FAILED`, or `TIMED_OUT` result_state. 
If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_start": { + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_success": { + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESSFUL` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + } + } + }, + "existing_cluster_id": { + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + }, + "job_cluster_key": { + "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." + }, + "libraries": { + "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "items": { + "description": "", + "properties": { + "cran": { + "description": "Specification of a CRAN library to be installed as part of the library", + "properties": { + "package": { + "description": "The name of the CRAN package to install." + }, + "repo": { + "description": "The repository where the package can be found. If not specified, the default CRAN repo is used." + } + } + }, + "egg": { + "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + }, + "jar": { + "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + }, + "maven": { + "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "properties": { + "coordinates": { + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." + }, + "exclusions": { + "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "items": { + "description": "" + } + }, + "repo": { + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." + } + } + }, + "pypi": { + "description": "Specification of a PyPi library to be installed. For example:\n`{ \"package\": \"simplejson\" }`", + "properties": { + "package": { + "description": "The name of the pypi package to install. 
An optional exact version specification is also\nsupported. Examples: \"simplejson\" and \"simplejson==3.8.0\"." + }, + "repo": { + "description": "The repository where the package can be found. If not specified, the default pip index is\nused." + } + } + }, + "whl": { + "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + } + } + } + }, + "max_retries": { + "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry." + }, + "min_retry_interval_millis": { + "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." + }, + "new_cluster": { + "description": "If new_cluster, a description of a cluster that is created for each task.", + "properties": { + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "autotermination_minutes": { + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." 
+ }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." 
+ } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either region or endpoint should also be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. 
`s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "cluster_name": { + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" + }, + "cluster_source": { + "description": "" + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" + }, + "enable_elastic_disk": { + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." + }, + "enable_local_disk_encryption": { + "description": "Whether to enable LUKS on cluster VMs' local disks" + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. 
For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." + }, + "runtime_engine": { + "description": "" + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "spark_version": { + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + }, + "workload_type": { + "description": "", + "properties": { + "clients": { + "description": " defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "properties": { + "jobs": { + "description": "With jobs set, the cluster can be used for jobs" + }, + "notebooks": { + "description": "With notebooks set, this cluster can be used for notebooks" + } + } + } + } + } + } + }, + "notebook_task": { + "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", + "properties": { + "base_parameters": { + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n", + "additionalproperties": { + "description": "" + } + }, + "notebook_path": { + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" + }, + "source": { + "description": "This describes an enum" + } + } + }, + "pipeline_task": { + "description": "If pipeline_task, indicates that this task must execute a Pipeline.", + "properties": { + "full_refresh": { + "description": "If true, a full refresh will be triggered on the delta live table." + }, + "pipeline_id": { + "description": "The full name of the pipeline task to execute." + } + } + }, + "python_wheel_task": { + "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.", + "properties": { + "entry_point": { + "description": "Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()`" + }, + "named_parameters": { + "description": "Command-line parameters passed to Python wheel task in the form of `[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if `parameters` is not null.", + "additionalproperties": { + "description": "" + } + }, + "package_name": { + "description": "Name of the package to execute" + }, + "parameters": { + "description": "Command-line parameters passed to Python wheel task. Leave it empty if `named_parameters` is not null.", + "items": { + "description": "" + } + } + } + }, + "retry_on_timeout": { + "description": "An optional policy to specify whether to retry a task when it times out. The default behavior is to not retry on timeout." + }, + "spark_jar_task": { + "description": "If spark_jar_task, indicates that this task must run a JAR.", + "properties": { + "jar_uri": { + "description": "Deprecated since 04/2016\\\\. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" + }, + "main_class_name": { + "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." 
+ }, + "parameters": { + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + } + } + }, + "spark_python_task": { + "description": "If spark_python_task, indicates that this task must run a Python file.", + "properties": { + "parameters": { + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + }, + "python_file": { + "description": "" + } + } + }, + "spark_submit_task": { + "description": "If spark_submit_task, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.", + "properties": { + "parameters": { + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + } + } + }, + "sql_task": { + "description": "If sql_task, indicates that this job must execute a SQL task.", + "properties": { + "alert": { + "description": "If alert, indicates that this job must refresh a SQL alert.", + "properties": { + "alert_id": { + "description": "The canonical identifier of the SQL alert." + }, + "pause_subscriptions": { + "description": "If true, the alert notifications are not sent to subscribers." + }, + "subscriptions": { + "description": "If specified, alert notifications are sent to subscribers.", + "items": { + "description": "", + "properties": { + "destination_id": { + "description": "The canonical identifier of the destination to receive email notification." + }, + "user_name": { + "description": "The user name to receive the subscription email." + } + } + } + } + } + }, + "dashboard": { + "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", + "properties": { + "custom_subject": { + "description": "Subject of the email sent to subscribers of this task." + }, + "dashboard_id": { + "description": "The canonical identifier of the SQL dashboard." + }, + "pause_subscriptions": { + "description": "If true, the dashboard snapshot is not taken, and emails are not sent to subscribers." + }, + "subscriptions": { + "description": "If specified, dashboard snapshots are sent to subscriptions.", + "items": { + "description": "", + "properties": { + "destination_id": { + "description": "The canonical identifier of the destination to receive email notification." + }, + "user_name": { + "description": "The user name to receive the subscription email." + } + } + } + } + } + }, + "parameters": { + "description": "Parameters to be used for each run of this job. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } + }, + "query": { + "description": "If query, indicates that this job must execute a SQL query.", + "properties": { + "query_id": { + "description": "The canonical identifier of the SQL query." + } + } + }, + "warehouse_id": { + "description": "The canonical identifier of the SQL warehouse. Only serverless and pro SQL warehouses are supported." + } + } + }, + "task_key": { + "description": "A unique name for the task. 
This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset.\nThe maximum length is 100 characters." + }, + "timeout_seconds": { + "description": "An optional timeout applied to each run of this job task. The default behavior is to have no timeout." + } + } + } + }, + "timeout_seconds": { + "description": "An optional timeout applied to each run of this job. The default behavior is to have no timeout." + }, + "webhook_notifications": { + "description": "A collection of system notification IDs to notify when the run begins or completes. The default behavior is to not send any system notifications.", + "properties": { + "on_failure": { + "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_start": { + "description": "An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_success": { + "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + } + } + } + } + } + }, + "pipelines": { + "description": "List of pipeline definations", + "additionalproperties": { + "description": "", + "properties": { + "catalog": { + "description": "Catalog in UC to add tables to. If target is specified, tables in this pipeline will be\npublished to a \"target\" schema inside catalog (i.e. \u003ccatalog\u003e.\u003ctarget\u003e.\u003ctable\u003e)." + }, + "channel": { + "description": "DLT Release Channel that specifies which version to use." + }, + "clusters": { + "description": "Cluster settings for this pipeline deployment.", + "items": { + "description": "", + "properties": { + "apply_policy_default_values": { + "description": "Note: This field won't be persisted. Only API users will check this field." + }, + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." + }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. 
For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either region or endpoint should also be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. 
`bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above." + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "label": { + "description": "Cluster label" + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. 
A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nSee :method:clusters/create for more details.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + } + } + } + }, + "configuration": { + "description": "String-String configuration for this pipeline execution.", + "additionalproperties": { + "description": "" + } + }, + "continuous": { + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." + }, + "development": { + "description": "Whether the pipeline is in Development mode. Defaults to false." + }, + "edition": { + "description": "Pipeline product edition." + }, + "filters": { + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "properties": { + "exclude": { + "description": "Paths to exclude.", + "items": { + "description": "" + } + }, + "include": { + "description": "Paths to include.", + "items": { + "description": "" + } + } + } + }, + "id": { + "description": "Unique identifier for this pipeline." + }, + "libraries": { + "description": "Libraries or code needed by this deployment.", + "items": { + "description": "", + "properties": { + "jar": { + "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." 
+ }, + "maven": { + "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "properties": { + "coordinates": { + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." + }, + "exclusions": { + "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "items": { + "description": "" + } + }, + "repo": { + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." + } + } + }, + "notebook": { + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\nFor example: `{ \"notebook\" : { \"path\" : \"/my-pipeline-notebook-path\" } }`.\nCurrently, only Scala notebooks are supported, and pipelines must be defined in a package\ncell.", + "properties": { + "path": { + "description": "The absolute path of the notebook." + } + } + }, + "whl": { + "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + } + } + } + }, + "name": { + "description": "Friendly identifier for this pipeline." + }, + "photon": { + "description": "Whether Photon is enabled for this pipeline." + }, + "storage": { + "description": "DBFS root directory for storing checkpoints and tables." + }, + "target": { + "description": "Target schema (database) to add tables in this pipeline to." + }, + "trigger": { + "description": "Which pipeline trigger to use. 
Deprecated: Use `continuous` instead.", + "properties": { + "cron": { + "description": "", + "properties": { + "quartz_cron_schedule": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "manual": { + "description": "" + } + } + } + } + } + } + } + }, + "workspace": { + "description": "", + "properties": { + "artifact_path": { + "description": "", + "properties": { + "dbfs": { + "description": "" + }, + "workspace": { + "description": "" + } + } + }, + "azure_client_id": { + "description": "" + }, + "azure_environment": { + "description": "" + }, + "azure_login_app_id": { + "description": "" + }, + "azure_tenant_id": { + "description": "" + }, + "azure_use_msi": { + "description": "" + }, + "azure_workspace_resource_id": { + "description": "" + }, + "current_user": { + "description": "", + "properties": { + "active": { + "description": "" + }, + "displayName": { + "description": "" + }, + "emails": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "entitlements": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "externalId": { + "description": "" + }, + "groups": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "id": { + "description": "" + }, + "name": { + "description": "", + "properties": { + "familyName": { + "description": "" + }, + "givenName": { + "description": "" + } + } + }, + "roles": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "userName": { + "description": "" + } + } + }, + "file_path": { + "description": "", + "properties": { + "dbfs": { + "description": "" + }, + "workspace": { + "description": "" + } + } + }, + "google_service_account": { + "description": "" + }, + "host": { + "description": "" + }, + "profile": { + "description": "" + }, + "root": { + "description": "" + }, + "state_path": { + "description": "", + "properties": { + "dbfs": { + "description": "" + }, + "workspace": { + "description": "" + } + } + } + } + } + } + } + }, + "include": { + "description": "", + "items": { + "description": "" + } + }, + "resources": { + "description": "Specification of databricks resources to instantiate", + "properties": { + "jobs": { + "description": "List of job definations", + "additionalproperties": { + "description": "", + "properties": { + "email_notifications": { + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. The default behavior is to not send any emails.", + "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." 
+ }, + "on_failure": { + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `SKIPPED`, `FAILED`, or `TIMED_OUT` result_state. If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_start": { + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_success": { + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESSFUL` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + } + } + }, + "format": { + "description": "Used to tell what is the format of the job. This field is ignored in Create/Update/Reset calls. When using the Jobs API 2.1 this value is always set to `\"MULTI_TASK\"`." + }, + "git_source": { + "description": "An optional specification for a remote repository containing the notebooks used by this job's notebook tasks.", + "properties": { + "git_branch": { + "description": "Name of the branch to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_tag or git_commit.\n\nThe maximum length is 255 characters.\n" + }, + "git_commit": { + "description": "Commit to be checked out and used by this job. This field cannot be specified in conjunction with git_branch or git_tag.\nThe maximum length is 64 characters." + }, + "git_provider": { + "description": "Unique identifier of the service used to host the Git repository. The value is case insensitive." + }, + "git_snapshot": { + "description": "", + "properties": { + "used_commit": { + "description": "Commit that was used to execute the run. If git_branch was specified, this points to the HEAD of the branch at the time of the run; if git_tag was specified, this points to the commit the tag points to." + } + } + }, + "git_tag": { + "description": "Name of the tag to be checked out and used by this job.\nThis field cannot be specified in conjunction with git_branch or git_commit.\n\nThe maximum length is 255 characters.\n" + }, + "git_url": { + "description": "URL of the repository to be cloned by this job.\nThe maximum length is 300 characters." + } + } + }, + "id": { + "description": "" + }, + "job_clusters": { + "description": "A list of job cluster specifications that can be shared and reused by tasks of this job. Libraries cannot be declared in a shared job cluster. You must declare dependent libraries in task settings.", + "items": { + "description": "", + "properties": { + "job_cluster_key": { + "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution." 
+ }, + "new_cluster": { + "description": "If new_cluster, a description of a cluster that is created for each task.", + "properties": { + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "autotermination_minutes": { + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." + }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." 
+ }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." 
+ }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either region or endpoint should also be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." 
+ } + } + } + } + }, + "cluster_name": { + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" + }, + "cluster_source": { + "description": "" + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" + }, + "enable_elastic_disk": { + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." + }, + "enable_local_disk_encryption": { + "description": "Whether to enable LUKS on cluster VMs' local disks" + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." 
+ }, + "runtime_engine": { + "description": "" + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "spark_version": { + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + }, + "workload_type": { + "description": "", + "properties": { + "clients": { + "description": " defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "properties": { + "jobs": { + "description": "With jobs set, the cluster can be used for jobs" + }, + "notebooks": { + "description": "With notebooks set, this cluster can be used for notebooks" + } + } + } + } + } + } + } + } + } + }, + "max_concurrent_runs": { + "description": "An optional maximum allowed number of concurrent runs of the job.\n\nSet this value if you want to be able to execute multiple runs of the same job concurrently. This is useful for example if you trigger your job on a frequent schedule and want to allow consecutive runs to overlap with each other, or if you want to trigger multiple runs which differ by their input parameters.\n\nThis setting affects only new runs. For example, suppose the job’s concurrency is 4 and there are 4 concurrent active runs. Then setting the concurrency to 3 won’t kill any of the active runs. However, from then on, new runs are skipped unless there are fewer than 3 active runs.\n\nThis value cannot exceed 1000\\. Setting this value to 0 causes all new runs to be skipped. The default behavior is to allow only 1 concurrent run." + }, + "name": { + "description": "An optional name for the job." + }, + "schedule": { + "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.", + "properties": { + "pause_status": { + "description": "Indicate whether this schedule is paused or not." 
+ }, + "quartz_cron_expression": { + "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n" + }, + "timezone_id": { + "description": "A Java timezone ID. The schedule for a job is resolved with respect to this timezone.\nSee [Java TimeZone](https://docs.oracle.com/javase/7/docs/api/java/util/TimeZone.html) for details.\nThis field is required.\n" + } + } + }, + "tags": { + "description": "A map of tags associated with the job. These are forwarded to the cluster as cluster tags for jobs clusters, and are subject to the same limitations as cluster tags. A maximum of 25 tags can be added to the job.", + "additionalproperties": { + "description": "" + } + }, + "tasks": { + "description": "A list of task specifications to be executed by this job.", + "items": { + "description": "", + "properties": { + "dbt_task": { + "description": "If dbt_task, indicates that this must execute a dbt task. It requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.", + "properties": { + "catalog": { + "description": "Optional name of the catalog to use. The value is the top level in the 3-level namespace of Unity Catalog (catalog / schema / relation). The catalog value can only be specified if a warehouse_id is specified. Requires dbt-databricks \u003e= 1.1.1." + }, + "commands": { + "description": "A list of dbt commands to execute. All commands must start with `dbt`. This parameter must not be empty. A maximum of up to 10 commands can be provided.", + "items": { + "description": "" + } + }, + "profiles_directory": { + "description": "Optional (relative) path to the profiles directory. Can only be specified if no warehouse_id is specified. If no warehouse_id is specified and this folder is unset, the root directory is used." + }, + "project_directory": { + "description": "Optional (relative) path to the project directory, if no value is provided, the root of the git repository is used." + }, + "schema": { + "description": "Optional schema to write to. This parameter is only used when a warehouse_id is also provided. If not provided, the `default` schema is used." + }, + "warehouse_id": { + "description": "ID of the SQL warehouse to connect to. If provided, we automatically generate and provide the profile and connection details to dbt. It can be overridden on a per-command basis by using the `--profiles-dir` command line argument." + } + } + }, + "depends_on": { + "description": "", + "items": { + "description": "", + "properties": { + "task_key": { + "description": "" + } + } + } + }, + "description": { + "description": "An optional description for this task.\nThe maximum length is 4096 bytes." + }, + "email_notifications": { + "description": "An optional set of email addresses that is notified when runs of this job begin or complete as well as when this job is deleted. The default behavior is to not send any emails.", + "properties": { + "no_alert_for_skipped_runs": { + "description": "If true, do not send email to recipients specified in `on_failure` if the run is skipped." + }, + "on_failure": { + "description": "A list of email addresses to be notified when a run unsuccessfully completes. A run is considered to have completed unsuccessfully if it ends with an `INTERNAL_ERROR` `life_cycle_state` or a `SKIPPED`, `FAILED`, or `TIMED_OUT` result_state. 
If this is not specified on job creation, reset, or update the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_start": { + "description": "A list of email addresses to be notified when a run begins. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + }, + "on_success": { + "description": "A list of email addresses to be notified when a run successfully completes. A run is considered to have completed successfully if it ends with a `TERMINATED` `life_cycle_state` and a `SUCCESSFUL` result_state. If not specified on job creation, reset, or update, the list is empty, and notifications are not sent.", + "items": { + "description": "" + } + } + } + }, + "existing_cluster_id": { + "description": "If existing_cluster_id, the ID of an existing cluster that is used for all runs of this task. When running tasks on an existing cluster, you may need to manually restart the cluster if it stops responding. We suggest running jobs on new clusters for greater reliability." + }, + "job_cluster_key": { + "description": "If job_cluster_key, this task is executed reusing the cluster specified in `job.settings.job_clusters`." + }, + "libraries": { + "description": "An optional list of libraries to be installed on the cluster that executes the task. The default value is an empty list.", + "items": { + "description": "", + "properties": { + "cran": { + "description": "Specification of a CRAN library to be installed as part of the library", + "properties": { + "package": { + "description": "The name of the CRAN package to install." + }, + "repo": { + "description": "The repository where the package can be found. If not specified, the default CRAN repo is used." + } + } + }, + "egg": { + "description": "URI of the egg to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"egg\": \"dbfs:/my/egg\" }` or\n`{ \"egg\": \"s3://my-bucket/egg\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + }, + "jar": { + "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + }, + "maven": { + "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "properties": { + "coordinates": { + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." + }, + "exclusions": { + "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "items": { + "description": "" + } + }, + "repo": { + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." + } + } + }, + "pypi": { + "description": "Specification of a PyPi library to be installed. For example:\n`{ \"package\": \"simplejson\" }`", + "properties": { + "package": { + "description": "The name of the pypi package to install. 
An optional exact version specification is also\nsupported. Examples: \"simplejson\" and \"simplejson==3.8.0\"." + }, + "repo": { + "description": "The repository where the package can be found. If not specified, the default pip index is\nused." + } + } + }, + "whl": { + "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + } + } + } + }, + "max_retries": { + "description": "An optional maximum number of times to retry an unsuccessful run. A run is considered to be unsuccessful if it completes with the `FAILED` result_state or `INTERNAL_ERROR` `life_cycle_state`. The value -1 means to retry indefinitely and the value 0 means to never retry. The default behavior is to never retry." + }, + "min_retry_interval_millis": { + "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried." + }, + "new_cluster": { + "description": "If new_cluster, a description of a cluster that is created for each task.", + "properties": { + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "autotermination_minutes": { + "description": "Automatically terminates the cluster after it is inactive for this time in minutes. If not set,\nthis cluster will not be automatically terminated. If specified, the threshold must be between\n10 and 10000 minutes.\nUsers can also set this value to 0 to explicitly disable automatic termination." + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." 
+ }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." 
+ } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Microsoft Azure.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either region or endpoint should also be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. `bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. 
`s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "cluster_name": { + "description": "Cluster name requested by the user. This doesn't have to be unique.\nIf not specified at creation, the cluster name will be an empty string.\n" + }, + "cluster_source": { + "description": "" + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver. Note that this field is optional;\nif unset, the driver node type will be set as the same value\nas `node_type_id` defined above.\n" + }, + "enable_elastic_disk": { + "description": "Autoscaling Local Storage: when enabled, this cluster will dynamically acquire additional disk\nspace when its Spark workers are running low on disk space. This feature requires specific AWS\npermissions to function correctly - refer to the User Guide for more details." + }, + "enable_local_disk_encryption": { + "description": "Whether to enable LUKS on cluster VMs' local disks" + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. 
For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." + }, + "runtime_engine": { + "description": "" + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nUsers can also pass in a string of extra JVM options to the driver and the executors via\n`spark.driver.extraJavaOptions` and `spark.executor.extraJavaOptions` respectively.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "spark_version": { + "description": "The Spark version of the cluster, e.g. `3.3.x-scala2.11`.\nA list of available Spark versions can be retrieved by using\nthe :method:clusters/sparkVersions API call.\n" + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + }, + "workload_type": { + "description": "", + "properties": { + "clients": { + "description": " defined what type of clients can use the cluster. E.g. Notebooks, Jobs", + "properties": { + "jobs": { + "description": "With jobs set, the cluster can be used for jobs" + }, + "notebooks": { + "description": "With notebooks set, this cluster can be used for notebooks" + } + } + } + } + } + } + }, + "notebook_task": { + "description": "If notebook_task, indicates that this task must run a notebook. This field may not be specified in conjunction with spark_jar_task.", + "properties": { + "base_parameters": { + "description": "Base parameters to be used for each run of this job. If the run is initiated by a call to\n:method:jobs/runNow with parameters specified, the two parameters maps are merged. 
If the same key is specified in\n`base_parameters` and in `run-now`, the value from `run-now` is used.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n\nIf the notebook takes a parameter that is not specified in the job’s `base_parameters` or the `run-now` override parameters,\nthe default value from the notebook is used.\n\nRetrieve these parameters in a notebook using [dbutils.widgets.get](https://docs.databricks.com/dev-tools/databricks-utils.html#dbutils-widgets).\n", + "additionalproperties": { + "description": "" + } + }, + "notebook_path": { + "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n" + }, + "source": { + "description": "This describes an enum" + } + } + }, + "pipeline_task": { + "description": "If pipeline_task, indicates that this task must execute a Pipeline.", + "properties": { + "full_refresh": { + "description": "If true, a full refresh will be triggered on the delta live table." + }, + "pipeline_id": { + "description": "The full name of the pipeline task to execute." + } + } + }, + "python_wheel_task": { + "description": "If python_wheel_task, indicates that this job must execute a PythonWheel.", + "properties": { + "entry_point": { + "description": "Named entry point to use, if it does not exist in the metadata of the package it executes the function from the package directly using `$packageName.$entryPoint()`" + }, + "named_parameters": { + "description": "Command-line parameters passed to Python wheel task in the form of `[\"--name=task\", \"--data=dbfs:/path/to/data.json\"]`. Leave it empty if `parameters` is not null.", + "additionalproperties": { + "description": "" + } + }, + "package_name": { + "description": "Name of the package to execute" + }, + "parameters": { + "description": "Command-line parameters passed to Python wheel task. Leave it empty if `named_parameters` is not null.", + "items": { + "description": "" + } + } + } + }, + "retry_on_timeout": { + "description": "An optional policy to specify whether to retry a task when it times out. The default behavior is to not retry on timeout." + }, + "spark_jar_task": { + "description": "If spark_jar_task, indicates that this task must run a JAR.", + "properties": { + "jar_uri": { + "description": "Deprecated since 04/2016\\\\. Provide a `jar` through the `libraries` field instead. For an example, see :method:jobs/create.\n" + }, + "main_class_name": { + "description": "The full name of the class containing the main method to be executed. This class must be contained in a JAR provided as a library.\n\nThe code must use `SparkContext.getOrCreate` to obtain a Spark context; otherwise, runs of the job fail." 
+ }, + "parameters": { + "description": "Parameters passed to the main method.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + } + } + }, + "spark_python_task": { + "description": "If spark_python_task, indicates that this task must run a Python file.", + "properties": { + "parameters": { + "description": "Command line parameters passed to the Python file.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + }, + "python_file": { + "description": "" + } + } + }, + "spark_submit_task": { + "description": "If spark_submit_task, indicates that this task must be launched by the spark submit script. This task can run only on new clusters.", + "properties": { + "parameters": { + "description": "Command-line parameters passed to spark submit.\n\nUse [Task parameter variables](https://docs.databricks.com/jobs.html#parameter-variables) to set parameters containing information about job runs.\n", + "items": { + "description": "" + } + } + } + }, + "sql_task": { + "description": "If sql_task, indicates that this job must execute a SQL task.", + "properties": { + "alert": { + "description": "If alert, indicates that this job must refresh a SQL alert.", + "properties": { + "alert_id": { + "description": "The canonical identifier of the SQL alert." + }, + "pause_subscriptions": { + "description": "If true, the alert notifications are not sent to subscribers." + }, + "subscriptions": { + "description": "If specified, alert notifications are sent to subscribers.", + "items": { + "description": "", + "properties": { + "destination_id": { + "description": "The canonical identifier of the destination to receive email notification." + }, + "user_name": { + "description": "The user name to receive the subscription email." + } + } + } + } + } + }, + "dashboard": { + "description": "If dashboard, indicates that this job must refresh a SQL dashboard.", + "properties": { + "custom_subject": { + "description": "Subject of the email sent to subscribers of this task." + }, + "dashboard_id": { + "description": "The canonical identifier of the SQL dashboard." + }, + "pause_subscriptions": { + "description": "If true, the dashboard snapshot is not taken, and emails are not sent to subscribers." + }, + "subscriptions": { + "description": "If specified, dashboard snapshots are sent to subscriptions.", + "items": { + "description": "", + "properties": { + "destination_id": { + "description": "The canonical identifier of the destination to receive email notification." + }, + "user_name": { + "description": "The user name to receive the subscription email." + } + } + } + } + } + }, + "parameters": { + "description": "Parameters to be used for each run of this job. The SQL alert task does not support custom parameters.", + "additionalproperties": { + "description": "" + } + }, + "query": { + "description": "If query, indicates that this job must execute a SQL query.", + "properties": { + "query_id": { + "description": "The canonical identifier of the SQL query." + } + } + }, + "warehouse_id": { + "description": "The canonical identifier of the SQL warehouse. Only serverless and pro SQL warehouses are supported." + } + } + }, + "task_key": { + "description": "A unique name for the task. 
This field is used to refer to this task from other tasks.\nThis field is required and must be unique within its parent job.\nOn Update or Reset, this field is used to reference the tasks to be updated or reset.\nThe maximum length is 100 characters." + }, + "timeout_seconds": { + "description": "An optional timeout applied to each run of this job task. The default behavior is to have no timeout." + } + } + } + }, + "timeout_seconds": { + "description": "An optional timeout applied to each run of this job. The default behavior is to have no timeout." + }, + "webhook_notifications": { + "description": "A collection of system notification IDs to notify when the run begins or completes. The default behavior is to not send any system notifications.", + "properties": { + "on_failure": { + "description": "An optional list of system notification IDs to call when the run fails. A maximum of 3 destinations can be specified for the `on_failure` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_start": { + "description": "An optional list of system notification IDs to call when the run starts. A maximum of 3 destinations can be specified for the `on_start` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + }, + "on_success": { + "description": "An optional list of system notification IDs to call when the run completes successfully. A maximum of 3 destinations can be specified for the `on_success` property.", + "items": { + "description": "", + "properties": { + "id": { + "description": "" + } + } + } + } + } + } + } + } + }, + "pipelines": { + "description": "List of pipeline definations", + "additionalproperties": { + "description": "", + "properties": { + "catalog": { + "description": "Catalog in UC to add tables to. If target is specified, tables in this pipeline will be\npublished to a \"target\" schema inside catalog (i.e. \u003ccatalog\u003e.\u003ctarget\u003e.\u003ctable\u003e)." + }, + "channel": { + "description": "DLT Release Channel that specifies which version to use." + }, + "clusters": { + "description": "Cluster settings for this pipeline deployment.", + "items": { + "description": "", + "properties": { + "apply_policy_default_values": { + "description": "Note: This field won't be persisted. Only API users will check this field." + }, + "autoscale": { + "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.", + "properties": { + "max_workers": { + "description": "The maximum number of workers to which the cluster can scale up when overloaded.\nNote that `max_workers` must be strictly greater than `min_workers`." + }, + "min_workers": { + "description": "The minimum number of workers to which the cluster can scale down when underutilized.\nIt is also the initial number of workers the cluster will have after creation." + } + } + }, + "aws_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "ebs_volume_count": { + "description": "The number of volumes launched for each instance. Users can choose up to 10 volumes.\nThis feature is only enabled for supported node types. 
Legacy node types cannot specify\ncustom EBS volumes.\nFor node types with no instance store, at least one EBS volume needs to be specified;\notherwise, cluster creation will fail.\n\nThese EBS volumes will be mounted at `/ebs0`, `/ebs1`, and etc.\nInstance store volumes will be mounted at `/local_disk0`, `/local_disk1`, and etc.\n\nIf EBS volumes are attached, Databricks will configure Spark to use only the EBS volumes for\nscratch storage because heterogenously sized scratch devices can lead to inefficient disk\nutilization. If no EBS volumes are attached, Databricks will configure Spark to use instance\nstore volumes.\n\nPlease note that if EBS volumes are specified, then the Spark configuration `spark.local.dir`\nwill be overridden." + }, + "ebs_volume_iops": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_size": { + "description": "The size of each EBS volume (in GiB) launched for each instance. For general purpose\nSSD, this value must be within the range 100 - 4096. For throughput optimized HDD,\nthis value must be within the range 500 - 4096." + }, + "ebs_volume_throughput": { + "description": "\u003cneeds content added\u003e" + }, + "ebs_volume_type": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nIf this value is greater than 0, the cluster driver node in particular will be placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "instance_profile_arn": { + "description": "Nodes for this cluster will only be placed on AWS instances with this instance profile. If\nommitted, nodes will be placed on instances without an IAM instance profile. The instance\nprofile must have previously been added to the Databricks environment by an account\nadministrator.\n\nThis feature may only be available to certain customer plans.\n\nIf this field is ommitted, we will pull in the default from the conf if it exists." + }, + "spot_bid_price_percent": { + "description": "The bid price for AWS spot instances, as a percentage of the corresponding instance type's\non-demand price.\nFor example, if this field is set to 50, and the cluster needs a new `r3.xlarge` spot\ninstance, then the bid price is half of the price of\non-demand `r3.xlarge` instances. Similarly, if this field is set to 200, the bid price is twice\nthe price of on-demand `r3.xlarge` instances. If not specified, the default value is 100.\nWhen spot instances are requested for this cluster, only spot instances whose bid price\npercentage matches this field will be considered.\nNote that, for safety, we enforce this field to be no more than 10000.\n\nThe default value and documentation here should be kept consistent with\nCommonConf.defaultSpotBidPricePercent and CommonConf.maxSpotBidPricePercent." + }, + "zone_id": { + "description": "Identifier for the availability zone/datacenter in which the cluster resides.\nThis string will be of a form like \"us-west-2a\". The provided availability\nzone must be in the same region as the Databricks deployment. 
For example, \"us-west-2a\"\nis not a valid zone id if the Databricks deployment resides in the \"us-east-1\" region.\nThis is an optional field at cluster creation, and if not specified, a default zone will be used.\nIf the zone specified is \"auto\", will try to place cluster in a zone with high availability,\nand will retry placement in a different AZ if there is not enough capacity.\nSee [[AutoAZHelper.scala]] for more details.\nThe list of available zones as well as the default value can be found by using the\n`List Zones`_ method." + } + } + }, + "azure_attributes": { + "description": "Attributes related to clusters running on Amazon Web Services.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "first_on_demand": { + "description": "The first `first_on_demand` nodes of the cluster will be placed on on-demand instances.\nThis value should be greater than 0, to make sure the cluster driver node is placed on an\non-demand instance. If this value is greater than or equal to the current cluster size, all\nnodes will be placed on on-demand instances. If this value is less than the current cluster\nsize, `first_on_demand` nodes will be placed on on-demand instances and the remainder will\nbe placed on `availability` instances. Note that this value does not affect\ncluster size and cannot currently be mutated over the lifetime of a cluster." + }, + "log_analytics_info": { + "description": "Defines values necessary to configure and run Azure Log Analytics agent", + "properties": { + "log_analytics_primary_key": { + "description": "\u003cneeds content added\u003e" + }, + "log_analytics_workspace_id": { + "description": "\u003cneeds content added\u003e" + } + } + }, + "spot_bid_max_price": { + "description": "The max bid price to be used for Azure spot instances.\nThe Max price for the bid cannot be higher than the on-demand price of the instance.\nIf not specified, the default value is -1, which specifies that the instance cannot be evicted\non the basis of price, and only on the basis of availability. Further, the value should \u003e 0 or -1." + } + } + }, + "cluster_log_conf": { + "description": "The configuration for delivering spark logs to a long-term storage destination.\nTwo kinds of destinations (dbfs and s3) are supported. Only one destination can be specified\nfor one cluster. If the conf is given, the logs will be delivered to the destination every\n`5 mins`. The destination of driver logs is `$destination/$clusterId/driver`, while\nthe destination of executor logs is `$destination/$clusterId/executor`.", + "properties": { + "dbfs": { + "description": "destination needs to be provided. e.g.\n`{ \"dbfs\" : { \"destination\" : \"dbfs:/home/cluster_log\" } }`", + "properties": { + "destination": { + "description": "dbfs destination, e.g. `dbfs:/my/path`" + } + } + }, + "s3": { + "description": "destination and either region or endpoint should also be provided. e.g.\n`{ \"s3\": { \"destination\" : \"s3://cluster_log_bucket/prefix\", \"region\" : \"us-west-2\" } }`\nCluster iam role is used to access s3, please make sure the cluster iam role in\n`instance_profile_arn` has permission to write data to the s3 destination.", + "properties": { + "canned_acl": { + "description": "(Optional) Set canned access control list for the logs, e.g. 
`bucket-owner-full-control`.\nIf `canned_cal` is set, please make sure the cluster iam role has `s3:PutObjectAcl` permission on\nthe destination bucket and prefix. The full list of possible canned acl can be found at\nhttp://docs.aws.amazon.com/AmazonS3/latest/dev/acl-overview.html#canned-acl.\nPlease also note that by default only the object owner gets full controls. If you are using cross account\nrole for writing data, you may want to set `bucket-owner-full-control` to make bucket owner able to\nread the logs." + }, + "destination": { + "description": "S3 destination, e.g. `s3://my-bucket/some-prefix` Note that logs will be delivered using\ncluster iam role, please make sure you set cluster iam role and the role has write access to the\ndestination. Please also note that you cannot use AWS keys to deliver logs." + }, + "enable_encryption": { + "description": "(Optional) Flag to enable server side encryption, `false` by default." + }, + "encryption_type": { + "description": "(Optional) The encryption type, it could be `sse-s3` or `sse-kms`. It will be used only when\nencryption is enabled and the default type is `sse-s3`." + }, + "endpoint": { + "description": "S3 endpoint, e.g. `https://s3-us-west-2.amazonaws.com`. Either region or endpoint needs to be set.\nIf both are set, endpoint will be used." + }, + "kms_key": { + "description": "(Optional) Kms key which will be used if encryption is enabled and encryption type is set to `sse-kms`." + }, + "region": { + "description": "S3 region, e.g. `us-west-2`. Either region or endpoint needs to be set. If both are set,\nendpoint will be used." + } + } + } + } + }, + "custom_tags": { + "description": "Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS\ninstances and EBS volumes) with these tags in addition to `default_tags`. Notes:\n\n- Currently, Databricks allows at most 45 custom tags\n\n- Clusters can only reuse cloud resources if the resources' tags are a subset of the cluster tags", + "additionalproperties": { + "description": "" + } + }, + "driver_instance_pool_id": { + "description": "The optional ID of the instance pool for the driver of the cluster belongs.\nThe pool cluster uses the instance pool with id (instance_pool_id) if the driver pool is not\nassigned." + }, + "driver_node_type_id": { + "description": "The node type of the Spark driver.\nNote that this field is optional; if unset, the driver node type will be set as the same value\nas `node_type_id` defined above." + }, + "gcp_attributes": { + "description": "Attributes related to clusters running on Google Cloud Platform.\nIf not specified at cluster creation, a set of default values will be used.", + "properties": { + "availability": { + "description": "" + }, + "boot_disk_size": { + "description": "boot disk size in GB" + }, + "google_service_account": { + "description": "If provided, the cluster will impersonate the google service account when accessing\ngcloud services (like GCS). The google service account\nmust have previously been added to the Databricks environment by an account\nadministrator." + } + } + }, + "instance_pool_id": { + "description": "The optional ID of the instance pool to which the cluster belongs." + }, + "label": { + "description": "Cluster label" + }, + "node_type_id": { + "description": "This field encodes, through a single value, the resources available to each of\nthe Spark nodes in this cluster. For example, the Spark nodes can be provisioned\nand optimized for memory or compute intensive workloads. 
A list of available node\ntypes can be retrieved by using the :method:clusters/listNodeTypes API call.\n" + }, + "num_workers": { + "description": "Number of worker nodes that this cluster should have. A cluster has one Spark Driver\nand `num_workers` Executors for a total of `num_workers` + 1 Spark nodes.\n\nNote: When reading the properties of a cluster, this field reflects the desired number\nof workers rather than the actual current number of workers. For instance, if a cluster\nis resized from 5 to 10 workers, this field will immediately be updated to reflect\nthe target size of 10 workers, whereas the workers listed in `spark_info` will gradually\nincrease from 5 to 10 as the new nodes are provisioned." + }, + "policy_id": { + "description": "The ID of the cluster policy used to create the cluster if applicable." + }, + "spark_conf": { + "description": "An object containing a set of optional, user-specified Spark configuration key-value pairs.\nSee :method:clusters/create for more details.\n", + "additionalproperties": { + "description": "" + } + }, + "spark_env_vars": { + "description": "An object containing a set of optional, user-specified environment variable key-value pairs.\nPlease note that key-value pair of the form (X,Y) will be exported as is (i.e.,\n`export X='Y'`) while launching the driver and workers.\n\nIn order to specify an additional set of `SPARK_DAEMON_JAVA_OPTS`, we recommend appending\nthem to `$SPARK_DAEMON_JAVA_OPTS` as shown in the example below. This ensures that all\ndefault databricks managed environmental variables are included as well.\n\nExample Spark environment variables:\n`{\"SPARK_WORKER_MEMORY\": \"28000m\", \"SPARK_LOCAL_DIRS\": \"/local_disk0\"}` or\n`{\"SPARK_DAEMON_JAVA_OPTS\": \"$SPARK_DAEMON_JAVA_OPTS -Dspark.shuffle.service.enabled=true\"}`", + "additionalproperties": { + "description": "" + } + }, + "ssh_public_keys": { + "description": "SSH public key contents that will be added to each Spark node in this cluster. The\ncorresponding private keys can be used to login with the user name `ubuntu` on port `2200`.\nUp to 10 keys can be specified.", + "items": { + "description": "" + } + } + } + } + }, + "configuration": { + "description": "String-String configuration for this pipeline execution.", + "additionalproperties": { + "description": "" + } + }, + "continuous": { + "description": "Whether the pipeline is continuous or triggered. This replaces `trigger`." + }, + "development": { + "description": "Whether the pipeline is in Development mode. Defaults to false." + }, + "edition": { + "description": "Pipeline product edition." + }, + "filters": { + "description": "Filters on which Pipeline packages to include in the deployed graph.", + "properties": { + "exclude": { + "description": "Paths to exclude.", + "items": { + "description": "" + } + }, + "include": { + "description": "Paths to include.", + "items": { + "description": "" + } + } + } + }, + "id": { + "description": "Unique identifier for this pipeline." + }, + "libraries": { + "description": "Libraries or code needed by this deployment.", + "items": { + "description": "", + "properties": { + "jar": { + "description": "URI of the jar to be installed. Currently only DBFS and S3 URIs are supported.\nFor example: `{ \"jar\": \"dbfs:/mnt/databricks/library.jar\" }` or\n`{ \"jar\": \"s3://my-bucket/library.jar\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." 
+ }, + "maven": { + "description": "Specification of a maven library to be installed. For example:\n`{ \"coordinates\": \"org.jsoup:jsoup:1.7.2\" }`", + "properties": { + "coordinates": { + "description": "Gradle-style maven coordinates. For example: \"org.jsoup:jsoup:1.7.2\"." + }, + "exclusions": { + "description": "List of dependences to exclude. For example: `[\"slf4j:slf4j\", \"*:hadoop-client\"]`.\n\nMaven dependency exclusions:\nhttps://maven.apache.org/guides/introduction/introduction-to-optional-and-excludes-dependencies.html.", + "items": { + "description": "" + } + }, + "repo": { + "description": "Maven repo to install the Maven package from. If omitted, both Maven Central Repository\nand Spark Packages are searched." + } + } + }, + "notebook": { + "description": "The path to a notebook that defines a pipeline and is stored in the Databricks workspace.\nFor example: `{ \"notebook\" : { \"path\" : \"/my-pipeline-notebook-path\" } }`.\nCurrently, only Scala notebooks are supported, and pipelines must be defined in a package\ncell.", + "properties": { + "path": { + "description": "The absolute path of the notebook." + } + } + }, + "whl": { + "description": "URI of the wheel to be installed.\nFor example: `{ \"whl\": \"dbfs:/my/whl\" }` or `{ \"whl\": \"s3://my-bucket/whl\" }`.\nIf S3 is used, please make sure the cluster has read access on the library. You may need to\nlaunch the cluster with an IAM role to access the S3 URI." + } + } + } + }, + "name": { + "description": "Friendly identifier for this pipeline." + }, + "photon": { + "description": "Whether Photon is enabled for this pipeline." + }, + "storage": { + "description": "DBFS root directory for storing checkpoints and tables." + }, + "target": { + "description": "Target schema (database) to add tables in this pipeline to." + }, + "trigger": { + "description": "Which pipeline trigger to use. 
Deprecated: Use `continuous` instead.", + "properties": { + "cron": { + "description": "", + "properties": { + "quartz_cron_schedule": { + "description": "" + }, + "timezone_id": { + "description": "" + } + } + }, + "manual": { + "description": "" + } + } + } + } + } + } + } + }, + "workspace": { + "description": "", + "properties": { + "artifact_path": { + "description": "", + "properties": { + "dbfs": { + "description": "" + }, + "workspace": { + "description": "" + } + } + }, + "azure_client_id": { + "description": "" + }, + "azure_environment": { + "description": "" + }, + "azure_login_app_id": { + "description": "" + }, + "azure_tenant_id": { + "description": "" + }, + "azure_use_msi": { + "description": "" + }, + "azure_workspace_resource_id": { + "description": "" + }, + "current_user": { + "description": "", + "properties": { + "active": { + "description": "" + }, + "displayName": { + "description": "" + }, + "emails": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "entitlements": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "externalId": { + "description": "" + }, + "groups": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "id": { + "description": "" + }, + "name": { + "description": "", + "properties": { + "familyName": { + "description": "" + }, + "givenName": { + "description": "" + } + } + }, + "roles": { + "description": "", + "items": { + "description": "", + "properties": { + "$ref": { + "description": "" + }, + "display": { + "description": "" + }, + "primary": { + "description": "" + }, + "type": { + "description": "" + }, + "value": { + "description": "" + } + } + } + }, + "userName": { + "description": "" + } + } + }, + "file_path": { + "description": "", + "properties": { + "dbfs": { + "description": "" + }, + "workspace": { + "description": "" + } + } + }, + "google_service_account": { + "description": "" + }, + "host": { + "description": "" + }, + "profile": { + "description": "" + }, + "root": { + "description": "" + }, + "state_path": { + "description": "", + "properties": { + "dbfs": { + "description": "" + }, + "workspace": { + "description": "" + } + } + } + } + } + } +} \ No newline at end of file diff --git a/bundle/schema/docs_test.go b/bundle/schema/docs_test.go new file mode 100644 index 00000000..84d804b0 --- /dev/null +++ b/bundle/schema/docs_test.go @@ -0,0 +1,61 @@ +package schema + +import ( + "encoding/json" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestSchemaToDocs(t *testing.T) { + schema := &Schema{ + Type: "object", + Description: "root doc", + Properties: map[string]*Schema{ + "foo": {Type: "number", Description: "foo doc"}, + "bar": {Type: "string"}, + "octave": { + Type: "object", + AdditionalProperties: &Schema{Type: "number"}, + Description: "octave docs", + }, + "scales": { + Type: "object", 
+ Description: "scale docs", + Items: &Schema{Type: "string"}, + }, + }, + } + docs := schemaToDocs(schema) + docsJson, err := json.MarshalIndent(docs, " ", " ") + require.NoError(t, err) + + expected := + `{ + "description": "root doc", + "properties": { + "bar": { + "description": "" + }, + "foo": { + "description": "foo doc" + }, + "octave": { + "description": "octave docs", + "additionalproperties": { + "description": "" + } + }, + "scales": { + "description": "scale docs", + "items": { + "description": "" + } + } + } + }` + t.Log("[DEBUG] actual: ", string(docsJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(docsJson)) +} diff --git a/bundle/schema/openapi.go b/bundle/schema/openapi.go new file mode 100644 index 00000000..97ec5141 --- /dev/null +++ b/bundle/schema/openapi.go @@ -0,0 +1,214 @@ +package schema + +import ( + "encoding/json" + "fmt" + "strings" + + "github.com/databricks/databricks-sdk-go/openapi" +) + +type OpenapiReader struct { + OpenapiSpec *openapi.Specification + Memo map[string]*Schema +} + +const SchemaPathPrefix = "#/components/schemas/" + +func (reader *OpenapiReader) readOpenapiSchema(path string) (*Schema, error) { + schemaKey := strings.TrimPrefix(path, SchemaPathPrefix) + + // return early if we already have a computed schema + memoSchema, ok := reader.Memo[schemaKey] + if ok { + return memoSchema, nil + } + + // check path is present in openapi spec + openapiSchema, ok := reader.OpenapiSpec.Components.Schemas[schemaKey] + if !ok { + return nil, fmt.Errorf("schema with path %s not found in openapi spec", path) + } + + // convert openapi schema to the native schema struct + bytes, err := json.Marshal(*openapiSchema) + if err != nil { + return nil, err + } + jsonSchema := &Schema{} + err = json.Unmarshal(bytes, jsonSchema) + if err != nil { + return nil, err + } + + // A hack to convert a map[string]interface{} to *Schema + // We rely on the type of AdditionalProperties in downstream functions + // to do reference interpolation + _, ok = jsonSchema.AdditionalProperties.(map[string]interface{}) + if ok { + b, err := json.Marshal(jsonSchema.AdditionalProperties) + if err != nil { + return nil, err + } + additionalProperties := &Schema{} + err = json.Unmarshal(b, additionalProperties) + if err != nil { + return nil, err + } + jsonSchema.AdditionalProperties = additionalProperties + } + + // store read schema into memo + reader.Memo[schemaKey] = jsonSchema + + return jsonSchema, nil +} + +// safe against loops in refs +func (reader *OpenapiReader) safeResolveRefs(root *Schema, seenRefs map[string]struct{}) (*Schema, error) { + if root.Reference == nil { + return reader.traverseSchema(root, seenRefs) + } + key := *root.Reference + _, ok := seenRefs[key] + if ok { + // self reference loops can be supported, however the logic is non-trivial because + // cross reference loops are not allowed (see: http://json-schema.org/understanding-json-schema/structuring.html#recursion) + return nil, fmt.Errorf("references loop detected") + } + ref := *root.Reference + description := root.Description + seenRefs[ref] = struct{}{} + + // Mark reference nil, so we do not traverse this again. 
This is tracked + // in the memo + root.Reference = nil + + // unroll one level of reference + selfRef, err := reader.readOpenapiSchema(ref) + if err != nil { + return nil, err + } + root = selfRef + root.Description = description + + // traverse again to find new references + root, err = reader.traverseSchema(root, seenRefs) + if err != nil { + return nil, err + } + delete(seenRefs, ref) + return root, err +} + +func (reader *OpenapiReader) traverseSchema(root *Schema, seenRefs map[string]struct{}) (*Schema, error) { + // case primitive (or invalid) + if root.Type != Object && root.Type != Array { + return root, nil + } + // only root references are resolved + if root.Reference != nil { + return reader.safeResolveRefs(root, seenRefs) + } + // case struct + if len(root.Properties) > 0 { + for k, v := range root.Properties { + childSchema, err := reader.safeResolveRefs(v, seenRefs) + if err != nil { + return nil, err + } + root.Properties[k] = childSchema + } + } + // case array + if root.Items != nil { + itemsSchema, err := reader.safeResolveRefs(root.Items, seenRefs) + if err != nil { + return nil, err + } + root.Items = itemsSchema + } + // case map + additionalProperties, ok := root.AdditionalProperties.(*Schema) + if ok && additionalProperties != nil { + valueSchema, err := reader.safeResolveRefs(additionalProperties, seenRefs) + if err != nil { + return nil, err + } + root.AdditionalProperties = valueSchema + } + return root, nil +} + +func (reader *OpenapiReader) readResolvedSchema(path string) (*Schema, error) { + root, err := reader.readOpenapiSchema(path) + if err != nil { + return nil, err + } + seenRefs := make(map[string]struct{}) + seenRefs[path] = struct{}{} + root, err = reader.safeResolveRefs(root, seenRefs) + if err != nil { + trace := "" + count := 0 + for k := range seenRefs { + if count == len(seenRefs)-1 { + trace += k + break + } + trace += k + " -> " + count++ + } + return nil, fmt.Errorf("%s. schema ref trace: %s", err, trace) + } + return root, nil +} + +func (reader *OpenapiReader) jobsDocs() (*Docs, error) { + jobSettingsSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "jobs.JobSettings") + if err != nil { + return nil, err + } + jobDocs := schemaToDocs(jobSettingsSchema) + // TODO: add description for id if needed. + // Tracked in https://github.com/databricks/bricks/issues/242 + jobsDocs := &Docs{ + Description: "List of job definitions", + AdditionalProperties: jobDocs, + } + return jobsDocs, nil +} + +func (reader *OpenapiReader) pipelinesDocs() (*Docs, error) { + pipelineSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "pipelines.PipelineSpec") + if err != nil { + return nil, err + } + pipelineDocs := schemaToDocs(pipelineSpecSchema) + // TODO: Two fields in resources.Pipeline have the json tag id. Clarify the + // semantics and then add a description if needed. 
(https://github.com/databricks/bricks/issues/242) + pipelinesDocs := &Docs{ + Description: "List of pipeline definitions", + AdditionalProperties: pipelineDocs, + } + return pipelinesDocs, nil +} + +func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) { + jobsDocs, err := reader.jobsDocs() + if err != nil { + return nil, err + } + pipelinesDocs, err := reader.pipelinesDocs() + if err != nil { + return nil, err + } + + return &Docs{ + Description: "Specification of databricks resources to instantiate", + Properties: map[string]*Docs{ + "jobs": jobsDocs, + "pipelines": pipelinesDocs, + }, + }, nil +} diff --git a/bundle/schema/openapi_test.go b/bundle/schema/openapi_test.go new file mode 100644 index 00000000..faee64fa --- /dev/null +++ b/bundle/schema/openapi_test.go @@ -0,0 +1,435 @@ +package schema + +import ( + "encoding/json" + "testing" + + "github.com/databricks/databricks-sdk-go/openapi" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" +) + +func TestReadSchemaForObject(t *testing.T) { + specString := ` + { + "components": { + "schemas": { + "foo": { + "type": "number" + }, + "fruits": { + "type": "object", + "description": "fruits that are cool", + "properties": { + "guava": { + "type": "string", + "description": "a guava for my schema" + }, + "mango": { + "type": "object", + "description": "a mango for my schema", + "$ref": "#/components/schemas/mango" + } + } + }, + "mango": { + "type": "object", + "properties": { + "foo": { + "$ref": "#/components/schemas/foo" + } + } + } + } + } + } + ` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "description": "fruits that are cool", + "properties": { + "guava": { + "type": "string", + "description": "a guava for my schema" + }, + "mango": { + "type": "object", + "description": "a mango for my schema", + "properties": { + "foo": { + "type": "number" + } + } + } + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} + +func TestReadSchemaForArray(t *testing.T) { + specString := ` + { + "components": { + "schemas": { + "fruits": { + "type": "object", + "description": "fruits that are cool", + "items": { + "description": "some papayas, because papayas are fruits too", + "$ref": "#/components/schemas/papaya" + } + }, + "papaya": { + "type": "number" + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "description": "fruits that are cool", + "items": { + "type": "number", + "description": "some papayas, because papayas are fruits too" + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} + +func 
TestReadSchemaForMap(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "fruits": { + "type": "object", + "description": "fruits that are meh", + "additionalProperties": { + "description": "watermelons. watermelons.", + "$ref": "#/components/schemas/watermelon" + } + }, + "watermelon": { + "type": "number" + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "description": "fruits that are meh", + "additionalProperties": { + "type": "number", + "description": "watermelons. watermelons." + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} + +func TestRootReferenceIsResolved(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "foo": { + "type": "object", + "description": "this description is ignored", + "properties": { + "abc": { + "type": "string" + } + } + }, + "fruits": { + "type": "object", + "description": "foo fighters fighting fruits", + "$ref": "#/components/schemas/foo" + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + schema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + fruitsSchemaJson, err := json.MarshalIndent(schema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "description": "foo fighters fighting fruits", + "properties": { + "abc": { + "type": "string" + } + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} + +func TestSelfReferenceLoopErrors(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "foo": { + "type": "object", + "description": "this description is ignored", + "properties": { + "bar": { + "type": "object", + "$ref": "#/components/schemas/foo" + } + } + }, + "fruits": { + "type": "object", + "description": "foo fighters fighting fruits", + "$ref": "#/components/schemas/foo" + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + _, err = reader.readResolvedSchema("#/components/schemas/fruits") + assert.ErrorContains(t, err, "references loop detected. 
schema ref trace: #/components/schemas/fruits -> #/components/schemas/foo") +} + +func TestCrossReferenceLoopErrors(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "foo": { + "type": "object", + "description": "this description is ignored", + "properties": { + "bar": { + "type": "object", + "$ref": "#/components/schemas/fruits" + } + } + }, + "fruits": { + "type": "object", + "description": "foo fighters fighting fruits", + "$ref": "#/components/schemas/foo" + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + _, err = reader.readResolvedSchema("#/components/schemas/fruits") + assert.ErrorContains(t, err, "references loop detected. schema ref trace: #/components/schemas/fruits -> #/components/schemas/foo") +} + +func TestReferenceResolutionForMapInObject(t *testing.T) { + specString := ` + { + "components": { + "schemas": { + "foo": { + "type": "number" + }, + "fruits": { + "type": "object", + "description": "fruits that are cool", + "properties": { + "guava": { + "type": "string", + "description": "a guava for my schema" + }, + "mangos": { + "type": "object", + "description": "multiple mangos", + "$ref": "#/components/schemas/mango" + } + } + }, + "mango": { + "type": "object", + "additionalProperties": { + "description": "a single mango", + "$ref": "#/components/schemas/foo" + } + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "description": "fruits that are cool", + "properties": { + "guava": { + "type": "string", + "description": "a guava for my schema" + }, + "mangos": { + "type": "object", + "description": "multiple mangos", + "additionalProperties": { + "type": "number", + "description": "a single mango" + } + } + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} + +func TestReferenceResolutionForArrayInObject(t *testing.T) { + specString := `{ + "components": { + "schemas": { + "foo": { + "type": "number" + }, + "fruits": { + "type": "object", + "description": "fruits that are cool", + "properties": { + "guava": { + "type": "string", + "description": "a guava for my schema" + }, + "mangos": { + "type": "object", + "description": "multiple mangos", + "$ref": "#/components/schemas/mango" + } + } + }, + "mango": { + "type": "object", + "items": { + "description": "a single mango", + "$ref": "#/components/schemas/foo" + } + } + } + } + }` + spec := &openapi.Specification{} + reader := &OpenapiReader{ + OpenapiSpec: spec, + Memo: make(map[string]*Schema), + } + err := json.Unmarshal([]byte(specString), spec) + require.NoError(t, err) + + fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits") + require.NoError(t, err) + + fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ") + require.NoError(t, err) + + expected := `{ + "type": "object", + "description": "fruits that are cool", + "properties": { + "guava": { + "type": "string", + "description": "a guava 
for my schema" + }, + "mangos": { + "type": "object", + "description": "multiple mangos", + "items": { + "type": "number", + "description": "a single mango" + } + } + } + }` + + t.Log("[DEBUG] actual: ", string(fruitsSchemaJson)) + t.Log("[DEBUG] expected: ", expected) + assert.Equal(t, expected, string(fruitsSchemaJson)) +} diff --git a/bundle/schema/schema.go b/bundle/schema/schema.go index d6d0295d..2e50298c 100644 --- a/bundle/schema/schema.go +++ b/bundle/schema/schema.go @@ -35,6 +35,9 @@ type Schema struct { // Required properties for the object. Any fields missing the "omitempty" // json tag will be included Required []string `json:"required,omitempty"` + + // URI to a json schema + Reference *string `json:"$ref,omitempty"` } // This function translates golang types into json schema. Here is the mapping @@ -187,7 +190,7 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*Schema, e schema := &Schema{Type: rootJavascriptType} if docs != nil { - schema.Description = docs.Documentation + schema.Description = docs.Description } // case array/slice @@ -197,7 +200,11 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*Schema, e if err != nil { return nil, err } - elemProps, err := safeToSchema(elemGolangType, docs, "", tracker) + var childDocs *Docs + if docs != nil { + childDocs = docs.Items + } + elemProps, err := safeToSchema(elemGolangType, childDocs, "", tracker) if err != nil { return nil, err } @@ -215,7 +222,11 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*Schema, e if golangType.Key().Kind() != reflect.String { return nil, fmt.Errorf("only string keyed maps allowed") } - schema.AdditionalProperties, err = safeToSchema(golangType.Elem(), docs, "", tracker) + var childDocs *Docs + if docs != nil { + childDocs = docs.AdditionalProperties + } + schema.AdditionalProperties, err = safeToSchema(golangType.Elem(), childDocs, "", tracker) if err != nil { return nil, err } @@ -240,8 +251,8 @@ func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*Schema, e // get docs for the child if they exist var childDocs *Docs if docs != nil { - if val, ok := docs.Children[childName]; ok { - childDocs = &val + if val, ok := docs.Properties[childName]; ok { + childDocs = val } } diff --git a/bundle/schema/schema_test.go b/bundle/schema/schema_test.go index 1539e35f..966aab81 100644 --- a/bundle/schema/schema_test.go +++ b/bundle/schema/schema_test.go @@ -1062,60 +1062,31 @@ func TestFieldsWithoutOmitEmptyAreRequired(t *testing.T) { assert.Equal(t, expectedSchema, string(jsonSchema)) } -func TestDocIngestionInSchema(t *testing.T) { +func TestDocIngestionForObject(t *testing.T) { docs := &Docs{ - Documentation: "docs for root", - Children: map[string]Docs{ + Description: "docs for root", + Properties: map[string]*Docs{ "my_struct": { - Documentation: "docs for my struct", - }, - "my_val": { - Documentation: "docs for my val", - }, - "my_slice": { - Documentation: "docs for my slice", - Children: map[string]Docs{ - "guava": { - Documentation: "docs for guava", + Description: "docs for my struct", + Properties: map[string]*Docs{ + "a": { + Description: "docs for a", }, - "pineapple": { - Documentation: "docs for pineapple", - }, - }, - }, - "my_map": { - Documentation: "docs for my map", - Children: map[string]Docs{ - "apple": { - Documentation: "docs for apple", - }, - "mango": { - Documentation: "docs for mango", + "c": { + Description: "docs for c which does not exist on my_struct", }, }, }, }, } - type Foo struct { 
- Apple int `json:"apple"` - Mango int `json:"mango"` - } - - type Bar struct { - Guava int `json:"guava"` - Pineapple int `json:"pineapple"` - } - type MyStruct struct { A string `json:"a"` + B int `json:"b"` } type Root struct { - MyStruct *MyStruct `json:"my_struct"` - MyVal int `json:"my_val"` - MySlice []Bar `json:"my_slice"` - MyMap map[string]*Foo `json:"my_map"` + MyStruct *MyStruct `json:"my_struct"` } elem := Root{} @@ -1131,29 +1102,82 @@ func TestDocIngestionInSchema(t *testing.T) { "type": "object", "description": "docs for root", "properties": { - "my_map": { + "my_struct": { "type": "object", - "description": "docs for my map", - "additionalProperties": { - "type": "object", - "description": "docs for my map", - "properties": { - "apple": { - "type": "number", - "description": "docs for apple" - }, - "mango": { - "type": "number", - "description": "docs for mango" - } + "description": "docs for my struct", + "properties": { + "a": { + "type": "string", + "description": "docs for a" }, - "additionalProperties": false, - "required": [ - "apple", - "mango" - ] - } + "b": { + "type": "number" + } + }, + "additionalProperties": false, + "required": [ + "a", + "b" + ] + } + }, + "additionalProperties": false, + "required": [ + "my_struct" + ] + }` + + t.Log("[DEBUG] actual: ", string(jsonSchema)) + t.Log("[DEBUG] expected: ", expectedSchema) + + assert.Equal(t, expectedSchema, string(jsonSchema)) +} + +func TestDocIngestionForSlice(t *testing.T) { + docs := &Docs{ + Description: "docs for root", + Properties: map[string]*Docs{ + "my_slice": { + Description: "docs for my slice", + Items: &Docs{ + Properties: map[string]*Docs{ + "guava": { + Description: "docs for guava", + }, + "pineapple": { + Description: "docs for pineapple", + }, + "watermelon": { + Description: "docs for watermelon which does not exist in schema", + }, + }, }, + }, + }, + } + + type Bar struct { + Guava int `json:"guava"` + Pineapple int `json:"pineapple"` + } + + type Root struct { + MySlice []Bar `json:"my_slice"` + } + + elem := Root{} + + schema, err := New(reflect.TypeOf(elem), docs) + require.NoError(t, err) + + jsonSchema, err := json.MarshalIndent(schema, " ", " ") + assert.NoError(t, err) + + expectedSchema := + `{ + "type": "object", + "description": "docs for root", + "properties": { "my_slice": { "type": "array", "description": "docs for my slice", @@ -1175,20 +1199,130 @@ func TestDocIngestionInSchema(t *testing.T) { "pineapple" ] } - }, - "my_struct": { - "type": "object", - "description": "docs for my struct", - "properties": { - "a": { - "type": "string" - } + } + }, + "additionalProperties": false, + "required": [ + "my_slice" + ] + }` + + t.Log("[DEBUG] actual: ", string(jsonSchema)) + t.Log("[DEBUG] expected: ", expectedSchema) + + assert.Equal(t, expectedSchema, string(jsonSchema)) +} + +func TestDocIngestionForMap(t *testing.T) { + docs := &Docs{ + Description: "docs for root", + Properties: map[string]*Docs{ + "my_map": { + Description: "docs for my map", + AdditionalProperties: &Docs{ + Properties: map[string]*Docs{ + "apple": { + Description: "docs for apple", + }, + "mango": { + Description: "docs for mango", + }, + "watermelon": { + Description: "docs for watermelon which does not exist in schema", + }, + "papaya": { + Description: "docs for papaya which does not exist in schema", + }, }, - "additionalProperties": false, - "required": [ - "a" - ] }, + }, + }, + } + + type Foo struct { + Apple int `json:"apple"` + Mango int `json:"mango"` + } + + type Root struct { + MyMap 
map[string]*Foo `json:"my_map"` + } + + elem := Root{} + + schema, err := New(reflect.TypeOf(elem), docs) + require.NoError(t, err) + + jsonSchema, err := json.MarshalIndent(schema, " ", " ") + assert.NoError(t, err) + + expectedSchema := + `{ + "type": "object", + "description": "docs for root", + "properties": { + "my_map": { + "type": "object", + "description": "docs for my map", + "additionalProperties": { + "type": "object", + "properties": { + "apple": { + "type": "number", + "description": "docs for apple" + }, + "mango": { + "type": "number", + "description": "docs for mango" + } + }, + "additionalProperties": false, + "required": [ + "apple", + "mango" + ] + } + } + }, + "additionalProperties": false, + "required": [ + "my_map" + ] + }` + + t.Log("[DEBUG] actual: ", string(jsonSchema)) + t.Log("[DEBUG] expected: ", expectedSchema) + + assert.Equal(t, expectedSchema, string(jsonSchema)) +} + +func TestDocIngestionForTopLevelPrimitive(t *testing.T) { + docs := &Docs{ + Description: "docs for root", + Properties: map[string]*Docs{ + "my_val": { + Description: "docs for my val", + }, + }, + } + + type Root struct { + MyVal int `json:"my_val"` + } + + elem := Root{} + + schema, err := New(reflect.TypeOf(elem), docs) + require.NoError(t, err) + + jsonSchema, err := json.MarshalIndent(schema, " ", " ") + assert.NoError(t, err) + + expectedSchema := + `{ + "type": "object", + "description": "docs for root", + "properties": { "my_val": { "type": "number", "description": "docs for my val" @@ -1196,10 +1330,7 @@ func TestDocIngestionInSchema(t *testing.T) { }, "additionalProperties": false, "required": [ - "my_struct", - "my_val", - "my_slice", - "my_map" + "my_val" ] }` diff --git a/cmd/bundle/launch.go b/cmd/bundle/launch.go new file mode 100644 index 00000000..2f748b78 --- /dev/null +++ b/cmd/bundle/launch.go @@ -0,0 +1,37 @@ +package bundle + +import ( + "fmt" + + "github.com/databricks/bricks/cmd/root" + "github.com/spf13/cobra" +) + +var launchCmd = &cobra.Command{ + Use: "launch", + Short: "Launches a notebook on development cluster", + Long: `Reads a file and executes it on dev cluster`, + Args: cobra.ExactArgs(1), + + // We're not ready to expose this command until we specify its semantics. 
+ Hidden: true, + + PreRunE: root.MustConfigureBundle, + RunE: func(cmd *cobra.Command, args []string) error { + return fmt.Errorf("TODO") + // contents, err := os.ReadFile(args[0]) + // if err != nil { + // return err + // } + // results := project.RunPythonOnDev(cmd.Context(), string(contents)) + // if results.Failed() { + // return results.Err() + // } + // fmt.Fprintf(cmd.OutOrStdout(), "Success: %s", results.Text()) + // return nil + }, +} + +func init() { + AddCommand(launchCmd) +} diff --git a/cmd/bundle/run.go b/cmd/bundle/run.go index cc358c7c..c45353cf 100644 --- a/cmd/bundle/run.go +++ b/cmd/bundle/run.go @@ -21,7 +21,6 @@ var runCmd = &cobra.Command{ b := bundle.Get(cmd.Context()) err := bundle.Apply(cmd.Context(), b, []bundle.Mutator{ phases.Initialize(), - terraform.Initialize(), terraform.Load(), }) if err != nil { diff --git a/cmd/bundle/schema.go b/cmd/bundle/schema.go index e3c19c46..d59e08d1 100644 --- a/cmd/bundle/schema.go +++ b/cmd/bundle/schema.go @@ -14,7 +14,7 @@ var schemaCmd = &cobra.Command{ Short: "Generate JSON Schema for bundle configuration", RunE: func(cmd *cobra.Command, args []string) error { - docs, err := schema.GetBundleDocs() + docs, err := schema.BundleDocs(openapi) if err != nil { return err } @@ -22,15 +22,26 @@ var schemaCmd = &cobra.Command{ if err != nil { return err } - jsonSchema, err := json.MarshalIndent(schema, "", " ") + result, err := json.MarshalIndent(schema, "", " ") if err != nil { return err } - cmd.OutOrStdout().Write(jsonSchema) + if onlyDocs { + result, err = json.MarshalIndent(docs, "", " ") + if err != nil { + return err + } + } + cmd.OutOrStdout().Write(result) return nil }, } +var openapi string +var onlyDocs bool + func init() { AddCommand(schemaCmd) + schemaCmd.Flags().StringVar(&openapi, "openapi", "", "path to a databricks openapi spec") + schemaCmd.Flags().BoolVar(&onlyDocs, "only-docs", false, "only generate descriptions for the schema") } diff --git a/cmd/bundle/test.go b/cmd/bundle/test.go new file mode 100644 index 00000000..1e6c09aa --- /dev/null +++ b/cmd/bundle/test.go @@ -0,0 +1,32 @@ +package bundle + +import ( + "fmt" + + "github.com/databricks/bricks/cmd/root" + "github.com/spf13/cobra" +) + +var testCmd = &cobra.Command{ + Use: "test", + Short: "run tests for the project", + Long: `This is longer description of the command`, + + // We're not ready to expose this command until we specify its semantics. 
+ Hidden: true, + + PreRunE: root.MustConfigureBundle, + RunE: func(cmd *cobra.Command, args []string) error { + return fmt.Errorf("TODO") + // results := project.RunPythonOnDev(cmd.Context(), `return 1`) + // if results.Failed() { + // return results.Err() + // } + // fmt.Fprintf(cmd.OutOrStdout(), "Success: %s", results.Text()) + // return nil + }, +} + +func init() { + AddCommand(testCmd) +} diff --git a/cmd/launch/launch.go b/cmd/launch/launch.go deleted file mode 100644 index 402a6963..00000000 --- a/cmd/launch/launch.go +++ /dev/null @@ -1,36 +0,0 @@ -package launch - -import ( - "fmt" - "log" - "os" - - "github.com/databricks/bricks/cmd/root" - "github.com/databricks/bricks/project" - "github.com/spf13/cobra" -) - -// launchCmd represents the launch command -var launchCmd = &cobra.Command{ - Use: "launch", - Short: "Launches a notebook on development cluster", - Long: `Reads a file and executes it on dev cluster`, - Args: cobra.ExactArgs(1), - Run: func(cmd *cobra.Command, args []string) { - contents, err := os.ReadFile(args[0]) - if err != nil { - log.Fatal(err) - } - results := project.RunPythonOnDev(cmd.Context(), string(contents)) - if results.Failed() { - log.Fatal(results.Error()) - } - fmt.Println(results.Text()) - }, -} - -func init() { - // TODO: detect if we can skip registering - // launch command for DBSQL projects - root.RootCmd.AddCommand(launchCmd) -} diff --git a/cmd/test/test.go b/cmd/test/test.go deleted file mode 100644 index 33a0a6e1..00000000 --- a/cmd/test/test.go +++ /dev/null @@ -1,37 +0,0 @@ -package test - -import ( - "log" - - "github.com/databricks/bricks/cmd/root" - "github.com/databricks/bricks/project" - "github.com/spf13/cobra" -) - -// testCmd represents the test command -var testCmd = &cobra.Command{ - Use: "test", - Short: "run tests for the project", - Long: `This is longer description of the command`, - Run: func(cmd *cobra.Command, args []string) { - results := project.RunPythonOnDev(cmd.Context(), `return 1`) - if results.Failed() { - log.Fatal(results.Error()) - } - log.Printf("Success: %s", results.Text()) - }, -} - -func init() { - root.RootCmd.AddCommand(testCmd) - - // Here you will define your flags and configuration settings. 
- - // Cobra supports Persistent Flags which will work for this command - // and all subcommands, e.g.: - // testCmd.PersistentFlags().String("foo", "", "A help for foo") - - // Cobra supports local flags which will only run when this command - // is called directly, e.g.: - // testCmd.Flags().BoolP("toggle", "t", false, "Help message for toggle") -} diff --git a/go.mod b/go.mod index 755f428b..a24657be 100644 --- a/go.mod +++ b/go.mod @@ -22,17 +22,18 @@ require ( github.com/google/uuid v1.3.0 github.com/hashicorp/go-version v1.6.0 github.com/hashicorp/hc-install v0.5.0 - github.com/hashicorp/terraform-exec v0.17.3 - github.com/hashicorp/terraform-json v0.15.0 + github.com/hashicorp/terraform-exec v0.18.1 + github.com/hashicorp/terraform-json v0.16.0 golang.org/x/exp v0.0.0-20221031165847-c99f073a8326 golang.org/x/sync v0.1.0 ) require ( + github.com/apparentlymart/go-textseg/v13 v13.0.0 // indirect github.com/hashicorp/go-cleanhttp v0.5.2 // indirect github.com/mattn/go-colorable v0.1.13 // indirect github.com/mattn/go-isatty v0.0.17 // indirect - github.com/zclconf/go-cty v1.11.0 // indirect + github.com/zclconf/go-cty v1.13.0 // indirect golang.org/x/crypto v0.5.0 // indirect ) @@ -61,5 +62,5 @@ require ( google.golang.org/grpc v1.53.0 // indirect google.golang.org/protobuf v1.28.1 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect - gopkg.in/yaml.v3 v3.0.1 + gopkg.in/yaml.v3 v3.0.1 // indirect ) diff --git a/go.sum b/go.sum index 4df836ab..71c1eb98 100644 --- a/go.sum +++ b/go.sum @@ -15,7 +15,7 @@ github.com/ProtonMail/go-crypto v0.0.0-20210428141323-04723f9f07d7/go.mod h1:z4/ github.com/acomagu/bufpipe v1.0.3 h1:fxAGrHZTgQ9w5QqVItgzwj235/uYZYgbXitB+dLupOk= github.com/acomagu/bufpipe v1.0.3/go.mod h1:mxdxdup/WdsKVreO5GpW4+M/1CE2sMG4jeGJ2sYmHc4= github.com/anmitsu/go-shlex v0.0.0-20161002113705-648efa622239/go.mod h1:2FmKhYUyUczH0OGQWaF5ceTx0UBShxjsH6f8oGKYe2c= -github.com/apparentlymart/go-textseg v1.0.0/go.mod h1:z96Txxhf3xSFMPmb5X/1W05FF/Nj9VFpLOpjS5yuumk= +github.com/apparentlymart/go-textseg/v13 v13.0.0 h1:Y+KvPE1NYz0xl601PVImeQfFyEy6iT90AvPUL1NNfNw= github.com/apparentlymart/go-textseg/v13 v13.0.0/go.mod h1:ZK2fH7c4NqDTLtiYLvIkEghdlcqw7yxLeM89kiTRPUo= github.com/armon/go-radix v0.0.0-20180808171621-7fddfc383310/go.mod h1:ufUuZ+zHj4x4TnLV4JWEpy2hxWSpsRywHrMgIH9cCH8= github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= @@ -33,8 +33,13 @@ github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDk github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc= github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= +<<<<<<< HEAD github.com/databricks/databricks-sdk-go v0.5.0 h1:u5tytS0+BHZXLS2cqg4gW+6LOGgfXOriBuT5bt+gTSk= github.com/databricks/databricks-sdk-go v0.5.0/go.mod h1:MTJHMjKBotE0IsyPbMcPvHb3JKknkRtOms/ET7F0rB8= +======= +github.com/databricks/databricks-sdk-go v0.4.1 h1:SX1Owt2XcY/NVt/LRbuO/0dtomEebfNa8yekQlQy4gM= +github.com/databricks/databricks-sdk-go v0.4.1/go.mod h1:MOtUlDw9bj/1PuYmFkNSx+29Hu1clrob8vQQ6Fsji8A= +>>>>>>> origin github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -63,11 +68,9 @@ 
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfU github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY= github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc= github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= -github.com/golang/protobuf v1.1.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8= github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA= github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs= @@ -110,10 +113,10 @@ github.com/hashicorp/go-version v1.6.0/go.mod h1:fltr4n8CU8Ke44wwGCBoEymUuxUHl09 github.com/hashicorp/hc-install v0.5.0 h1:D9bl4KayIYKEeJ4vUDe9L5huqxZXczKaykSRcmQ0xY0= github.com/hashicorp/hc-install v0.5.0/go.mod h1:JyzMfbzfSBSjoDCRPna1vi/24BEDxFaCPfdHtM5SCdo= github.com/hashicorp/logutils v1.0.0/go.mod h1:QIAnNjmIWmVIIkWDTG1z5v++HQmx9WQRO+LraFDTW64= -github.com/hashicorp/terraform-exec v0.17.3 h1:MX14Kvnka/oWGmIkyuyvL6POx25ZmKrjlaclkx3eErU= -github.com/hashicorp/terraform-exec v0.17.3/go.mod h1:+NELG0EqQekJzhvikkeQsOAZpsw0cv/03rbeQJqscAI= -github.com/hashicorp/terraform-json v0.15.0 h1:/gIyNtR6SFw6h5yzlbDbACyGvIhKtQi8mTsbkNd79lE= -github.com/hashicorp/terraform-json v0.15.0/go.mod h1:+L1RNzjDU5leLFZkHTFTbJXaoqUC6TqXlFgDoOXrtvk= +github.com/hashicorp/terraform-exec v0.18.1 h1:LAbfDvNQU1l0NOQlTuudjczVhHj061fNX5H8XZxHlH4= +github.com/hashicorp/terraform-exec v0.18.1/go.mod h1:58wg4IeuAJ6LVsLUeD2DWZZoc/bYi6dzhLHzxM41980= +github.com/hashicorp/terraform-json v0.16.0 h1:UKkeWRWb23do5LNAFlh/K3N0ymn1qTOO8c+85Albo3s= +github.com/hashicorp/terraform-json v0.16.0/go.mod h1:v0Ufk9jJnk6tcIZvScHvetlKfiNTC+WS21mnXIlc0B0= github.com/huandu/xstrings v1.3.1/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/huandu/xstrings v1.3.2/go.mod h1:y5/lhBue+AyNmUVz9RLU9xbLR0o4KIIExikq4ovT0aE= github.com/imdario/mergo v0.3.11/go.mod h1:jmQim1M+e3UYxmgPu/WyfjB3N3VflVyUjjjwH0dnCYA= @@ -135,7 +138,6 @@ github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= -github.com/kylelemons/godebug v0.0.0-20170820004349-d65d576e9348/go.mod h1:B69LEHPfb2qLo0BaaOLcbitczOKLWTsrBG9LczfCD4k= github.com/manifoldco/promptui v0.9.0 h1:3V4HzJk1TtXW1MTZMP7mdlwbBpIinw3HztaIlYthEiA= github.com/manifoldco/promptui v0.9.0/go.mod h1:ka04sppxSGFAtxX0qhlYQjISsg9mR4GWtQEhdbn6Pgg= github.com/matryer/is v1.2.0/go.mod h1:2fLPjFQM9rhQ15aVEtbuwhJinnOqrmgXPNdZsdwlWXA= @@ -148,11 +150,9 @@ github.com/mattn/go-isatty v0.0.17 h1:BTarxUcIeDqL27Mc+vyvdWYSL28zpIhv3RoTdsLMPn github.com/mattn/go-isatty v0.0.17/go.mod h1:kYGgaQfpe5nmfYZH+SKPsOc2e4SrIfOl2e/yFXSvRLM= github.com/mitchellh/cli v1.1.5/go.mod 
h1:v8+iFts2sPIKUV1ltktPXMCC8fumSKFItNcD2cLtRR4= github.com/mitchellh/copystructure v1.0.0/go.mod h1:SNtv71yrdKgLRyLFxmLdkAbkKEFWgYaq1OVrnRcwhnw= -github.com/mitchellh/copystructure v1.2.0/go.mod h1:qLl+cE2AmVv+CoeAwDPye/v+N2HKCj9FbZEVFJRxO9s= github.com/mitchellh/go-homedir v1.1.0 h1:lukF9ziXFxDFPkA1vsr5zpc1XuPDn/wFntq5mG+4E0Y= github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0= github.com/mitchellh/reflectwalk v1.0.0/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= -github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw= github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8 h1:KoWmjvw+nsYOo29YJK9vDA65RGE3NrOnUtO7a+RF9HU= github.com/pkg/browser v0.0.0-20210911075715-681adbf594b8/go.mod h1:HKlIX3XHQyzLZPlr7++PzdhaXEj94dEiJgZDTsxEqUI= @@ -165,7 +165,6 @@ github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1: github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06 h1:OkMGxebDjyw0ULyrTYWeN0UNCCkmCWfjPnIA2W6oviI= github.com/sabhiram/go-gitignore v0.0.0-20210923224102-525f6e181f06/go.mod h1:+ePHsJ1keEjQtpvf9HHw0f4ZeJ0TLRsxhunSI2hYJSs= -github.com/sebdah/goldie v1.0.0/go.mod h1:jXP4hmWywNEwZzhMuv2ccnqTSFpuq8iyQhtQdkkZBH4= github.com/sergi/go-diff v1.1.0/go.mod h1:STckp+ISIX8hZLjrqAeVduY0gWCT9IjLuqbuNXdaHfM= github.com/sergi/go-diff v1.2.0 h1:XU+rvMAioB0UC3q1MFrIQy4Vo5/4VsRDQQXHsEya6xQ= github.com/shopspring/decimal v1.2.0/go.mod h1:DKyhrW/HYNuLGql+MJL6WCR6knT2jwCFRcu2hWCYk4o= @@ -180,7 +179,6 @@ github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+ github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= -github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA= github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= @@ -190,19 +188,19 @@ github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8= github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= +<<<<<<< HEAD github.com/vmihailenco/msgpack v3.3.3+incompatible/go.mod h1:fy3FlTQTDXWkZ7Bh6AcGMlsjHatGryHQYUTf1ShIgkk= github.com/vmihailenco/msgpack/v4 v4.3.12/go.mod h1:gborTTJjAo/GWTqqRjrLCn9pgNN+NXzzngzBKDPIqw4= github.com/vmihailenco/tagparser v0.1.1/go.mod h1:OeAg3pn3UbLjkWt+rN9oFYB6u/cQgqMEUPoW2WPyhdI= +======= +>>>>>>> origin github.com/whilp/git-urls v1.0.0 h1:95f6UMWN5FKW71ECsXRUd3FVYiXdrE7aX4NZKcPmIjU= github.com/whilp/git-urls v1.0.0/go.mod h1:J16SAmobsqc3Qcy98brfl5f5+e0clUvg1krgwk/qCfE= github.com/xanzy/ssh-agent v0.3.0 h1:wUMzuKtKilRgBAD1sUb8gOwwRr2FGoBVumcjoOACClI= github.com/xanzy/ssh-agent v0.3.0/go.mod h1:3s9xbODqPuuhK9JV1R321M/FlMZSBvE5aY6eAcqrDh0= github.com/yuin/goldmark v1.4.13/go.mod 
h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY= -github.com/zclconf/go-cty v1.2.0/go.mod h1:hOPWgoHbaTUnI5k4D2ld+GRpFJSCe6bCM7m1q/N4PQ8= -github.com/zclconf/go-cty v1.10.0/go.mod h1:vVKLxnk3puL4qRAv72AO+W99LUD4da90g3uUAzyuvAk= -github.com/zclconf/go-cty v1.11.0 h1:726SxLdi2SDnjY+BStqB9J1hNp4+2WlzyXLuimibIe0= -github.com/zclconf/go-cty v1.11.0/go.mod h1:s9IfD1LK5ccNMSWCVFCE2rJfHiZgi7JijgeWIMfhLvA= -github.com/zclconf/go-cty-debug v0.0.0-20191215020915-b22d67c1ba0b/go.mod h1:ZRKQfBXbGkpdV6QMzT3rU1kSTAnfu1dO8dPKjYprgj8= +github.com/zclconf/go-cty v1.13.0 h1:It5dfKTTZHe9aeppbNOda3mN7Ag7sg6QkBNm6TkyFa0= +github.com/zclconf/go-cty v1.13.0/go.mod h1:YKQzy/7pZ7iq2jNFzy5go57xdxdWoLLpaEp4u238AE0= go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0= go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo= golang.org/x/crypto v0.0.0-20190219172222-a4c6cb3142f2/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= @@ -226,14 +224,12 @@ golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/mod v0.9.0 h1:KENHtAZL2y3NLMYZeHY9DW8HW8V+kQyJsY/V9JlKvCs= golang.org/x/mod v0.9.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs= golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= -golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= -golang.org/x/net v0.0.0-20200301022130-244492dfa37a/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg= golang.org/x/net v0.0.0-20210326060303-6b1517762897/go.mod h1:uSPa2vr4CLtc/ILN5odXGNXS6mhrKVzTaCXzk9m6W3k= @@ -277,7 +273,6 @@ golang.org/x/term v0.4.0/go.mod h1:9P2UbLfCdcvo3p/nzKvsmas4TnlujnuoV9hGgYzW1lQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= -golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ= golang.org/x/text v0.6.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8= golang.org/x/text v0.8.0 h1:57P1ETyNKtuIjB4SRd15iJxuhj8Gc416Y78H3qgMh68= @@ -293,18 +288,27 @@ golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtn golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod 
h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +<<<<<<< HEAD google.golang.org/api v0.112.0 h1:iDmzvZ4C086R3+en4nSyIf07HlQKMOX1Xx2dmia/+KQ= google.golang.org/api v0.112.0/go.mod h1:737UfWHNsOq4F3REUTmb+GN9pugkgNLCayLTfoIKpPc= +======= +google.golang.org/api v0.111.0 h1:bwKi+z2BsdwYFRKrqwutM+axAlYLz83gt5pDSXCJT+0= +google.golang.org/api v0.111.0/go.mod h1:qtFHvU9mhgTJegR31csQ+rwxyUTHOKFqCKWp1J0fdw0= +>>>>>>> origin google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= -google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc= google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo= +<<<<<<< HEAD google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488 h1:QQF+HdiI4iocoxUjjpLgvTYDHKm99C/VtTBFnfiCJos= google.golang.org/genproto v0.0.0-20230303212802-e74f57abe488/go.mod h1:TvhZT5f700eVlTNwND1xoEZQeWTB2RY/65kplwl/bFA= +======= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923 h1:znp6mq/drrY+6khTAlJUDNFFcDGV2ENLYKpMq8SyCds= +google.golang.org/genproto v0.0.0-20230223222841-637eb2293923/go.mod h1:3Dl5ZL0q0isWJt+FVcfpQyirqemEuLAK/iFvg1UP1Hw= +>>>>>>> origin google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c= google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg= google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY= @@ -326,7 +330,6 @@ google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQ google.golang.org/protobuf v1.28.1 h1:d0NfwRgPtno5B1Wa6L2DAG+KivqkdutMf1UhdNx175w= google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= -gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20200227125254-8fa46927fb4f/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= diff --git a/libs/sync/snapshot.go b/libs/sync/snapshot.go index 3fd80368..e06e81c9 100644 --- a/libs/sync/snapshot.go +++ b/libs/sync/snapshot.go @@ -165,13 +165,17 @@ func loadOrNewSnapshot(opts *SyncOptions) (*Snapshot, error) { } func (s *Snapshot) diff(all []fileset.File) (change diff, err error) { - currentFilenames := map[string]bool{} lastModifiedTimes := s.LastUpdatedTimes remoteToLocalNames := s.RemoteToLocalNames localToRemoteNames := s.LocalToRemoteNames + + // set of files currently present in the local file system and tracked by git + localFileSet := map[string]struct{}{} + for _, f := range all { + localFileSet[f.Relative] = struct{}{} + } + for _, f := range all { - // create set of current files to figure out if removals are needed - 
currentFilenames[f.Relative] = true // get current modified timestamp modified := f.Modified() lastSeenModified, seen := lastModifiedTimes[f.Relative] @@ -207,11 +211,13 @@ func (s *Snapshot) diff(all []fileset.File) (change diff, err error) { change.delete = append(change.delete, oldRemoteName) delete(remoteToLocalNames, oldRemoteName) } + // We cannot allow two local files in the project to point to the same // remote path - oldLocalName, ok := remoteToLocalNames[remoteName] - if ok && oldLocalName != f.Relative { - return change, fmt.Errorf("both %s and %s point to the same remote file location %s. Please remove one of them from your local project", oldLocalName, f.Relative, remoteName) + prevLocalName, ok := remoteToLocalNames[remoteName] + _, prevLocalFileExists := localFileSet[prevLocalName] + if ok && prevLocalName != f.Relative && prevLocalFileExists { + return change, fmt.Errorf("both %s and %s point to the same remote file location %s. Please remove one of them from your local project", prevLocalName, f.Relative, remoteName) } localToRemoteNames[f.Relative] = remoteName remoteToLocalNames[remoteName] = f.Relative @@ -220,7 +226,7 @@ func (s *Snapshot) diff(all []fileset.File) (change diff, err error) { // figure out files in the snapshot.lastModifiedTimes, but not on local // filesystem. These will be deleted for localName := range lastModifiedTimes { - _, exists := currentFilenames[localName] + _, exists := localFileSet[localName] if exists { continue } diff --git a/libs/sync/snapshot_test.go b/libs/sync/snapshot_test.go index 4995beca..1252dea4 100644 --- a/libs/sync/snapshot_test.go +++ b/libs/sync/snapshot_test.go @@ -249,6 +249,43 @@ func TestErrorWhenIdenticalRemoteName(t *testing.T) { assert.ErrorContains(t, err, "both foo and foo.py point to the same remote file location foo. 
Please remove one of them from your local project") } +func TestNoErrorRenameWithIdenticalRemoteName(t *testing.T) { + // Create temp project dir + projectDir := t.TempDir() + fileSet, err := git.NewFileSet(projectDir) + require.NoError(t, err) + state := Snapshot{ + LastUpdatedTimes: make(map[string]time.Time), + LocalToRemoteNames: make(map[string]string), + RemoteToLocalNames: make(map[string]string), + } + + // upload should work since they point to different destinations + pythonFoo := testfile.CreateFile(t, filepath.Join(projectDir, "foo.py")) + defer pythonFoo.Close(t) + pythonFoo.Overwrite(t, "# Databricks notebook source\n") + files, err := fileSet.All() + assert.NoError(t, err) + change, err := state.diff(files) + assert.NoError(t, err) + assert.Len(t, change.delete, 0) + assert.Len(t, change.put, 1) + assert.Contains(t, change.put, "foo.py") + + pythonFoo.Remove(t) + sqlFoo := testfile.CreateFile(t, filepath.Join(projectDir, "foo.sql")) + defer sqlFoo.Close(t) + sqlFoo.Overwrite(t, "-- Databricks notebook source\n") + files, err = fileSet.All() + assert.NoError(t, err) + change, err = state.diff(files) + assert.NoError(t, err) + assert.Len(t, change.delete, 1) + assert.Len(t, change.put, 1) + assert.Contains(t, change.put, "foo.sql") + assert.Contains(t, change.delete, "foo") +} + func defaultOptions(t *testing.T) *SyncOptions { return &SyncOptions{ Host: "www.foobar.com", diff --git a/libs/sync/sync.go b/libs/sync/sync.go index b288685b..452cebad 100644 --- a/libs/sync/sync.go +++ b/libs/sync/sync.go @@ -118,8 +118,6 @@ func (s *Sync) notifyComplete(ctx context.Context, d diff) { } func (s *Sync) RunOnce(ctx context.Context) error { - applyDiff := syncCallback(ctx, s) - // tradeoff: doing portable monitoring only due to macOS max descriptor manual ulimit setting requirement // https://github.com/gorakhargosh/watchdog/blob/master/src/watchdog/observers/kqueue.py#L394-L418 all, err := s.fileSet.All() @@ -139,7 +137,7 @@ func (s *Sync) RunOnce(ctx context.Context) error { return nil } - err = applyDiff(change) + err = s.applyDiff(ctx, change) if err != nil { return err } diff --git a/libs/sync/watchdog.go b/libs/sync/watchdog.go index 257b5442..3e7acccc 100644 --- a/libs/sync/watchdog.go +++ b/libs/sync/watchdog.go @@ -6,53 +6,63 @@ import ( "golang.org/x/sync/errgroup" ) -// See https://docs.databricks.com/resources/limits.html#limits-api-rate-limits for per api -// rate limits +// Maximum number of concurrent requests during sync. const MaxRequestsInFlight = 20 -func syncCallback(ctx context.Context, s *Sync) func(localDiff diff) error { - return func(d diff) error { - // Abstraction over wait groups which allows you to get the errors - // returned in goroutines - var g errgroup.Group +// Perform a DELETE of the specified remote path. +func (s *Sync) applyDelete(ctx context.Context, group *errgroup.Group, remoteName string) { + // Return early if the context has already been cancelled. + select { + case <-ctx.Done(): + return + default: + // Proceed. + } - // Allow MaxRequestLimit maxiumum concurrent api calls - g.SetLimit(MaxRequestsInFlight) - - for _, remoteName := range d.delete { - // Copy of remoteName created to make this safe for concurrent use. 
- // directly using remoteName can cause race conditions since the loop - // might iterate over to the next remoteName before the go routine function - // is evaluated - remoteNameCopy := remoteName - g.Go(func() error { - s.notifyProgress(ctx, EventActionDelete, remoteNameCopy, 0.0) - err := s.repoFiles.DeleteFile(ctx, remoteNameCopy) - if err != nil { - return err - } - s.notifyProgress(ctx, EventActionDelete, remoteNameCopy, 1.0) - return nil - }) - } - for _, localRelativePath := range d.put { - // Copy of localName created to make this safe for concurrent use. - localRelativePathCopy := localRelativePath - g.Go(func() error { - s.notifyProgress(ctx, EventActionPut, localRelativePathCopy, 0.0) - err := s.repoFiles.PutFile(ctx, localRelativePathCopy) - if err != nil { - return err - } - s.notifyProgress(ctx, EventActionPut, localRelativePathCopy, 1.0) - return nil - }) - } - // wait for goroutines to finish and return first non-nil error return - // if any - if err := g.Wait(); err != nil { + group.Go(func() error { + s.notifyProgress(ctx, EventActionDelete, remoteName, 0.0) + err := s.repoFiles.DeleteFile(ctx, remoteName) + if err != nil { return err } + s.notifyProgress(ctx, EventActionDelete, remoteName, 1.0) return nil - } + }) +} + +// Perform a PUT of the specified local path. +func (s *Sync) applyPut(ctx context.Context, group *errgroup.Group, localName string) { + // Return early if the context has already been cancelled. + select { + case <-ctx.Done(): + return + default: + // Proceed. + } + + group.Go(func() error { + s.notifyProgress(ctx, EventActionPut, localName, 0.0) + err := s.repoFiles.PutFile(ctx, localName) + if err != nil { + return err + } + s.notifyProgress(ctx, EventActionPut, localName, 1.0) + return nil + }) +} + +func (s *Sync) applyDiff(ctx context.Context, d diff) error { + group, ctx := errgroup.WithContext(ctx) + group.SetLimit(MaxRequestsInFlight) + + for _, remoteName := range d.delete { + s.applyDelete(ctx, group, remoteName) + } + + for _, localName := range d.put { + s.applyPut(ctx, group, localName) + } + + // Wait for goroutines to finish and return first non-nil error return if any. + return group.Wait() } diff --git a/main.go b/main.go index a3131c4d..b0fc5d7e 100644 --- a/main.go +++ b/main.go @@ -9,10 +9,8 @@ import ( _ "github.com/databricks/bricks/cmd/configure" _ "github.com/databricks/bricks/cmd/fs" _ "github.com/databricks/bricks/cmd/init" - _ "github.com/databricks/bricks/cmd/launch" "github.com/databricks/bricks/cmd/root" _ "github.com/databricks/bricks/cmd/sync" - _ "github.com/databricks/bricks/cmd/test" _ "github.com/databricks/bricks/cmd/version" )