mirror of https://github.com/databricks/cli.git
added ml docs

commit be1c422ce6
parent ad3b3d7dcc
@@ -170,7 +170,7 @@
   "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
   "properties": {
     "pause_status": {
-      "description": "Indicate whether this schedule is paused or not."
+      "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
     }
   }
 },
@@ -240,7 +240,7 @@
   "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
 },
 "new_cluster": {
-  "description": "If new_cluster, a description of a cluster that is created for each task.",
+  "description": "If new_cluster, a description of a cluster that is created for only for this task.",
   "properties": {
     "autoscale": {
       "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -594,7 +594,7 @@
   "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
   "properties": {
     "pause_status": {
-      "description": "Indicate whether this schedule is paused or not."
+      "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
     },
     "quartz_cron_expression": {
       "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
@@ -767,7 +767,7 @@
   "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
 },
 "new_cluster": {
-  "description": "If new_cluster, a description of a cluster that is created for each task.",
+  "description": "If new_cluster, a description of a cluster that is created for only for this task.",
   "properties": {
     "autoscale": {
       "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -1065,7 +1065,7 @@
       "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
     },
     "source": {
-      "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
+      "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
     }
   }
 },
@@ -1153,7 +1153,7 @@
       "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
     },
     "source": {
-      "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
+      "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
     }
   }
 },
@@ -1281,7 +1281,7 @@
       }
     },
     "pause_status": {
-      "description": "Indicate whether this schedule is paused or not."
+      "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
     }
   }
 },
@@ -1327,79 +1327,79 @@
     }
   },
   "models": {
-    "description": "",
+    "description": "List of MLflow models",
     "additionalproperties": {
       "description": "",
       "properties": {
         "creation_timestamp": {
-          "description": ""
+          "description": "Timestamp recorded when this `registered_model` was created."
         },
         "description": {
-          "description": ""
+          "description": "Description of this `registered_model`."
         },
         "last_updated_timestamp": {
-          "description": ""
+          "description": "Timestamp recorded when metadata for this `registered_model` was last updated."
         },
         "latest_versions": {
-          "description": "",
+          "description": "Collection of latest model versions for each stage.\nOnly contains models with current `READY` status.",
           "items": {
             "description": "",
             "properties": {
               "creation_timestamp": {
-                "description": ""
+                "description": "Timestamp recorded when this `model_version` was created."
              },
              "current_stage": {
-                "description": ""
+                "description": "Current stage for this `model_version`."
              },
              "description": {
-                "description": ""
+                "description": "Description of this `model_version`."
              },
              "last_updated_timestamp": {
-                "description": ""
+                "description": "Timestamp recorded when metadata for this `model_version` was last updated."
              },
              "name": {
-                "description": ""
+                "description": "Unique name of the model"
              },
              "run_id": {
-                "description": ""
+                "description": "MLflow run ID used when creating `model_version`, if `source` was generated by an\nexperiment run stored in MLflow tracking server."
              },
              "run_link": {
-                "description": ""
+                "description": "Run Link: Direct link to the run that generated this version"
              },
              "source": {
-                "description": ""
+                "description": "URI indicating the location of the source model artifacts, used when creating `model_version`"
              },
              "status": {
-                "description": ""
+                "description": "Current status of `model_version`"
              },
              "status_message": {
-                "description": ""
+                "description": "Details on current `status`, if it is pending or failed."
              },
              "tags": {
-                "description": "",
+                "description": "Tags: Additional metadata key-value pairs for this `model_version`.",
                 "items": {
                   "description": "",
                   "properties": {
                     "key": {
-                      "description": ""
+                      "description": "The tag key."
                    },
                    "value": {
-                      "description": ""
+                      "description": "The tag value."
                    }
                  }
                }
              },
              "user_id": {
-                "description": ""
+                "description": "User that created this `model_version`."
              },
              "version": {
-                "description": ""
+                "description": "Model's version number."
              }
            }
          }
        },
        "name": {
-          "description": ""
+          "description": "Unique name for the model."
        },
        "permissions": {
          "description": "",
@@ -1422,21 +1422,21 @@
           }
         },
         "tags": {
-          "description": "",
+          "description": "Tags: Additional metadata key-value pairs for this `registered_model`.",
           "items": {
             "description": "",
             "properties": {
               "key": {
-                "description": ""
+                "description": "The tag key."
              },
              "value": {
-                "description": ""
+                "description": "The tag value."
              }
            }
          }
        },
        "user_id": {
-          "description": ""
+          "description": "User that created this `registered_model`"
        }
      }
    }
@@ -1926,7 +1926,7 @@
   "description": "An optional continuous property for this job. The continuous property will ensure that there is always one run executing. Only one of `schedule` and `continuous` can be used.",
   "properties": {
     "pause_status": {
-      "description": "Indicate whether this schedule is paused or not."
+      "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
     }
   }
 },
@@ -1996,7 +1996,7 @@
   "description": "A unique name for the job cluster. This field is required and must be unique within the job.\n`JobTaskSettings` may refer to this field to determine which cluster to launch for the task execution."
 },
 "new_cluster": {
-  "description": "If new_cluster, a description of a cluster that is created for each task.",
+  "description": "If new_cluster, a description of a cluster that is created for only for this task.",
   "properties": {
     "autoscale": {
       "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -2350,7 +2350,7 @@
   "description": "An optional periodic schedule for this job. The default behavior is that the job only runs when triggered by clicking “Run Now” in the Jobs UI or sending an API request to `runNow`.",
   "properties": {
     "pause_status": {
-      "description": "Indicate whether this schedule is paused or not."
+      "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
     },
     "quartz_cron_expression": {
       "description": "A Cron expression using Quartz syntax that describes the schedule for a job.\nSee [Cron Trigger](http://www.quartz-scheduler.org/documentation/quartz-2.3.0/tutorials/crontrigger.html)\nfor details. This field is required.\"\n"
@@ -2523,7 +2523,7 @@
   "description": "An optional minimal interval in milliseconds between the start of the failed run and the subsequent retry run. The default behavior is that unsuccessful runs are immediately retried."
 },
 "new_cluster": {
-  "description": "If new_cluster, a description of a cluster that is created for each task.",
+  "description": "If new_cluster, a description of a cluster that is created for only for this task.",
   "properties": {
     "autoscale": {
       "description": "Parameters needed in order to automatically scale clusters up and down based on load.\nNote: autoscaling works best with DB runtime versions 3.0 or later.",
@@ -2821,7 +2821,7 @@
       "description": "The path of the notebook to be run in the Databricks workspace or remote repository.\nFor notebooks stored in the Databricks workspace, the path must be absolute and begin with a slash.\nFor notebooks stored in a remote repository, the path must be relative. This field is required.\n"
     },
     "source": {
-      "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
+      "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
     }
   }
 },
@@ -2909,7 +2909,7 @@
       "description": "The Python file to be executed. Cloud file URIs (such as dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported. For python files stored in the Databricks workspace, the path must be absolute and begin with `/`. For files stored in a remote repository, the path must be relative. This field is required."
     },
     "source": {
-      "description": "Optional location type of the notebook. When set to `WORKSPACE`, the notebook will be retrieved\nfrom the local \u003cDatabricks\u003e workspace. When set to `GIT`, the notebook will be retrieved from a Git repository\ndefined in `git_source`. If the value is empty, the task will use `GIT` if `git_source` is defined and `WORKSPACE` otherwise.\n\n* `WORKSPACE`: Notebook is located in \u003cDatabricks\u003e workspace.\n* `GIT`: Notebook is located in cloud Git provider.\n"
+      "description": "Optional location type of the Python file. When set to `WORKSPACE` or not specified, the file will be retrieved\nfrom the local \u003cDatabricks\u003e workspace or cloud location (if the `python_file` has a URI format). When set to `GIT`,\nthe Python file will be retrieved from a Git repository defined in `git_source`.\n\n* `WORKSPACE`: The Python file is located in a \u003cDatabricks\u003e workspace or at a cloud filesystem URI.\n* `GIT`: The Python file is located in a remote Git repository.\n"
     }
   }
 },
@@ -3037,7 +3037,7 @@
       }
     },
     "pause_status": {
-      "description": "Indicate whether this schedule is paused or not."
+      "description": "Indicate whether the continuous execution of the job is paused or not. Defaults to UNPAUSED."
     }
   }
 },
@@ -3083,79 +3083,79 @@
     }
   },
   "models": {
-    "description": "",
+    "description": "List of MLflow models",
     "additionalproperties": {
       "description": "",
       "properties": {
         "creation_timestamp": {
-          "description": ""
+          "description": "Timestamp recorded when this `registered_model` was created."
         },
         "description": {
-          "description": ""
+          "description": "Description of this `registered_model`."
         },
         "last_updated_timestamp": {
-          "description": ""
+          "description": "Timestamp recorded when metadata for this `registered_model` was last updated."
        },
        "latest_versions": {
-          "description": "",
+          "description": "Collection of latest model versions for each stage.\nOnly contains models with current `READY` status.",
          "items": {
            "description": "",
            "properties": {
              "creation_timestamp": {
-                "description": ""
+                "description": "Timestamp recorded when this `model_version` was created."
              },
              "current_stage": {
-                "description": ""
+                "description": "Current stage for this `model_version`."
              },
              "description": {
-                "description": ""
+                "description": "Description of this `model_version`."
              },
              "last_updated_timestamp": {
-                "description": ""
+                "description": "Timestamp recorded when metadata for this `model_version` was last updated."
              },
              "name": {
-                "description": ""
+                "description": "Unique name of the model"
              },
              "run_id": {
-                "description": ""
+                "description": "MLflow run ID used when creating `model_version`, if `source` was generated by an\nexperiment run stored in MLflow tracking server."
              },
              "run_link": {
-                "description": ""
+                "description": "Run Link: Direct link to the run that generated this version"
              },
              "source": {
-                "description": ""
+                "description": "URI indicating the location of the source model artifacts, used when creating `model_version`"
              },
              "status": {
-                "description": ""
+                "description": "Current status of `model_version`"
              },
              "status_message": {
-                "description": ""
+                "description": "Details on current `status`, if it is pending or failed."
              },
              "tags": {
-                "description": "",
+                "description": "Tags: Additional metadata key-value pairs for this `model_version`.",
                 "items": {
                   "description": "",
                   "properties": {
                     "key": {
-                      "description": ""
+                      "description": "The tag key."
                    },
                    "value": {
-                      "description": ""
+                      "description": "The tag value."
                    }
                  }
                }
              },
              "user_id": {
-                "description": ""
+                "description": "User that created this `model_version`."
              },
              "version": {
-                "description": ""
+                "description": "Model's version number."
              }
            }
          }
        },
        "name": {
-          "description": ""
+          "description": "Unique name for the model."
        },
        "permissions": {
          "description": "",
@@ -3178,21 +3178,21 @@
           }
         },
         "tags": {
-          "description": "",
+          "description": "Tags: Additional metadata key-value pairs for this `registered_model`.",
           "items": {
             "description": "",
             "properties": {
               "key": {
-                "description": ""
+                "description": "The tag key."
              },
              "value": {
-                "description": ""
+                "description": "The tag value."
              }
            }
          }
        },
        "user_id": {
-          "description": ""
+          "description": "User that created this `registered_model`"
        }
      }
    }
@@ -196,6 +196,19 @@ func (reader *OpenapiReader) experimentsDocs() (*Docs, error) {
     return experimentsDocs, nil
 }
 
+func (reader *OpenapiReader) modelsDocs() (*Docs, error) {
+    modelSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "ml.Model")
+    if err != nil {
+        return nil, err
+    }
+    modelDocs := schemaToDocs(modelSpecSchema)
+    modelsDocs := &Docs{
+        Description: "List of MLflow models",
+        AdditionalProperties: modelDocs,
+    }
+    return modelsDocs, nil
+}
+
 func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) {
     jobsDocs, err := reader.jobsDocs()
     if err != nil {
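To make the link between the two halves of this diff concrete, here is a minimal, self-contained sketch of how a Docs node like the one built by the new modelsDocs function could serialize into the "models" descriptions shown in the JSON hunks above. The Docs struct below is a stand-in written for this example only: just the Description and AdditionalProperties fields are visible in the diff, while the Properties field and the JSON tags are assumptions for illustration.

// Sketch only: a stand-in Docs type (not the repository's actual definition)
// used to show how a modelsDocs-style node maps onto the descriptions JSON.
package main

import (
    "encoding/json"
    "fmt"
)

type Docs struct {
    Description          string           `json:"description"`
    Properties           map[string]*Docs `json:"properties,omitempty"`
    AdditionalProperties *Docs            `json:"additionalproperties,omitempty"`
}

func main() {
    // Mirrors the shape returned by modelsDocs: a list-level description plus
    // per-model documentation nested under additionalproperties.
    models := &Docs{
        Description: "List of MLflow models",
        AdditionalProperties: &Docs{
            Properties: map[string]*Docs{
                "name": {Description: "Unique name for the model."},
            },
        },
    }
    out, err := json.MarshalIndent(models, "", "  ")
    if err != nil {
        panic(err)
    }
    fmt.Println(string(out))
}

Running this prints a small JSON object with the same "description"/"additionalproperties"/"properties" nesting that the descriptions file uses for the models resource.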
@@ -209,6 +222,10 @@ func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) {
     if err != nil {
         return nil, err
     }
+    modelsDocs, err := reader.modelsDocs()
+    if err != nil {
+        return nil, err
+    }
 
     return &Docs{
         Description: "Collection of Databricks resources to deploy.",
|
@ -216,6 +233,7 @@ func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) {
|
|||
"jobs": jobsDocs,
|
||||
"pipelines": pipelinesDocs,
|
||||
"experiments": experimentsDocs,
|
||||
"models": modelsDocs,
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
|
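Going the other direction, here is a small sketch of how a consumer could read a descriptions document shaped like the JSON file in this diff and look up the text for a nested field. The walk helper and the miniature document are illustrative assumptions, not code from this commit.

// Sketch: looking up a description in a document shaped like the JSON above.
package main

import (
    "encoding/json"
    "fmt"
)

const doc = `{
  "models": {
    "description": "List of MLflow models",
    "additionalproperties": {
      "properties": {
        "name": {"description": "Unique name for the model."}
      }
    }
  }
}`

// walk descends through nested JSON objects by key and returns the final object.
func walk(node map[string]any, path ...string) map[string]any {
    for _, key := range path {
        node = node[key].(map[string]any)
    }
    return node
}

func main() {
    var root map[string]any
    if err := json.Unmarshal([]byte(doc), &root); err != nil {
        panic(err)
    }
    name := walk(root, "models", "additionalproperties", "properties", "name")
    fmt.Println(name["description"]) // prints: Unique name for the model.
}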