Compare commits

...

7 Commits

Author SHA1 Message Date
Shreyas Goenka d07192fe03
merge 2024-08-27 11:54:41 +02:00
Shreyas Goenka 2dea889621
delete the old schema code 2024-08-27 11:53:29 +02:00
Shreyas Goenka 870e41912a
from_type method is mostly complete 2024-08-27 11:42:33 +02:00
dependabot[bot] 056d203236
Bump github.com/databricks/databricks-sdk-go from 0.44.0 to 0.45.0 (#1719)
Bumps
[github.com/databricks/databricks-sdk-go](https://github.com/databricks/databricks-sdk-go)
from 0.44.0 to 0.45.0.
<details>
<summary>Release notes</summary>
<p><em>Sourced from <a
href="https://github.com/databricks/databricks-sdk-go/releases">github.com/databricks/databricks-sdk-go's
releases</a>.</em></p>
<blockquote>
<h2>v0.45.0</h2>
<h2>0.45.0</h2>
<h3>Bug Fixes</h3>
<ul>
<li>Add INVALID_STATE to error code mapping (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/pull/1014">#1014</a>).</li>
<li>Do not specify <code>--tenant</code> flag when fetching managed
identity access token from the CLI (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/pull/1021">#1021</a>).</li>
</ul>
<h3>Internal Changes</h3>
<ul>
<li>Add terraform aliases to Entity (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/pull/1017">#1017</a>).</li>
<li>Added Service.NamedIdMap (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/pull/1016">#1016</a>).</li>
<li>Fix billing test for budget configuration update (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/pull/1019">#1019</a>).</li>
</ul>
<h3>API Changes:</h3>
<ul>
<li>Added <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#PolicyComplianceForClustersAPI">w.PolicyComplianceForClusters</a>
workspace-level service.</li>
<li>Added <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PolicyComplianceForJobsAPI">w.PolicyComplianceForJobs</a>
workspace-level service.</li>
<li>Added <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ResourceQuotasAPI">w.ResourceQuotas</a>
workspace-level service.</li>
<li>Added <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#GetQuotaRequest">catalog.GetQuotaRequest</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#GetQuotaResponse">catalog.GetQuotaResponse</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ListQuotasRequest">catalog.ListQuotasRequest</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ListQuotasResponse">catalog.ListQuotasResponse</a>
and <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#QuotaInfo">catalog.QuotaInfo</a>.</li>
<li>Added <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#ClusterCompliance">compute.ClusterCompliance</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#ClusterSettingsChange">compute.ClusterSettingsChange</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EnforceClusterComplianceRequest">compute.EnforceClusterComplianceRequest</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EnforceClusterComplianceResponse">compute.EnforceClusterComplianceResponse</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetClusterComplianceRequest">compute.GetClusterComplianceRequest</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetClusterComplianceResponse">compute.GetClusterComplianceResponse</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#ListClusterCompliancesRequest">compute.ListClusterCompliancesRequest</a>
and <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#ListClusterCompliancesResponse">compute.ListClusterCompliancesResponse</a>.</li>
<li>Added <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#EnforcePolicyComplianceForJobResponseJobClusterSettingsChange">jobs.EnforcePolicyComplianceForJobResponseJobClusterSettingsChange</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#EnforcePolicyComplianceRequest">jobs.EnforcePolicyComplianceRequest</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#EnforcePolicyComplianceResponse">jobs.EnforcePolicyComplianceResponse</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#GetPolicyComplianceRequest">jobs.GetPolicyComplianceRequest</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#GetPolicyComplianceResponse">jobs.GetPolicyComplianceResponse</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#JobCompliance">jobs.JobCompliance</a>,
<a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#ListJobComplianceForPolicyResponse">jobs.ListJobComplianceForPolicyResponse</a>
and <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#ListJobComplianceRequest">jobs.ListJobComplianceRequest</a>.</li>
<li>Added <code>Fallback</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CreateExternalLocation">catalog.CreateExternalLocation</a>.</li>
<li>Added <code>Fallback</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ExternalLocationInfo">catalog.ExternalLocationInfo</a>.</li>
<li>Added <code>Fallback</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#UpdateExternalLocation">catalog.UpdateExternalLocation</a>.</li>
<li>Added <code>JobRunId</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseRun">jobs.BaseRun</a>.</li>
<li>Added <code>JobRunId</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run">jobs.Run</a>.</li>
<li>Added <code>IncludeMetrics</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#ListQueryHistoryRequest">sql.ListQueryHistoryRequest</a>.</li>
<li>Added <code>StatementIds</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryFilter">sql.QueryFilter</a>.</li>
<li>Removed <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#ContextFilter">sql.ContextFilter</a>.</li>
<li>Removed <code>ContextFilter</code> field for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryFilter">sql.QueryFilter</a>.</li>
<li>Removed <code>PipelineId</code> and <code>PipelineUpdateId</code>
fields for <a
href="https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QuerySource">sql.QuerySource</a>.</li>
</ul>
<p>OpenAPI SHA: 3eae49b444cac5a0118a3503e5b7ecef7f96527a, Date:
2024-08-21</p>
</blockquote>
</details>
<details>
<summary>Commits</summary>
<ul>
<li><a
href="6d867882d0"><code>6d86788</code></a>
[Release] Release v0.45.0 (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/issues/1023">#1023</a>)</li>
<li><a
href="ba4489b946"><code>ba4489b</code></a>
[Fix] Do not specify <code>--tenant</code> flag when fetching managed
identity access to...</li>
<li><a
href="f6248097d1"><code>f624809</code></a>
[Internal] Fix billing test for budget configuration update (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/issues/1019">#1019</a>)</li>
<li><a
href="27a5055609"><code>27a5055</code></a>
[Internal] Add terraform aliases to Entity (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/issues/1017">#1017</a>)</li>
<li><a
href="382a38d380"><code>382a38d</code></a>
[Internal] Added Service.NamedIdMap (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/issues/1016">#1016</a>)</li>
<li><a
href="1ef9931dc9"><code>1ef9931</code></a>
[Fix] Add INVALID_STATE to error code mapping (<a
href="https://redirect.github.com/databricks/databricks-sdk-go/issues/1014">#1014</a>)</li>
<li>See full diff in <a
href="https://github.com/databricks/databricks-sdk-go/compare/v0.44.0...v0.45.0">compare
view</a></li>
</ul>
</details>
<br />

<details>
<summary>Most Recent Ignore Conditions Applied to This Pull
Request</summary>

| Dependency Name | Ignore Conditions |
| --- | --- |
| github.com/databricks/databricks-sdk-go | [>= 0.28.a, < 0.29] |
</details>


[![Dependabot compatibility
score](https://dependabot-badges.githubapp.com/badges/compatibility_score?dependency-name=github.com/databricks/databricks-sdk-go&package-manager=go_modules&previous-version=0.44.0&new-version=0.45.0)](https://docs.github.com/en/github/managing-security-vulnerabilities/about-dependabot-security-updates#about-compatibility-scores)

Dependabot will resolve any conflicts with this PR as long as you don't
alter it yourself. You can also trigger a rebase manually by commenting
`@dependabot rebase`.


---

<details>
<summary>Dependabot commands and options</summary>
<br />

You can trigger Dependabot actions by commenting on this PR:
- `@dependabot rebase` will rebase this PR
- `@dependabot recreate` will recreate this PR, overwriting any edits
that have been made to it
- `@dependabot merge` will merge this PR after your CI passes on it
- `@dependabot squash and merge` will squash and merge this PR after
your CI passes on it
- `@dependabot cancel merge` will cancel a previously requested merge
and block automerging
- `@dependabot reopen` will reopen this PR if it is closed
- `@dependabot close` will close this PR and stop Dependabot recreating
it. You can achieve the same result by closing it manually
- `@dependabot show <dependency name> ignore conditions` will show all
of the ignore conditions of the specified dependency
- `@dependabot ignore this major version` will close this PR and stop
Dependabot creating any more for this major version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this minor version` will close this PR and stop
Dependabot creating any more for this minor version (unless you reopen
the PR or upgrade to it yourself)
- `@dependabot ignore this dependency` will close this PR and stop
Dependabot creating any more for this dependency (unless you reopen the
PR or upgrade to it yourself)


</details>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: Andrew Nester <andrew.nester@databricks.com>
2024-08-27 08:54:05 +00:00
Shreyas Goenka 11dfdc036d
more iteration 2024-08-26 20:16:45 +02:00
Andrew Nester 783e05c939
Do not treat empty path as a local path (#1717)
## Changes
Fixes an issue introduced in https://github.com/databricks/cli/pull/1699,
where PyPI packages were treated as a local library.

The reason is that `libraryPath` returned an empty string as the path for
PyPI packages, and `IsLibraryLocal` then treated the empty string as a local
path.

Both of these functions are fixed in this PR.

## Tests
Added a regression test.
2024-08-26 10:03:56 +00:00
Lennart Kats (databricks) 84b47745e4
Ignore CLI version check on development builds of the CLI (#1714)
## Changes

This change makes sure we ignore the CLI version check on development
builds of the CLI.

Before:

```
$ cat databricks.yml | grep cli_version
  databricks_cli_version: ">= 0.223.1"
$ cli bundle deploy
Error: Databricks CLI version constraint not satisfied. Required: >= 0.223.1, current: 0.0.0-dev+06b169284737
```

After:

```
...
$ cli bundle deploy
...
Warning: Ignoring Databricks CLI version constraint for development build. Required: >= 0.223.1, current: 0.0.0-dev+d52d6f08fcd5
```


## Tests
2024-08-23 10:13:21 +00:00
35 changed files with 1263 additions and 8229 deletions

View File

@@ -1 +1 @@
-f98c07f9c71f579de65d2587bb0292f83d10e55d
+3eae49b444cac5a0118a3503e5b7ecef7f96527a

.gitattributes (vendored)
View File

@@ -75,6 +75,8 @@ cmd/workspace/online-tables/online-tables.go linguist-generated=true
 cmd/workspace/permission-migration/permission-migration.go linguist-generated=true
 cmd/workspace/permissions/permissions.go linguist-generated=true
 cmd/workspace/pipelines/pipelines.go linguist-generated=true
+cmd/workspace/policy-compliance-for-clusters/policy-compliance-for-clusters.go linguist-generated=true
+cmd/workspace/policy-compliance-for-jobs/policy-compliance-for-jobs.go linguist-generated=true
 cmd/workspace/policy-families/policy-families.go linguist-generated=true
 cmd/workspace/provider-exchange-filters/provider-exchange-filters.go linguist-generated=true
 cmd/workspace/provider-exchanges/provider-exchanges.go linguist-generated=true
@@ -94,6 +96,7 @@ cmd/workspace/recipient-activation/recipient-activation.go linguist-generated=true
 cmd/workspace/recipients/recipients.go linguist-generated=true
 cmd/workspace/registered-models/registered-models.go linguist-generated=true
 cmd/workspace/repos/repos.go linguist-generated=true
+cmd/workspace/resource-quotas/resource-quotas.go linguist-generated=true
 cmd/workspace/restrict-workspace-admins/restrict-workspace-admins.go linguist-generated=true
 cmd/workspace/schemas/schemas.go linguist-generated=true
 cmd/workspace/secrets/secrets.go linguist-generated=true

View File

@@ -40,6 +40,10 @@ func (v *verifyCliVersion) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
 	}
 	if !c.Check(version) {
+		if version.Prerelease() == "dev" && version.Major() == 0 {
+			return diag.Warningf("Ignoring Databricks CLI version constraint for development build. Required: %s, current: %s", constraint, currentVersion)
+		}
+
 		return diag.Errorf("Databricks CLI version constraint not satisfied. Required: %s, current: %s", constraint, currentVersion)
 	}
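For context, here is a minimal, hypothetical standalone sketch of the dev-build detection above. It assumes the Masterminds `semver/v3` package, whose `Constraint.Check`, `Prerelease`, and `Major` methods match the calls in this hunk:

```go
package main

import (
	"fmt"

	"github.com/Masterminds/semver/v3"
)

func main() {
	// A bundle's version constraint and the version reported by a dev build.
	c, err := semver.NewConstraint(">= 0.223.1")
	if err != nil {
		panic(err)
	}
	v := semver.MustParse("0.0.0-dev+06b169284737")

	// The constraint check fails for dev builds...
	fmt.Println(c.Check(v)) // false

	// ...but they are identifiable, so the error can be downgraded to a warning.
	if v.Prerelease() == "dev" && v.Major() == 0 {
		fmt.Println("development build: ignoring version constraint")
	}
}
```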

View File

@@ -107,6 +107,11 @@ func TestVerifyCliVersion(t *testing.T) {
 			constraint:    "^0.100",
 			expectedError: "invalid version constraint \"^0.100\" specified. Please specify the version constraint in the format (>=) 0.0.0(, <= 1.0.0)",
 		},
+		{
+			currentVersion: "0.0.0-dev+06b169284737",
+			constraint:     ">= 0.100.0",
+			expectedError:  "Ignoring Databricks CLI version constraint for development build. Required: >= 0.100.0",
+		},
 	}

 	t.Cleanup(func() {
@@ -130,7 +135,7 @@ func TestVerifyCliVersion(t *testing.T) {
 			diags := bundle.Apply(context.Background(), b, VerifyCliVersion())
 			if tc.expectedError != "" {
 				require.NotEmpty(t, diags)
-				require.Equal(t, tc.expectedError, diags.Error().Error())
+				require.Contains(t, diags[0].Summary, tc.expectedError)
 			} else {
 				require.Empty(t, diags)
 			}

View File

@ -1,42 +0,0 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"github.com/databricks/cli/bundle/schema"
)
func main() {
if len(os.Args) != 2 {
fmt.Println("Usage: go run main.go <output-file>")
os.Exit(1)
}
// Output file, to write the generated schema descriptions to.
outputFile := os.Args[1]
// Input file, the databricks openapi spec.
inputFile := os.Getenv("DATABRICKS_OPENAPI_SPEC")
if inputFile == "" {
log.Fatal("DATABRICKS_OPENAPI_SPEC environment variable not set")
}
// Generate the schema descriptions.
docs, err := schema.UpdateBundleDescriptions(inputFile)
if err != nil {
log.Fatal(err)
}
result, err := json.MarshalIndent(docs, "", " ")
if err != nil {
log.Fatal(err)
}
// Write the schema descriptions to the output file.
err = os.WriteFile(outputFile, result, 0644)
if err != nil {
log.Fatal(err)
}
}

View File

@ -0,0 +1,97 @@
package main
import (
"encoding/json"
"fmt"
"log"
"os"
"reflect"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/jsonschema"
)
func interpolationPattern(s string) string {
return fmt.Sprintf(`\$\{(%s(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}`, s)
}
func addInterpolationPatterns(_ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
switch s.Type {
case jsonschema.ArrayType, jsonschema.ObjectType:
// arrays and objects can have complex variable values specified.
return jsonschema.Schema{
AnyOf: []jsonschema.Schema{s, {
Type: jsonschema.StringType,
// TODO: Are multi-level complex variable references supported?
Pattern: interpolationPattern("var"),
}},
}
case jsonschema.StringType, jsonschema.IntegerType, jsonschema.NumberType, jsonschema.BooleanType:
// primitives can have variable values, or references like ${bundle.xyz}
// or ${workspace.xyz}
// TODO: Followup, do not allow references like ${} in the schema unless
// they are of the permitted patterns?
return jsonschema.Schema{
AnyOf: []jsonschema.Schema{s,
// TODO: Add "resources" here
{Type: jsonschema.StringType, Pattern: interpolationPattern("bundle")},
{Type: jsonschema.StringType, Pattern: interpolationPattern("workspace")},
{Type: jsonschema.StringType, Pattern: interpolationPattern("var")},
},
}
default:
return s
}
}
// TODO: Add a couple of end to end tests that the bundle schema generated is
// correct.
// TODO: Call out in the PR description that recursive types like "for_each_task"
// are now supported. Manually test for_each_task.
// TODO: The bundle_descriptions.json file contains a bunch of custom descriptions
// as well. Make sure to pull those in.
// TODO: Add unit tests for all permutations of structs, maps and slices for the FromType
// method.
// TODO: Note the minor regression of losing the bundle descriptions
func main() {
if len(os.Args) != 2 {
fmt.Println("Usage: go run main.go <output-file>")
os.Exit(1)
}
// Output file, where the generated JSON schema will be written to.
outputFile := os.Args[1]
// Input file, the databricks openapi spec.
inputFile := os.Getenv("DATABRICKS_OPENAPI_SPEC")
if inputFile == "" {
log.Fatal("DATABRICKS_OPENAPI_SPEC environment variable not set")
}
p, err := newParser(inputFile)
if err != nil {
log.Fatal(err)
}
// Generate the JSON schema from the bundle Go struct.
s, err := jsonschema.FromType(reflect.TypeOf(config.Root{}), []func(reflect.Type, jsonschema.Schema) jsonschema.Schema{
p.addDescriptions,
p.addEnums,
addInterpolationPatterns,
})
if err != nil {
log.Fatal(err)
}
b, err := json.MarshalIndent(s, "", " ")
if err != nil {
log.Fatal(err)
}
// Write the schema descriptions to the output file.
err = os.WriteFile(outputFile, b, 0644)
if err != nil {
log.Fatal(err)
}
}
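To make the interpolation patterns concrete, here is a small, hypothetical standalone check (standard library only; the pattern string is the expansion of `interpolationPattern("var")` above):

```go
package main

import (
	"fmt"
	"regexp"
)

func main() {
	// interpolationPattern("var") expands to this pattern.
	pattern := `\$\{(var(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}`
	re := regexp.MustCompile("^" + pattern + "$")

	fmt.Println(re.MatchString("${var.cluster_id}"))      // true
	fmt.Println(re.MatchString("${var.tasks[0].name}"))   // true: indexed access
	fmt.Println(re.MatchString("${resources.jobs.x.id}")) // false: wrong prefix
}
```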

View File

@ -0,0 +1,131 @@
package main
import (
"encoding/json"
"fmt"
"os"
"path"
"reflect"
"strings"
"github.com/databricks/cli/libs/jsonschema"
)
type Components struct {
Schemas map[string]jsonschema.Schema `json:"schemas,omitempty"`
}
type Specification struct {
Components Components `json:"components"`
}
type openapiParser struct {
ref map[string]jsonschema.Schema
}
func newParser(path string) (*openapiParser, error) {
b, err := os.ReadFile(path)
if err != nil {
return nil, err
}
spec := Specification{}
err = json.Unmarshal(b, &spec)
if err != nil {
return nil, err
}
p := &openapiParser{}
p.ref = spec.Components.Schemas
return p, nil
}
// This function finds any JSON schemas that were defined in the OpenAPI spec
// that correspond to the given Go SDK type. It looks both at the type itself
// and any embedded types within it.
func (p *openapiParser) findRef(typ reflect.Type) (jsonschema.Schema, bool) {
typs := []reflect.Type{typ}
// If the type is a struct, the corresponding Go SDK struct might be embedded
// in it. We need to check for those as well.
if typ.Kind() == reflect.Struct {
for i := 0; i < typ.NumField(); i++ {
if !typ.Field(i).Anonymous {
continue
}
// Dereference the current type if it's a pointer.
ctyp := typ.Field(i).Type
for ctyp.Kind() == reflect.Ptr {
ctyp = ctyp.Elem()
}
typs = append(typs, ctyp)
}
}
for _, ctyp := range typs {
// Skip if it's not a Go SDK type.
if !strings.HasPrefix(ctyp.PkgPath(), "github.com/databricks/databricks-sdk-go") {
continue
}
pkgName := path.Base(ctyp.PkgPath())
k := fmt.Sprintf("%s.%s", pkgName, ctyp.Name())
// Skip if the type is not in the openapi spec.
_, ok := p.ref[k]
if !ok {
continue
}
// Return the first Go SDK type found in the openapi spec.
return p.ref[k], true
}
return jsonschema.Schema{}, false
}
// Use the OpenAPI spec to load descriptions for the given type.
func (p *openapiParser) addDescriptions(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
ref, ok := p.findRef(typ)
if !ok {
return s
}
s.Description = ref.Description
// Iterate over properties to load descriptions. This is not needed for any
// OpenAPI spec generated from protobufs, which are guaranteed to be one level
// deep.
// Needed for any hand-written OpenAPI specs.
for k, v := range s.Properties {
if refProp, ok := ref.Properties[k]; ok {
v.Description = refProp.Description
}
}
return s
}
// Use the OpenAPI spec to add enum values for the given type.
func (p *openapiParser) addEnums(typ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
ref, ok := p.findRef(typ)
if !ok {
return s
}
s.Enum = append(s.Enum, ref.Enum...)
// Iterate over properties to load enums. This is not needed for any
// OpenAPI spec generated from protobufs, which are guaranteed to be one level
// deep.
// Needed for any hand-written OpenAPI specs.
for k, v := range s.Properties {
if refProp, ok := ref.Properties[k]; ok {
v.Enum = append(v.Enum, refProp.Enum...)
}
}
return s
}
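For illustration, a hypothetical snippet showing the lookup key that `findRef` derives for an embedded SDK type; the `Job` struct here is a stand-in for the CLI's own wrapper types:

```go
package main

import (
	"fmt"
	"path"
	"reflect"

	"github.com/databricks/databricks-sdk-go/service/jobs"
)

// Stand-in for a CLI resource type that embeds a Go SDK struct.
type Job struct {
	ID string
	*jobs.JobSettings
}

func main() {
	typ := reflect.TypeOf(Job{})
	f := typ.Field(1) // the anonymous (embedded) field

	// Dereference pointers, as findRef does.
	ctyp := f.Type
	for ctyp.Kind() == reflect.Ptr {
		ctyp = ctyp.Elem()
	}

	// The key format matches the OpenAPI spec: "<package>.<TypeName>".
	fmt.Println(path.Base(ctyp.PkgPath()) + "." + ctyp.Name()) // jobs.JobSettings
}
```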

View File

@@ -1,19 +1,24 @@
 package libraries

-import "github.com/databricks/databricks-sdk-go/service/compute"
+import (
+	"fmt"
+
+	"github.com/databricks/databricks-sdk-go/service/compute"
+)

-func libraryPath(library *compute.Library) string {
+func libraryPath(library *compute.Library) (string, error) {
 	if library.Whl != "" {
-		return library.Whl
+		return library.Whl, nil
 	}
 	if library.Jar != "" {
-		return library.Jar
+		return library.Jar, nil
 	}
 	if library.Egg != "" {
-		return library.Egg
+		return library.Egg, nil
 	}
 	if library.Requirements != "" {
-		return library.Requirements
+		return library.Requirements, nil
 	}
-	return ""
+
+	return "", fmt.Errorf("not supported library type")
 }

View File

@@ -10,9 +10,27 @@ import (
 func TestLibraryPath(t *testing.T) {
 	path := "/some/path"

-	assert.Equal(t, path, libraryPath(&compute.Library{Whl: path}))
-	assert.Equal(t, path, libraryPath(&compute.Library{Jar: path}))
-	assert.Equal(t, path, libraryPath(&compute.Library{Egg: path}))
-	assert.Equal(t, path, libraryPath(&compute.Library{Requirements: path}))
-	assert.Equal(t, "", libraryPath(&compute.Library{}))
+	p, err := libraryPath(&compute.Library{Whl: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{Jar: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{Egg: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{Requirements: path})
+	assert.Equal(t, path, p)
+	assert.Nil(t, err)
+
+	p, err = libraryPath(&compute.Library{})
+	assert.Equal(t, "", p)
+	assert.NotNil(t, err)
+
+	p, err = libraryPath(&compute.Library{Pypi: &compute.PythonPyPiLibrary{Package: "pypipackage"}})
+	assert.Equal(t, "", p)
+	assert.NotNil(t, err)
 }

View File

@@ -67,7 +67,12 @@ func FindTasksWithLocalLibraries(b *bundle.Bundle) []jobs.Task {
 func isTaskWithLocalLibraries(task jobs.Task) bool {
 	for _, l := range task.Libraries {
-		if IsLibraryLocal(libraryPath(&l)) {
+		p, err := libraryPath(&l)
+		// If there's an error, skip the library because it's not of a supported type.
+		if err != nil {
+			continue
+		}
+		if IsLibraryLocal(p) {
 			return true
 		}
 	}
 }
View File

@@ -43,6 +43,10 @@ func IsLocalPath(p string) bool {
 // We can't use IsLocalPath because environment dependencies can be
 // a PyPI package name, which IsLocalPath could misinterpret as a local path.
 func IsLibraryLocal(dep string) bool {
+	if dep == "" {
+		return false
+	}
+
 	possiblePrefixes := []string{
 		".",
 	}
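A hypothetical standalone snippet (assuming the import path `github.com/databricks/cli/bundle/libraries`) showing the post-fix classification, consistent with the regression test below:

```go
package main

import (
	"fmt"

	"github.com/databricks/cli/bundle/libraries"
)

func main() {
	fmt.Println(libraries.IsLibraryLocal("./dist/test.whl")) // true: relative local path
	fmt.Println(libraries.IsLibraryLocal("pypipackage"))     // false: bare PyPI package name
	fmt.Println(libraries.IsLibraryLocal(""))                // false after this fix
}
```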

View File

@@ -48,6 +48,7 @@ func TestIsLibraryLocal(t *testing.T) {
 		{path: "../../local/*.whl", expected: true},
 		{path: "..\\..\\local\\*.whl", expected: true},
 		{path: "file://path/to/package/whl.whl", expected: true},
+		{path: "", expected: false},
 		{path: "pypipackage", expected: false},
 		{path: "/Volumes/catalog/schema/volume/path.whl", expected: false},
 		{path: "/Workspace/my_project/dist.whl", expected: false},

View File

@@ -29,8 +29,8 @@ func IsWorkspacePath(path string) bool {
 // IsWorkspaceLibrary returns true if the specified library refers to a workspace path.
 func IsWorkspaceLibrary(library *compute.Library) bool {
-	path := libraryPath(library)
-	if path == "" {
+	path, err := libraryPath(library)
+	if err != nil {
 		return false
 	}
View File

@@ -223,6 +223,17 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 							{Whl: "./dist/test.whl"},
 						},
 					},
+					{
+						TaskKey:           "key7",
+						PythonWheelTask:   &jobs.PythonWheelTask{},
+						ExistingClusterId: "test-key-2",
+						Libraries: []compute.Library{
+							{Whl: "signol_lib-0.4.4-20240822+prod-py3-none-any.whl"},
+							{Pypi: &compute.PythonPyPiLibrary{
+								Package: "requests==2.25.1",
+							}},
+						},
+					},
 				},
 			},
 		},
@@ -241,6 +252,46 @@ func TestNoIncompatibleWheelTasks(t *testing.T) {
 	require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
 }

+func TestTasksWithPyPiPackageAreCompatible(t *testing.T) {
+	b := &bundle.Bundle{
+		Config: config.Root{
+			Resources: config.Resources{
+				Jobs: map[string]*resources.Job{
+					"job1": {
+						JobSettings: &jobs.JobSettings{
+							JobClusters: []jobs.JobCluster{
+								{
+									JobClusterKey: "cluster1",
+									NewCluster: compute.ClusterSpec{
+										SparkVersion: "12.2.x-scala2.12",
+									},
+								},
+							},
+							Tasks: []jobs.Task{
+								{
+									TaskKey:           "key1",
+									PythonWheelTask:   &jobs.PythonWheelTask{},
+									ExistingClusterId: "test-key-2",
+									Libraries: []compute.Library{
+										{Pypi: &compute.PythonPyPiLibrary{
+											Package: "requests==2.25.1",
+										}},
+									},
+								},
+							},
+						},
+					},
+				},
+			},
+		},
+	}
+
+	m := mocks.NewMockWorkspaceClient(t)
+	b.SetWorkpaceClient(m.WorkspaceClient)
+
+	require.False(t, hasIncompatibleWheelTasks(context.Background(), b))
+}
+
 func TestNoWarningWhenPythonWheelWrapperIsOn(t *testing.T) {
 	b := &bundle.Bundle{
 		Config: config.Root{

View File

@ -1,18 +0,0 @@
### Overview
`docs/bundle_descriptions.json` contains both autogenerated and manually written
descriptions for the JSON schema. Specifically:
1. `resources`: almost all descriptions are autogenerated from the OpenAPI spec
2. `targets`: almost all descriptions are copied over from root-level entities (e.g. `bundle`, `artifacts`)
3. `bundle`: manually edited
4. `include`: manually edited
5. `workspace`: manually edited
6. `artifacts`: manually edited
These descriptions are rendered in the inline documentation in an IDE.
### SOP: Add schema descriptions for new fields in bundle config
Manually edit bundle_descriptions.json to add your descriptions. Note that the
descriptions in the `resources` block are generated from the OpenAPI spec, and thus
any changes there will be overwritten.

View File

@ -1,109 +0,0 @@
package schema
import (
_ "embed"
"encoding/json"
"fmt"
"os"
"reflect"
"github.com/databricks/cli/bundle/config"
"github.com/databricks/cli/libs/jsonschema"
)
// A subset of Schema struct
type Docs struct {
Description string `json:"description"`
Properties map[string]*Docs `json:"properties,omitempty"`
Items *Docs `json:"items,omitempty"`
AdditionalProperties *Docs `json:"additionalproperties,omitempty"`
}
//go:embed docs/bundle_descriptions.json
var bundleDocs []byte
func (docs *Docs) refreshTargetsDocs() error {
targetsDocs, ok := docs.Properties["targets"]
if !ok || targetsDocs.AdditionalProperties == nil ||
targetsDocs.AdditionalProperties.Properties == nil {
return fmt.Errorf("invalid targets descriptions")
}
targetProperties := targetsDocs.AdditionalProperties.Properties
propertiesToCopy := []string{"artifacts", "bundle", "resources", "workspace"}
for _, p := range propertiesToCopy {
targetProperties[p] = docs.Properties[p]
}
return nil
}
func LoadBundleDescriptions() (*Docs, error) {
embedded := Docs{}
err := json.Unmarshal(bundleDocs, &embedded)
return &embedded, err
}
func UpdateBundleDescriptions(openapiSpecPath string) (*Docs, error) {
embedded, err := LoadBundleDescriptions()
if err != nil {
return nil, err
}
// Generate schema from the embedded descriptions, and convert it back to docs.
// This creates empty descriptions for any properties that were missing in the
// embedded descriptions.
schema, err := New(reflect.TypeOf(config.Root{}), embedded)
if err != nil {
return nil, err
}
docs := schemaToDocs(schema)
// Load the Databricks OpenAPI spec
openapiSpec, err := os.ReadFile(openapiSpecPath)
if err != nil {
return nil, err
}
spec := &Specification{}
err = json.Unmarshal(openapiSpec, spec)
if err != nil {
return nil, err
}
openapiReader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
// Generate descriptions for the "resources" field
resourcesDocs, err := openapiReader.ResourcesDocs()
if err != nil {
return nil, err
}
resourceSchema, err := New(reflect.TypeOf(config.Resources{}), resourcesDocs)
if err != nil {
return nil, err
}
docs.Properties["resources"] = schemaToDocs(resourceSchema)
docs.refreshTargetsDocs()
return docs, nil
}
// *Docs are a subset of *Schema, this function selects that subset
func schemaToDocs(jsonSchema *jsonschema.Schema) *Docs {
// terminate recursion if schema is nil
if jsonSchema == nil {
return nil
}
docs := &Docs{
Description: jsonSchema.Description,
}
if len(jsonSchema.Properties) > 0 {
docs.Properties = make(map[string]*Docs)
}
for k, v := range jsonSchema.Properties {
docs.Properties[k] = schemaToDocs(v)
}
docs.Items = schemaToDocs(jsonSchema.Items)
if additionalProperties, ok := jsonSchema.AdditionalProperties.(*jsonschema.Schema); ok {
docs.AdditionalProperties = schemaToDocs(additionalProperties)
}
return docs
}

File diff suppressed because it is too large

View File

@ -1,62 +0,0 @@
package schema
import (
"encoding/json"
"testing"
"github.com/databricks/cli/libs/jsonschema"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestSchemaToDocs(t *testing.T) {
jsonSchema := &jsonschema.Schema{
Type: "object",
Description: "root doc",
Properties: map[string]*jsonschema.Schema{
"foo": {Type: "number", Description: "foo doc"},
"bar": {Type: "string"},
"octave": {
Type: "object",
AdditionalProperties: &jsonschema.Schema{Type: "number"},
Description: "octave docs",
},
"scales": {
Type: "object",
Description: "scale docs",
Items: &jsonschema.Schema{Type: "string"},
},
},
}
docs := schemaToDocs(jsonSchema)
docsJson, err := json.MarshalIndent(docs, " ", " ")
require.NoError(t, err)
expected :=
`{
"description": "root doc",
"properties": {
"bar": {
"description": ""
},
"foo": {
"description": "foo doc"
},
"octave": {
"description": "octave docs",
"additionalproperties": {
"description": ""
}
},
"scales": {
"description": "scale docs",
"items": {
"description": ""
}
}
}
}`
t.Log("[DEBUG] actual: ", string(docsJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(docsJson))
}

View File

@ -1,293 +0,0 @@
package schema
import (
"encoding/json"
"fmt"
"strings"
"github.com/databricks/cli/libs/jsonschema"
)
type OpenapiReader struct {
// OpenAPI spec to read schemas from.
OpenapiSpec *Specification
// In-memory cache of schemas read from the OpenAPI spec.
memo map[string]jsonschema.Schema
}
const SchemaPathPrefix = "#/components/schemas/"
// Read a schema directly from the OpenAPI spec.
func (reader *OpenapiReader) readOpenapiSchema(path string) (jsonschema.Schema, error) {
schemaKey := strings.TrimPrefix(path, SchemaPathPrefix)
// return early if we already have a computed schema
memoSchema, ok := reader.memo[schemaKey]
if ok {
return memoSchema, nil
}
// check path is present in openapi spec
openapiSchema, ok := reader.OpenapiSpec.Components.Schemas[schemaKey]
if !ok {
return jsonschema.Schema{}, fmt.Errorf("schema with path %s not found in openapi spec", path)
}
// convert openapi schema to the native schema struct
bytes, err := json.Marshal(*openapiSchema)
if err != nil {
return jsonschema.Schema{}, err
}
jsonSchema := jsonschema.Schema{}
err = json.Unmarshal(bytes, &jsonSchema)
if err != nil {
return jsonschema.Schema{}, err
}
// A hack to convert a map[string]interface{} to *Schema.
// We rely on the type of AdditionalProperties in downstream functions
// to do reference interpolation.
_, ok = jsonSchema.AdditionalProperties.(map[string]interface{})
if ok {
b, err := json.Marshal(jsonSchema.AdditionalProperties)
if err != nil {
return jsonschema.Schema{}, err
}
additionalProperties := &jsonschema.Schema{}
err = json.Unmarshal(b, additionalProperties)
if err != nil {
return jsonschema.Schema{}, err
}
jsonSchema.AdditionalProperties = additionalProperties
}
// store read schema into memo
reader.memo[schemaKey] = jsonSchema
return jsonSchema, nil
}
// Resolve all nested "$ref" references in the schema. This function unrolls a single
// level of "$ref" in the schema and calls into traverseSchema to resolve nested references.
// Thus this function and traverseSchema are mutually recursive.
//
// This function is safe against reference loops. If a reference loop is detected, an error
// is returned.
func (reader *OpenapiReader) safeResolveRefs(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) {
if root.Reference == nil {
return reader.traverseSchema(root, tracker)
}
key := *root.Reference
// HACK to unblock CLI release (13th Feb 2024). This is temporary until proper
// support for recursive types is added to the docs generator. PR: https://github.com/databricks/cli/pull/1204
if strings.Contains(key, "ForEachTask") {
return root, nil
}
if tracker.hasCycle(key) {
// self-reference loops could be supported, but the logic is non-trivial because
// cross-reference loops are not allowed (see: http://json-schema.org/understanding-json-schema/structuring.html#recursion)
return nil, fmt.Errorf("references loop detected")
}
ref := *root.Reference
description := root.Description
tracker.push(ref, ref)
// Mark reference nil, so we do not traverse this again. This is tracked
// in the memo
root.Reference = nil
// unroll one level of reference.
selfRef, err := reader.readOpenapiSchema(ref)
if err != nil {
return nil, err
}
root = &selfRef
root.Description = description
// traverse again to find new references
root, err = reader.traverseSchema(root, tracker)
if err != nil {
return nil, err
}
tracker.pop(ref)
return root, err
}
// Traverse the nested properties of the schema to resolve "$ref" references. This function
// and safeResolveRefs are mutually recursive.
func (reader *OpenapiReader) traverseSchema(root *jsonschema.Schema, tracker *tracker) (*jsonschema.Schema, error) {
// case primitive (or invalid)
if root.Type != jsonschema.ObjectType && root.Type != jsonschema.ArrayType {
return root, nil
}
// only root references are resolved
if root.Reference != nil {
return reader.safeResolveRefs(root, tracker)
}
// case struct
if len(root.Properties) > 0 {
for k, v := range root.Properties {
childSchema, err := reader.safeResolveRefs(v, tracker)
if err != nil {
return nil, err
}
root.Properties[k] = childSchema
}
}
// case array
if root.Items != nil {
itemsSchema, err := reader.safeResolveRefs(root.Items, tracker)
if err != nil {
return nil, err
}
root.Items = itemsSchema
}
// case map
additionalProperties, ok := root.AdditionalProperties.(*jsonschema.Schema)
if ok && additionalProperties != nil {
valueSchema, err := reader.safeResolveRefs(additionalProperties, tracker)
if err != nil {
return nil, err
}
root.AdditionalProperties = valueSchema
}
return root, nil
}
func (reader *OpenapiReader) readResolvedSchema(path string) (*jsonschema.Schema, error) {
root, err := reader.readOpenapiSchema(path)
if err != nil {
return nil, err
}
tracker := newTracker()
tracker.push(path, path)
resolvedRoot, err := reader.safeResolveRefs(&root, tracker)
if err != nil {
return nil, tracker.errWithTrace(err.Error(), "")
}
return resolvedRoot, nil
}
func (reader *OpenapiReader) jobsDocs() (*Docs, error) {
jobSettingsSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "jobs.JobSettings")
if err != nil {
return nil, err
}
jobDocs := schemaToDocs(jobSettingsSchema)
// TODO: add description for id if needed.
// Tracked in https://github.com/databricks/cli/issues/242
jobsDocs := &Docs{
Description: "List of Databricks jobs",
AdditionalProperties: jobDocs,
}
return jobsDocs, nil
}
func (reader *OpenapiReader) pipelinesDocs() (*Docs, error) {
pipelineSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "pipelines.PipelineSpec")
if err != nil {
return nil, err
}
pipelineDocs := schemaToDocs(pipelineSpecSchema)
// TODO: Two fields in resources.Pipeline have the json tag id. Clarify the
// semantics and then add a description if needed. (https://github.com/databricks/cli/issues/242)
pipelinesDocs := &Docs{
Description: "List of DLT pipelines",
AdditionalProperties: pipelineDocs,
}
return pipelinesDocs, nil
}
func (reader *OpenapiReader) experimentsDocs() (*Docs, error) {
experimentSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "ml.Experiment")
if err != nil {
return nil, err
}
experimentDocs := schemaToDocs(experimentSpecSchema)
experimentsDocs := &Docs{
Description: "List of MLflow experiments",
AdditionalProperties: experimentDocs,
}
return experimentsDocs, nil
}
func (reader *OpenapiReader) modelsDocs() (*Docs, error) {
modelSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "ml.Model")
if err != nil {
return nil, err
}
modelDocs := schemaToDocs(modelSpecSchema)
modelsDocs := &Docs{
Description: "List of MLflow models",
AdditionalProperties: modelDocs,
}
return modelsDocs, nil
}
func (reader *OpenapiReader) modelServingEndpointsDocs() (*Docs, error) {
modelServingEndpointsSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "serving.CreateServingEndpoint")
if err != nil {
return nil, err
}
modelServingEndpointsDocs := schemaToDocs(modelServingEndpointsSpecSchema)
modelServingEndpointsAllDocs := &Docs{
Description: "List of Model Serving Endpoints",
AdditionalProperties: modelServingEndpointsDocs,
}
return modelServingEndpointsAllDocs, nil
}
func (reader *OpenapiReader) registeredModelDocs() (*Docs, error) {
registeredModelsSpecSchema, err := reader.readResolvedSchema(SchemaPathPrefix + "catalog.CreateRegisteredModelRequest")
if err != nil {
return nil, err
}
registeredModelsDocs := schemaToDocs(registeredModelsSpecSchema)
registeredModelsAllDocs := &Docs{
Description: "List of Registered Models",
AdditionalProperties: registeredModelsDocs,
}
return registeredModelsAllDocs, nil
}
func (reader *OpenapiReader) ResourcesDocs() (*Docs, error) {
jobsDocs, err := reader.jobsDocs()
if err != nil {
return nil, err
}
pipelinesDocs, err := reader.pipelinesDocs()
if err != nil {
return nil, err
}
experimentsDocs, err := reader.experimentsDocs()
if err != nil {
return nil, err
}
modelsDocs, err := reader.modelsDocs()
if err != nil {
return nil, err
}
modelServingEndpointsDocs, err := reader.modelServingEndpointsDocs()
if err != nil {
return nil, err
}
registeredModelsDocs, err := reader.registeredModelDocs()
if err != nil {
return nil, err
}
return &Docs{
Description: "Collection of Databricks resources to deploy.",
Properties: map[string]*Docs{
"jobs": jobsDocs,
"pipelines": pipelinesDocs,
"experiments": experimentsDocs,
"models": modelsDocs,
"model_serving_endpoints": modelServingEndpointsDocs,
"registered_models": registeredModelsDocs,
},
}, nil
}

View File

@ -1,493 +0,0 @@
package schema
import (
"encoding/json"
"testing"
"github.com/databricks/cli/libs/jsonschema"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
func TestReadSchemaForObject(t *testing.T) {
specString := `
{
"components": {
"schemas": {
"foo": {
"type": "number"
},
"fruits": {
"type": "object",
"description": "fruits that are cool",
"properties": {
"guava": {
"type": "string",
"description": "a guava for my schema"
},
"mango": {
"type": "object",
"description": "a mango for my schema",
"$ref": "#/components/schemas/mango"
}
}
},
"mango": {
"type": "object",
"properties": {
"foo": {
"$ref": "#/components/schemas/foo"
}
}
}
}
}
}
`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"description": "fruits that are cool",
"properties": {
"guava": {
"type": "string",
"description": "a guava for my schema"
},
"mango": {
"type": "object",
"description": "a mango for my schema",
"properties": {
"foo": {
"type": "number"
}
}
}
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
func TestReadSchemaForArray(t *testing.T) {
specString := `
{
"components": {
"schemas": {
"fruits": {
"type": "object",
"description": "fruits that are cool",
"items": {
"description": "some papayas, because papayas are fruits too",
"$ref": "#/components/schemas/papaya"
}
},
"papaya": {
"type": "number"
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"description": "fruits that are cool",
"items": {
"type": "number",
"description": "some papayas, because papayas are fruits too"
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
func TestReadSchemaForMap(t *testing.T) {
specString := `{
"components": {
"schemas": {
"fruits": {
"type": "object",
"description": "fruits that are meh",
"additionalProperties": {
"description": "watermelons. watermelons.",
"$ref": "#/components/schemas/watermelon"
}
},
"watermelon": {
"type": "number"
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"description": "fruits that are meh",
"additionalProperties": {
"type": "number",
"description": "watermelons. watermelons."
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
func TestRootReferenceIsResolved(t *testing.T) {
specString := `{
"components": {
"schemas": {
"foo": {
"type": "object",
"description": "this description is ignored",
"properties": {
"abc": {
"type": "string"
}
}
},
"fruits": {
"type": "object",
"description": "foo fighters fighting fruits",
"$ref": "#/components/schemas/foo"
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
schema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(schema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"description": "foo fighters fighting fruits",
"properties": {
"abc": {
"type": "string"
}
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
func TestSelfReferenceLoopErrors(t *testing.T) {
specString := `{
"components": {
"schemas": {
"foo": {
"type": "object",
"description": "this description is ignored",
"properties": {
"bar": {
"type": "object",
"$ref": "#/components/schemas/foo"
}
}
},
"fruits": {
"type": "object",
"description": "foo fighters fighting fruits",
"$ref": "#/components/schemas/foo"
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
_, err = reader.readResolvedSchema("#/components/schemas/fruits")
assert.ErrorContains(t, err, "references loop detected. traversal trace: -> #/components/schemas/fruits -> #/components/schemas/foo")
}
func TestCrossReferenceLoopErrors(t *testing.T) {
specString := `{
"components": {
"schemas": {
"foo": {
"type": "object",
"description": "this description is ignored",
"properties": {
"bar": {
"type": "object",
"$ref": "#/components/schemas/fruits"
}
}
},
"fruits": {
"type": "object",
"description": "foo fighters fighting fruits",
"$ref": "#/components/schemas/foo"
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
_, err = reader.readResolvedSchema("#/components/schemas/fruits")
assert.ErrorContains(t, err, "references loop detected. traversal trace: -> #/components/schemas/fruits -> #/components/schemas/foo")
}
func TestReferenceResolutionForMapInObject(t *testing.T) {
specString := `
{
"components": {
"schemas": {
"foo": {
"type": "number"
},
"fruits": {
"type": "object",
"description": "fruits that are cool",
"properties": {
"guava": {
"type": "string",
"description": "a guava for my schema"
},
"mangos": {
"type": "object",
"description": "multiple mangos",
"$ref": "#/components/schemas/mango"
}
}
},
"mango": {
"type": "object",
"additionalProperties": {
"description": "a single mango",
"$ref": "#/components/schemas/foo"
}
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"description": "fruits that are cool",
"properties": {
"guava": {
"type": "string",
"description": "a guava for my schema"
},
"mangos": {
"type": "object",
"description": "multiple mangos",
"additionalProperties": {
"type": "number",
"description": "a single mango"
}
}
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
func TestReferenceResolutionForArrayInObject(t *testing.T) {
specString := `{
"components": {
"schemas": {
"foo": {
"type": "number"
},
"fruits": {
"type": "object",
"description": "fruits that are cool",
"properties": {
"guava": {
"type": "string",
"description": "a guava for my schema"
},
"mangos": {
"type": "object",
"description": "multiple mangos",
"$ref": "#/components/schemas/mango"
}
}
},
"mango": {
"type": "object",
"items": {
"description": "a single mango",
"$ref": "#/components/schemas/foo"
}
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"description": "fruits that are cool",
"properties": {
"guava": {
"type": "string",
"description": "a guava for my schema"
},
"mangos": {
"type": "object",
"description": "multiple mangos",
"items": {
"type": "number",
"description": "a single mango"
}
}
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}
func TestReferenceResolutionDoesNotOverwriteDescriptions(t *testing.T) {
specString := `{
"components": {
"schemas": {
"foo": {
"type": "number"
},
"fruits": {
"type": "object",
"properties": {
"guava": {
"type": "object",
"description": "Guava is a fruit",
"$ref": "#/components/schemas/foo"
},
"mango": {
"type": "object",
"description": "What is a mango?",
"$ref": "#/components/schemas/foo"
}
}
}
}
}
}`
spec := &Specification{}
reader := &OpenapiReader{
OpenapiSpec: spec,
memo: make(map[string]jsonschema.Schema),
}
err := json.Unmarshal([]byte(specString), spec)
require.NoError(t, err)
fruitsSchema, err := reader.readResolvedSchema("#/components/schemas/fruits")
require.NoError(t, err)
fruitsSchemaJson, err := json.MarshalIndent(fruitsSchema, " ", " ")
require.NoError(t, err)
expected := `{
"type": "object",
"properties": {
"guava": {
"type": "number",
"description": "Guava is a fruit"
},
"mango": {
"type": "number",
"description": "What is a mango?"
}
}
}`
t.Log("[DEBUG] actual: ", string(fruitsSchemaJson))
t.Log("[DEBUG] expected: ", expected)
assert.Equal(t, expected, string(fruitsSchemaJson))
}

View File

@ -1,306 +0,0 @@
package schema
import (
"reflect"
"github.com/databricks/cli/libs/dyn/dynvar"
"github.com/databricks/cli/libs/jsonschema"
)
// // Fields tagged "readonly" should not be emitted in the schema as they are
// // computed at runtime, and should not be assigned a value by the bundle author.
// const readonlyTag = "readonly"
// // Annotation for internal bundle fields that should not be exposed to customers.
// // Fields can be tagged as "internal" to remove them from the generated schema.
// const internalTag = "internal"
// // Annotation for bundle fields that have been deprecated.
// // Fields tagged as "deprecated" are removed/omitted from the generated schema.
// const deprecatedTag = "deprecated"
// This function translates golang types into json schema. Here is the mapping
// between json schema types and golang types
//
// - GolangType -> Javascript type / Json Schema2
//
// - bool -> boolean
//
// - string -> string
//
// - int (all variants) -> number
//
// - float (all variants) -> number
//
// - map[string]MyStruct -> { type: object, additionalProperties: {}}
// for details visit: https://json-schema.org/understanding-json-schema/reference/object.html#additional-properties
//
// - []MyStruct -> {type: array, items: {}}
// for details visit: https://json-schema.org/understanding-json-schema/reference/array.html#items
//
// - MyStruct -> {type: object, properties: {}, additionalProperties: false}
// for details visit: https://json-schema.org/understanding-json-schema/reference/object.html#properties
func New(golangType reflect.Type, docs *Docs) (*jsonschema.Schema, error) {
s, err := jsonschema.FromType(golangType, func(s jsonschema.Schema) jsonschema.Schema {
if s.Type == jsonschema.NumberType || s.Type == jsonschema.BooleanType {
s = jsonschema.Schema{
AnyOf: []jsonschema.Schema{
s,
{
Type: jsonschema.StringType,
// TODO: Narrow down the scope of the regex match.
// Also likely need to rename this variable.
Pattern: dynvar.VariableRegex,
},
},
}
}
return s
})
if err != nil {
return nil, err
}
return &s, nil
// tracker := newTracker()
// schema, err := safeToSchema(golangType, docs, "", tracker)
// if err != nil {
// return nil, tracker.errWithTrace(err.Error(), "root")
// }
// return schema, nil
}
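The widening performed by the callback above is easiest to see in isolation. Below is a sketch of the same rule rewritten against the new FromType signature introduced later in this diff ([]func(reflect.Type, Schema) Schema), using the renamed dynvar.ReferenceRegex constant; note that the new generator maps ints to integer, so only number and boolean nodes are widened, exactly as in New:

package schema

import (
	"reflect"

	"github.com/databricks/cli/libs/dyn/dynvar"
	"github.com/databricks/cli/libs/jsonschema"
)

// interpolationPattern widens number and boolean nodes to an anyOf that also
// accepts a ${...} variable reference string. Sketch only, mirroring New above.
func interpolationPattern(_ reflect.Type, s jsonschema.Schema) jsonschema.Schema {
	if s.Type != jsonschema.NumberType && s.Type != jsonschema.BooleanType {
		return s
	}
	return jsonschema.Schema{
		AnyOf: []jsonschema.Schema{
			s,
			{
				Type:    jsonschema.StringType,
				Pattern: dynvar.ReferenceRegex,
			},
		},
	}
}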
// func jsonSchemaType(golangType reflect.Type) (jsonschema.Type, error) {
// switch golangType.Kind() {
// case reflect.Bool:
// return jsonschema.BooleanType, nil
// case reflect.String:
// return jsonschema.StringType, nil
// case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64,
// reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
// reflect.Float32, reflect.Float64:
// return jsonschema.NumberType, nil
// case reflect.Struct:
// return jsonschema.ObjectType, nil
// case reflect.Map:
// if golangType.Key().Kind() != reflect.String {
// return jsonschema.InvalidType, fmt.Errorf("only strings map keys are valid. key type: %v", golangType.Key().Kind())
// }
// return jsonschema.ObjectType, nil
// case reflect.Array, reflect.Slice:
// return jsonschema.ArrayType, nil
// default:
// return jsonschema.InvalidType, fmt.Errorf("unhandled golang type: %s", golangType)
// }
// }
// A wrapper over toSchema function to:
// 1. Detect cycles in the bundle config struct.
// 2. Update tracker
//
// params:
//
// - golangType: Golang type to generate json schema for
//
// - docs: Contains documentation to be injected into the generated json schema
//
// - traceId: An identifier for the current type, to trace recursive traversal.
// Its value is the first json tag in case of struct fields and "" in other cases
// like array, map or no json tags
//
// - tracker: Keeps track of types / traceIds seen during recursive traversal
// func safeToSchema(golangType reflect.Type, docs *Docs, traceId string, tracker *tracker) (*jsonschema.Schema, error) {
// // HACK to unblock CLI release (13th Feb 2024). This is temporary until proper
// // support for recursive types is added to the schema generator. PR: https://github.com/databricks/cli/pull/1204
// if traceId == "for_each_task" {
// return &jsonschema.Schema{
// Type: jsonschema.ObjectType,
// }, nil
// }
// // WE ERROR OUT IF THERE ARE CYCLES IN THE JSON SCHEMA
// // There are mechanisms to deal with cycles through recursive identifiers in JSON
// // schema. However, if we use them, we would need to make sure we are able to detect
// // cycles where two properties (directly or indirectly) point to each other.
// //
// // see: https://json-schema.org/understanding-json-schema/structuring.html#recursion
// // for details
// if tracker.hasCycle(golangType) {
// return nil, fmt.Errorf("cycle detected")
// }
// tracker.push(golangType, traceId)
// props, err := toSchema(golangType, docs, tracker)
// if err != nil {
// return nil, err
// }
// tracker.pop(golangType)
// return props, nil
// }
// This function returns all member fields of the provided type.
// If the type has embedded (aka anonymous) fields, this function traverses
// those in a breadth-first manner.
// func getStructFields(golangType reflect.Type) []reflect.StructField {
// fields := []reflect.StructField{}
// bfsQueue := list.New()
// for i := 0; i < golangType.NumField(); i++ {
// bfsQueue.PushBack(golangType.Field(i))
// }
// for bfsQueue.Len() > 0 {
// front := bfsQueue.Front()
// field := front.Value.(reflect.StructField)
// bfsQueue.Remove(front)
// if !field.Anonymous {
// fields = append(fields, field)
// continue
// }
// fieldType := field.Type
// if fieldType.Kind() == reflect.Pointer {
// fieldType = fieldType.Elem()
// }
// for i := 0; i < fieldType.NumField(); i++ {
// bfsQueue.PushBack(fieldType.Field(i))
// }
// }
// return fields
// }
// func toSchema(golangType reflect.Type, docs *Docs, tracker *tracker) (*jsonschema.Schema, error) {
// // *Struct and Struct generate identical json schemas
// if golangType.Kind() == reflect.Pointer {
// return safeToSchema(golangType.Elem(), docs, "", tracker)
// }
// if golangType.Kind() == reflect.Interface {
// return &jsonschema.Schema{}, nil
// }
// rootJavascriptType, err := jsonSchemaType(golangType)
// if err != nil {
// return nil, err
// }
// jsonSchema := &jsonschema.Schema{Type: rootJavascriptType}
// // If the type is a non-string primitive, then we allow it to be a string
// // provided it's a pure variable reference (i.e. only a single variable reference).
// if rootJavascriptType == jsonschema.BooleanType || rootJavascriptType == jsonschema.NumberType {
// jsonSchema = &jsonschema.Schema{
// AnyOf: []*jsonschema.Schema{
// {
// Type: rootJavascriptType,
// },
// {
// Type: jsonschema.StringType,
// Pattern: dynvar.VariableRegex,
// },
// },
// }
// }
// if docs != nil {
// jsonSchema.Description = docs.Description
// }
// // case array/slice
// if golangType.Kind() == reflect.Array || golangType.Kind() == reflect.Slice {
// elemGolangType := golangType.Elem()
// elemJavascriptType, err := jsonSchemaType(elemGolangType)
// if err != nil {
// return nil, err
// }
// var childDocs *Docs
// if docs != nil {
// childDocs = docs.Items
// }
// elemProps, err := safeToSchema(elemGolangType, childDocs, "", tracker)
// if err != nil {
// return nil, err
// }
// jsonSchema.Items = &jsonschema.Schema{
// Type: elemJavascriptType,
// Properties: elemProps.Properties,
// AdditionalProperties: elemProps.AdditionalProperties,
// Items: elemProps.Items,
// Required: elemProps.Required,
// }
// }
// // case map
// if golangType.Kind() == reflect.Map {
// if golangType.Key().Kind() != reflect.String {
// return nil, fmt.Errorf("only string keyed maps allowed")
// }
// var childDocs *Docs
// if docs != nil {
// childDocs = docs.AdditionalProperties
// }
// jsonSchema.AdditionalProperties, err = safeToSchema(golangType.Elem(), childDocs, "", tracker)
// if err != nil {
// return nil, err
// }
// }
// // case struct
// if golangType.Kind() == reflect.Struct {
// children := getStructFields(golangType)
// properties := map[string]*jsonschema.Schema{}
// required := []string{}
// for _, child := range children {
// bundleTag := child.Tag.Get("bundle")
// // Fields marked as "readonly", "internal" or "deprecated" are skipped
// // while generating the schema
// if bundleTag == readonlyTag || bundleTag == internalTag || bundleTag == deprecatedTag {
// continue
// }
// // get child json tags
// childJsonTag := strings.Split(child.Tag.Get("json"), ",")
// childName := childJsonTag[0]
// // skip children that have no json tags, the first json tag is ""
// // or the first json tag is "-"
// if childName == "" || childName == "-" {
// continue
// }
// // get docs for the child if they exist
// var childDocs *Docs
// if docs != nil {
// if val, ok := docs.Properties[childName]; ok {
// childDocs = val
// }
// }
// // compute if the child is a required field. Determined by the
// // presence of "omitempty" in the json tags
// hasOmitEmptyTag := false
// for i := 1; i < len(childJsonTag); i++ {
// if childJsonTag[i] == "omitempty" {
// hasOmitEmptyTag = true
// }
// }
// if !hasOmitEmptyTag {
// required = append(required, childName)
// }
// // compute Schema.Properties for the child recursively
// fieldProps, err := safeToSchema(child.Type, childDocs, childName, tracker)
// if err != nil {
// return nil, err
// }
// properties[childName] = fieldProps
// }
// jsonSchema.AdditionalProperties = false
// jsonSchema.Properties = properties
// jsonSchema.Required = required
// }
// return jsonSchema, nil
// }

View File

@ -1,341 +0,0 @@
package schema
import (
"encoding/json"
"reflect"
"testing"
"github.com/stretchr/testify/assert"
"github.com/stretchr/testify/require"
)
// TODO: Add a test that checks the primitive overrides for reference regexes work.
// Basically that the custom override for the bundle regex works.
// TODO: Add a bundle of end-to-end tests that both fail and pass the schema validation.
func TestDocIngestionForObject(t *testing.T) {
docs := &Docs{
Description: "docs for root",
Properties: map[string]*Docs{
"my_struct": {
Description: "docs for my struct",
Properties: map[string]*Docs{
"a": {
Description: "docs for a",
},
"c": {
Description: "docs for c which does not exist on my_struct",
},
},
},
},
}
type MyStruct struct {
A string `json:"a"`
B int `json:"b"`
}
type Root struct {
MyStruct *MyStruct `json:"my_struct"`
}
elem := Root{}
schema, err := New(reflect.TypeOf(elem), docs)
require.NoError(t, err)
jsonSchema, err := json.MarshalIndent(schema, " ", " ")
assert.NoError(t, err)
expectedSchema :=
`{
"type": "object",
"description": "docs for root",
"properties": {
"my_struct": {
"type": "object",
"description": "docs for my struct",
"properties": {
"a": {
"type": "string",
"description": "docs for a"
},
"b": {
"anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}"
}
]
}
},
"additionalProperties": false,
"required": [
"a",
"b"
]
}
},
"additionalProperties": false,
"required": [
"my_struct"
]
}`
t.Log("[DEBUG] actual: ", string(jsonSchema))
t.Log("[DEBUG] expected: ", expectedSchema)
assert.Equal(t, expectedSchema, string(jsonSchema))
}
func TestDocIngestionForSlice(t *testing.T) {
docs := &Docs{
Description: "docs for root",
Properties: map[string]*Docs{
"my_slice": {
Description: "docs for my slice",
Items: &Docs{
Properties: map[string]*Docs{
"guava": {
Description: "docs for guava",
},
"pineapple": {
Description: "docs for pineapple",
},
"watermelon": {
Description: "docs for watermelon which does not exist in schema",
},
},
},
},
},
}
type Bar struct {
Guava int `json:"guava"`
Pineapple int `json:"pineapple"`
}
type Root struct {
MySlice []Bar `json:"my_slice"`
}
elem := Root{}
schema, err := New(reflect.TypeOf(elem), docs)
require.NoError(t, err)
jsonSchema, err := json.MarshalIndent(schema, " ", " ")
assert.NoError(t, err)
expectedSchema :=
`{
"type": "object",
"description": "docs for root",
"properties": {
"my_slice": {
"type": "array",
"description": "docs for my slice",
"items": {
"type": "object",
"properties": {
"guava": {
"description": "docs for guava",
"anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}"
}
]
},
"pineapple": {
"description": "docs for pineapple",
"anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}"
}
]
}
},
"additionalProperties": false,
"required": [
"guava",
"pineapple"
]
}
}
},
"additionalProperties": false,
"required": [
"my_slice"
]
}`
t.Log("[DEBUG] actual: ", string(jsonSchema))
t.Log("[DEBUG] expected: ", expectedSchema)
assert.Equal(t, expectedSchema, string(jsonSchema))
}
func TestDocIngestionForMap(t *testing.T) {
docs := &Docs{
Description: "docs for root",
Properties: map[string]*Docs{
"my_map": {
Description: "docs for my map",
AdditionalProperties: &Docs{
Properties: map[string]*Docs{
"apple": {
Description: "docs for apple",
},
"mango": {
Description: "docs for mango",
},
"watermelon": {
Description: "docs for watermelon which does not exist in schema",
},
"papaya": {
Description: "docs for papaya which does not exist in schema",
},
},
},
},
},
}
type Foo struct {
Apple int `json:"apple"`
Mango int `json:"mango"`
}
type Root struct {
MyMap map[string]*Foo `json:"my_map"`
}
elem := Root{}
schema, err := New(reflect.TypeOf(elem), docs)
require.NoError(t, err)
jsonSchema, err := json.MarshalIndent(schema, " ", " ")
assert.NoError(t, err)
expectedSchema :=
`{
"type": "object",
"description": "docs for root",
"properties": {
"my_map": {
"type": "object",
"description": "docs for my map",
"additionalProperties": {
"type": "object",
"properties": {
"apple": {
"description": "docs for apple",
"anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}"
}
]
},
"mango": {
"description": "docs for mango",
"anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}"
}
]
}
},
"additionalProperties": false,
"required": [
"apple",
"mango"
]
}
}
},
"additionalProperties": false,
"required": [
"my_map"
]
}`
t.Log("[DEBUG] actual: ", string(jsonSchema))
t.Log("[DEBUG] expected: ", expectedSchema)
assert.Equal(t, expectedSchema, string(jsonSchema))
}
func TestDocIngestionForTopLevelPrimitive(t *testing.T) {
docs := &Docs{
Description: "docs for root",
Properties: map[string]*Docs{
"my_val": {
Description: "docs for my val",
},
},
}
type Root struct {
MyVal int `json:"my_val"`
}
elem := Root{}
schema, err := New(reflect.TypeOf(elem), docs)
require.NoError(t, err)
jsonSchema, err := json.MarshalIndent(schema, " ", " ")
assert.NoError(t, err)
expectedSchema :=
`{
"type": "object",
"description": "docs for root",
"properties": {
"my_val": {
"description": "docs for my val",
"anyOf": [
{
"type": "number"
},
{
"type": "string",
"pattern": "\\$\\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\\[[0-9]+\\])*)*(\\[[0-9]+\\])*)\\}"
}
]
}
},
"additionalProperties": false,
"required": [
"my_val"
]
}`
t.Log("[DEBUG] actual: ", string(jsonSchema))
t.Log("[DEBUG] expected: ", expectedSchema)
assert.Equal(t, expectedSchema, string(jsonSchema))
}

View File

@ -1,11 +0,0 @@
package schema
import "github.com/databricks/cli/libs/jsonschema"
type Specification struct {
Components *Components `json:"components"`
}
type Components struct {
Schemas map[string]*jsonschema.Schema `json:"schemas,omitempty"`
}
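For reference, a sketch of how a "#/components/schemas/<name>" pointer maps onto these two types (illustrative only; the real resolution lives in the OpenapiReader exercised by the tests earlier in this diff):

package schema

import (
	"fmt"
	"strings"

	"github.com/databricks/cli/libs/jsonschema"
)

// lookupRef resolves a "#/components/schemas/<name>" reference against a
// Specification. Illustrative helper, not part of this diff.
func lookupRef(spec *Specification, ref string) (*jsonschema.Schema, error) {
	name := strings.TrimPrefix(ref, "#/components/schemas/")
	s, ok := spec.Components.Schemas[name]
	if !ok {
		return nil, fmt.Errorf("schema %q not found in components", name)
	}
	return s, nil
}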

View File

@ -1,53 +0,0 @@
package schema
import (
"container/list"
"fmt"
)
type tracker struct {
// Nodes encountered in current path during the recursive traversal. Used to
// check for cycles
seenNodes map[interface{}]struct{}
// List of node names encountered in order in current path during the recursive traversal.
// Used to hydrate errors with the path to the exact node where the error occurred.
//
// NOTE: node and node names can be the same
listOfNodes *list.List
}
func newTracker() *tracker {
return &tracker{
seenNodes: map[interface{}]struct{}{},
listOfNodes: list.New(),
}
}
func (t *tracker) errWithTrace(prefix string, initTrace string) error {
traceString := initTrace
curr := t.listOfNodes.Front()
for curr != nil {
if curr.Value.(string) != "" {
traceString += " -> " + curr.Value.(string)
}
curr = curr.Next()
}
return fmt.Errorf("%s. traversal trace: %s", prefix, traceString)
}
func (t *tracker) hasCycle(node interface{}) bool {
_, ok := t.seenNodes[node]
return ok
}
func (t *tracker) push(node interface{}, name string) {
t.seenNodes[node] = struct{}{}
t.listOfNodes.PushBack(name)
}
func (t *tracker) pop(nodeType interface{}) {
back := t.listOfNodes.Back()
t.listOfNodes.Remove(back)
delete(t.seenNodes, nodeType)
}
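A sketch of how this tracker was driven by the (also deleted) recursive traversal: push on entry, pop on exit, and bail out with a trace when a node repeats on the current path. The caller below is illustrative, not part of this diff:

package schema

// visit shows the push/pop discipline that keeps seenNodes scoped to the
// current traversal path, so only genuine cycles are reported.
func visit(t *tracker, node interface{}, name string) error {
	if t.hasCycle(node) {
		return t.errWithTrace("cycle detected", "root")
	}
	t.push(node, name)
	defer t.pop(node)
	// ... recurse into the node's children here ...
	return nil
}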

6
cmd/workspace/cmd.go generated
View File

@ -44,6 +44,8 @@ import (
permission_migration "github.com/databricks/cli/cmd/workspace/permission-migration"
permissions "github.com/databricks/cli/cmd/workspace/permissions"
pipelines "github.com/databricks/cli/cmd/workspace/pipelines"
policy_compliance_for_clusters "github.com/databricks/cli/cmd/workspace/policy-compliance-for-clusters"
policy_compliance_for_jobs "github.com/databricks/cli/cmd/workspace/policy-compliance-for-jobs"
policy_families "github.com/databricks/cli/cmd/workspace/policy-families"
provider_exchange_filters "github.com/databricks/cli/cmd/workspace/provider-exchange-filters"
provider_exchanges "github.com/databricks/cli/cmd/workspace/provider-exchanges"
@ -63,6 +65,7 @@ import (
recipients "github.com/databricks/cli/cmd/workspace/recipients"
registered_models "github.com/databricks/cli/cmd/workspace/registered-models"
repos "github.com/databricks/cli/cmd/workspace/repos"
resource_quotas "github.com/databricks/cli/cmd/workspace/resource-quotas"
schemas "github.com/databricks/cli/cmd/workspace/schemas"
secrets "github.com/databricks/cli/cmd/workspace/secrets"
service_principals "github.com/databricks/cli/cmd/workspace/service-principals"
@ -130,6 +133,8 @@ func All() []*cobra.Command {
out = append(out, permission_migration.New())
out = append(out, permissions.New())
out = append(out, pipelines.New())
out = append(out, policy_compliance_for_clusters.New())
out = append(out, policy_compliance_for_jobs.New())
out = append(out, policy_families.New())
out = append(out, provider_exchange_filters.New())
out = append(out, provider_exchanges.New())
@ -149,6 +154,7 @@ func All() []*cobra.Command {
out = append(out, recipients.New())
out = append(out, registered_models.New())
out = append(out, repos.New())
out = append(out, resource_quotas.New())
out = append(out, schemas.New())
out = append(out, secrets.New())
out = append(out, service_principals.New())

View File

@ -75,6 +75,7 @@ func newCreate() *cobra.Command {
cmd.Flags().StringVar(&createReq.AccessPoint, "access-point", createReq.AccessPoint, `The AWS access point to use when accessing s3 for this external location.`)
cmd.Flags().StringVar(&createReq.Comment, "comment", createReq.Comment, `User-provided free-form text description.`)
// TODO: complex arg: encryption_details
cmd.Flags().BoolVar(&createReq.Fallback, "fallback", createReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
cmd.Flags().BoolVar(&createReq.ReadOnly, "read-only", createReq.ReadOnly, `Indicates whether the external location is read-only.`)
cmd.Flags().BoolVar(&createReq.SkipValidation, "skip-validation", createReq.SkipValidation, `Skips validation of the storage credential associated with the external location.`)
@ -347,6 +348,7 @@ func newUpdate() *cobra.Command {
cmd.Flags().StringVar(&updateReq.Comment, "comment", updateReq.Comment, `User-provided free-form text description.`)
cmd.Flags().StringVar(&updateReq.CredentialName, "credential-name", updateReq.CredentialName, `Name of the storage credential used with this location.`)
// TODO: complex arg: encryption_details
cmd.Flags().BoolVar(&updateReq.Fallback, "fallback", updateReq.Fallback, `Indicates whether fallback mode is enabled for this external location.`)
cmd.Flags().BoolVar(&updateReq.Force, "force", updateReq.Force, `Force update even if changing url invalidates dependent external tables or mounts.`)
cmd.Flags().Var(&updateReq.IsolationMode, "isolation-mode", `Whether the current securable is accessible from all workspaces or a specific set of workspaces. Supported values: [ISOLATION_MODE_ISOLATED, ISOLATION_MODE_OPEN]`)
cmd.Flags().StringVar(&updateReq.NewName, "new-name", updateReq.NewName, `New name for the external location.`)

View File

@ -0,0 +1,260 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package policy_compliance_for_clusters
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/compute"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "policy-compliance-for-clusters",
Short: `The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.`,
Long: `The policy compliance APIs allow you to view and manage the policy compliance
status of clusters in your workspace.
A cluster is compliant with its policy if its configuration satisfies all its
policy rules. Clusters could be out of compliance if their policy was updated
after the cluster was last edited.
The get and list compliance APIs allow you to view the policy compliance
status of a cluster. The enforce compliance API allows you to update a cluster
to be compliant with the current version of its policy.`,
GroupID: "compute",
Annotations: map[string]string{
"package": "compute",
},
}
// Add methods
cmd.AddCommand(newEnforceCompliance())
cmd.AddCommand(newGetCompliance())
cmd.AddCommand(newListCompliance())
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start enforce-compliance command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var enforceComplianceOverrides []func(
*cobra.Command,
*compute.EnforceClusterComplianceRequest,
)
func newEnforceCompliance() *cobra.Command {
cmd := &cobra.Command{}
var enforceComplianceReq compute.EnforceClusterComplianceRequest
var enforceComplianceJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews the changes that would be made to a cluster to enforce compliance but does not update the cluster.`)
cmd.Use = "enforce-compliance CLUSTER_ID"
cmd.Short = `Enforce cluster policy compliance.`
cmd.Long = `Enforce cluster policy compliance.
Updates a cluster to be compliant with the current version of its policy. A
cluster can be updated if it is in a RUNNING or TERMINATED state.
If a cluster is updated while in a RUNNING state, it will be restarted so
that the new attributes can take effect.
If a cluster is updated while in a TERMINATED state, it will remain
TERMINATED. The next time the cluster is started, the new attributes will
take effect.
Clusters created by the Databricks Jobs, DLT, or Models services cannot be
enforced by this API. Instead, use the "Enforce job policy compliance" API to
enforce policy compliance on jobs.
Arguments:
CLUSTER_ID: The ID of the cluster you want to enforce policy compliance on.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := root.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'cluster_id' in your JSON input")
}
return nil
}
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
enforceComplianceReq.ClusterId = args[0]
}
response, err := w.PolicyComplianceForClusters.EnforceCompliance(ctx, enforceComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range enforceComplianceOverrides {
fn(cmd, &enforceComplianceReq)
}
return cmd
}
// start get-compliance command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getComplianceOverrides []func(
*cobra.Command,
*compute.GetClusterComplianceRequest,
)
func newGetCompliance() *cobra.Command {
cmd := &cobra.Command{}
var getComplianceReq compute.GetClusterComplianceRequest
// TODO: short flags
cmd.Use = "get-compliance CLUSTER_ID"
cmd.Short = `Get cluster policy compliance.`
cmd.Long = `Get cluster policy compliance.
Returns the policy compliance status of a cluster. Clusters could be out of
compliance if their policy was updated after the cluster was last edited.
Arguments:
CLUSTER_ID: The ID of the cluster to get the compliance status`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
getComplianceReq.ClusterId = args[0]
response, err := w.PolicyComplianceForClusters.GetCompliance(ctx, getComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getComplianceOverrides {
fn(cmd, &getComplianceReq)
}
return cmd
}
// start list-compliance command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
*cobra.Command,
*compute.ListClusterCompliancesRequest,
)
func newListCompliance() *cobra.Command {
cmd := &cobra.Command{}
var listComplianceReq compute.ListClusterCompliancesRequest
// TODO: short flags
cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)
cmd.Use = "list-compliance POLICY_ID"
cmd.Short = `List cluster policy compliance.`
cmd.Long = `List cluster policy compliance.
Returns the policy compliance status of all clusters that use a given policy.
Clusters could be out of compliance if their policy was updated after the
cluster was last edited.
Arguments:
POLICY_ID: Canonical unique identifier for the cluster policy.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
listComplianceReq.PolicyId = args[0]
response := w.PolicyComplianceForClusters.ListCompliance(ctx, listComplianceReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range listComplianceOverrides {
fn(cmd, &listComplianceReq)
}
return cmd
}
// end service PolicyComplianceForClusters
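Equivalently, via the SDK that this command wraps — a sketch assuming workspace authentication is already configured in the environment; the cluster ID is hypothetical:

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/compute"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())
	// Same call the get-compliance subcommand makes above.
	resp, err := w.PolicyComplianceForClusters.GetCompliance(ctx, compute.GetClusterComplianceRequest{
		ClusterId: "1234-567890-abcde123", // hypothetical
	})
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", resp)
}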

View File

@ -0,0 +1,262 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package policy_compliance_for_jobs
import (
"fmt"
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/cli/libs/flags"
"github.com/databricks/databricks-sdk-go/service/jobs"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "policy-compliance-for-jobs",
Short: `The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.`,
Long: `The compliance APIs allow you to view and manage the policy compliance status
of jobs in your workspace. This API currently only supports compliance
controls for cluster policies.
A job is in compliance if its cluster configurations satisfy the rules of all
their respective cluster policies. A job could be out of compliance if a
cluster policy it uses was updated after the job was last edited. The job is
considered out of compliance if any of its clusters no longer comply with
their updated policies.
The get and list compliance APIs allow you to view the policy compliance
status of a job. The enforce compliance API allows you to update a job so that
it becomes compliant with all of its policies.`,
GroupID: "jobs",
Annotations: map[string]string{
"package": "jobs",
},
}
// Add methods
cmd.AddCommand(newEnforceCompliance())
cmd.AddCommand(newGetCompliance())
cmd.AddCommand(newListCompliance())
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start enforce-compliance command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var enforceComplianceOverrides []func(
*cobra.Command,
*jobs.EnforcePolicyComplianceRequest,
)
func newEnforceCompliance() *cobra.Command {
cmd := &cobra.Command{}
var enforceComplianceReq jobs.EnforcePolicyComplianceRequest
var enforceComplianceJson flags.JsonFlag
// TODO: short flags
cmd.Flags().Var(&enforceComplianceJson, "json", `either inline JSON string or @path/to/file.json with request body`)
cmd.Flags().BoolVar(&enforceComplianceReq.ValidateOnly, "validate-only", enforceComplianceReq.ValidateOnly, `If set, previews changes made to the job to comply with its policy, but does not update the job.`)
cmd.Use = "enforce-compliance JOB_ID"
cmd.Short = `Enforce job policy compliance.`
cmd.Long = `Enforce job policy compliance.
Updates a job so the job clusters that are created when running the job
(specified in new_cluster) are compliant with the current versions of their
respective cluster policies. All-purpose clusters used in the job will not be
updated.
Arguments:
JOB_ID: The ID of the job you want to enforce policy compliance on.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
if cmd.Flags().Changed("json") {
err := root.ExactArgs(0)(cmd, args)
if err != nil {
return fmt.Errorf("when --json flag is specified, no positional arguments are required. Provide 'job_id' in your JSON input")
}
return nil
}
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
if cmd.Flags().Changed("json") {
err = enforceComplianceJson.Unmarshal(&enforceComplianceReq)
if err != nil {
return err
}
}
if !cmd.Flags().Changed("json") {
_, err = fmt.Sscan(args[0], &enforceComplianceReq.JobId)
if err != nil {
return fmt.Errorf("invalid JOB_ID: %s", args[0])
}
}
response, err := w.PolicyComplianceForJobs.EnforceCompliance(ctx, enforceComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range enforceComplianceOverrides {
fn(cmd, &enforceComplianceReq)
}
return cmd
}
// start get-compliance command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getComplianceOverrides []func(
*cobra.Command,
*jobs.GetPolicyComplianceRequest,
)
func newGetCompliance() *cobra.Command {
cmd := &cobra.Command{}
var getComplianceReq jobs.GetPolicyComplianceRequest
// TODO: short flags
cmd.Use = "get-compliance JOB_ID"
cmd.Short = `Get job policy compliance.`
cmd.Long = `Get job policy compliance.
Returns the policy compliance status of a job. Jobs could be out of compliance
if a cluster policy they use was updated after the job was last edited and
some of its job clusters no longer comply with their updated policies.
Arguments:
JOB_ID: The ID of the job whose compliance status you are requesting.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
_, err = fmt.Sscan(args[0], &getComplianceReq.JobId)
if err != nil {
return fmt.Errorf("invalid JOB_ID: %s", args[0])
}
response, err := w.PolicyComplianceForJobs.GetCompliance(ctx, getComplianceReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getComplianceOverrides {
fn(cmd, &getComplianceReq)
}
return cmd
}
// start list-compliance command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listComplianceOverrides []func(
*cobra.Command,
*jobs.ListJobComplianceRequest,
)
func newListCompliance() *cobra.Command {
cmd := &cobra.Command{}
var listComplianceReq jobs.ListJobComplianceRequest
// TODO: short flags
cmd.Flags().IntVar(&listComplianceReq.PageSize, "page-size", listComplianceReq.PageSize, `Use this field to specify the maximum number of results to be returned by the server.`)
cmd.Flags().StringVar(&listComplianceReq.PageToken, "page-token", listComplianceReq.PageToken, `A page token that can be used to navigate to the next page or previous page as returned by next_page_token or prev_page_token.`)
cmd.Use = "list-compliance POLICY_ID"
cmd.Short = `List job policy compliance.`
cmd.Long = `List job policy compliance.
Returns the policy compliance status of all jobs that use a given policy. Jobs
could be out of compliance if a cluster policy they use was updated after the
job was last edited and its job clusters no longer comply with the updated
policy.
Arguments:
POLICY_ID: Canonical unique identifier for the cluster policy.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(1)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
listComplianceReq.PolicyId = args[0]
response := w.PolicyComplianceForJobs.ListCompliance(ctx, listComplianceReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range listComplianceOverrides {
fn(cmd, &listComplianceReq)
}
return cmd
}
// end service PolicyComplianceForJobs
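One difference from the clusters variant worth noting: JOB_ID is numeric, so the command parses its positional argument with fmt.Sscan instead of assigning the string directly. In isolation:

package main

import "fmt"

func main() {
	var jobId int64
	// Mirrors the parsing in newEnforceCompliance/newGetCompliance above.
	if _, err := fmt.Sscan("1234", &jobId); err != nil {
		fmt.Println("invalid JOB_ID:", err)
		return
	}
	fmt.Println(jobId) // prints 1234
}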

View File

@ -16,9 +16,9 @@ var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "query-history",
Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints, serverless compute, and DLT.`,
Short: `A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.`,
Long: `A service responsible for storing and retrieving the list of queries run
against SQL endpoints, serverless compute, and DLT.`,
against SQL endpoints and serverless compute.`,
GroupID: "sql",
Annotations: map[string]string{
"package": "sql",
@ -53,6 +53,7 @@ func newList() *cobra.Command {
// TODO: short flags
// TODO: complex arg: filter_by
cmd.Flags().BoolVar(&listReq.IncludeMetrics, "include-metrics", listReq.IncludeMetrics, `Whether to include the query metrics with each query.`)
cmd.Flags().IntVar(&listReq.MaxResults, "max-results", listReq.MaxResults, `Limit the number of results returned in one page.`)
cmd.Flags().StringVar(&listReq.PageToken, "page-token", listReq.PageToken, `A token that can be used to get the next page of results.`)
@ -60,8 +61,7 @@ func newList() *cobra.Command {
cmd.Short = `List Queries.`
cmd.Long = `List Queries.
List the history of queries through SQL warehouses, serverless compute, and
DLT.
List the history of queries through SQL warehouses and serverless compute.
You can filter by user ID, warehouse ID, status, and time range. Most recently
started queries are returned first (up to max_results in request). The

168
cmd/workspace/resource-quotas/resource-quotas.go generated Executable file
View File

@ -0,0 +1,168 @@
// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
package resource_quotas
import (
"github.com/databricks/cli/cmd/root"
"github.com/databricks/cli/libs/cmdio"
"github.com/databricks/databricks-sdk-go/service/catalog"
"github.com/spf13/cobra"
)
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var cmdOverrides []func(*cobra.Command)
func New() *cobra.Command {
cmd := &cobra.Command{
Use: "resource-quotas",
Short: `Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.`,
Long: `Unity Catalog enforces resource quotas on all securable objects, which limits
the number of resources that can be created. Quotas are expressed in terms of
a resource type and a parent (for example, tables per metastore or schemas per
catalog). The resource quota APIs enable you to monitor your current usage and
limits. For more information on resource quotas see the [Unity Catalog
documentation].
[Unity Catalog documentation]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#resource-quotas`,
GroupID: "catalog",
Annotations: map[string]string{
"package": "catalog",
},
}
// Add methods
cmd.AddCommand(newGetQuota())
cmd.AddCommand(newListQuotas())
// Apply optional overrides to this command.
for _, fn := range cmdOverrides {
fn(cmd)
}
return cmd
}
// start get-quota command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var getQuotaOverrides []func(
*cobra.Command,
*catalog.GetQuotaRequest,
)
func newGetQuota() *cobra.Command {
cmd := &cobra.Command{}
var getQuotaReq catalog.GetQuotaRequest
// TODO: short flags
cmd.Use = "get-quota PARENT_SECURABLE_TYPE PARENT_FULL_NAME QUOTA_NAME"
cmd.Short = `Get information for a single resource quota.`
cmd.Long = `Get information for a single resource quota.
The GetQuota API returns usage information for a single resource quota,
defined as a child-parent pair. This API also refreshes the quota count if it
is out of date. Refreshes are triggered asynchronously. The updated count
might not be returned in the first call.
Arguments:
PARENT_SECURABLE_TYPE: Securable type of the quota parent.
PARENT_FULL_NAME: Full name of the parent resource. Provide the metastore ID if the parent
is a metastore.
QUOTA_NAME: Name of the quota. Follows the pattern of the quota type, with "-quota"
added as a suffix.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(3)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
getQuotaReq.ParentSecurableType = args[0]
getQuotaReq.ParentFullName = args[1]
getQuotaReq.QuotaName = args[2]
response, err := w.ResourceQuotas.GetQuota(ctx, getQuotaReq)
if err != nil {
return err
}
return cmdio.Render(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range getQuotaOverrides {
fn(cmd, &getQuotaReq)
}
return cmd
}
// start list-quotas command
// Slice with functions to override default command behavior.
// Functions can be added from the `init()` function in manually curated files in this directory.
var listQuotasOverrides []func(
*cobra.Command,
*catalog.ListQuotasRequest,
)
func newListQuotas() *cobra.Command {
cmd := &cobra.Command{}
var listQuotasReq catalog.ListQuotasRequest
// TODO: short flags
cmd.Flags().IntVar(&listQuotasReq.MaxResults, "max-results", listQuotasReq.MaxResults, `The number of quotas to return.`)
cmd.Flags().StringVar(&listQuotasReq.PageToken, "page-token", listQuotasReq.PageToken, `Opaque token for the next page of results.`)
cmd.Use = "list-quotas"
cmd.Short = `List all resource quotas under a metastore.`
cmd.Long = `List all resource quotas under a metastore.
ListQuotas returns all quota values under the metastore. There are no SLAs on
the freshness of the counts returned. This API does not trigger a refresh of
quota counts.`
cmd.Annotations = make(map[string]string)
cmd.Args = func(cmd *cobra.Command, args []string) error {
check := root.ExactArgs(0)
return check(cmd, args)
}
cmd.PreRunE = root.MustWorkspaceClient
cmd.RunE = func(cmd *cobra.Command, args []string) (err error) {
ctx := cmd.Context()
w := root.WorkspaceClient(ctx)
response := w.ResourceQuotas.ListQuotas(ctx, listQuotasReq)
return cmdio.RenderIterator(ctx, response)
}
// Disable completions since they are not applicable.
// Can be overridden by manual implementation in `override.go`.
cmd.ValidArgsFunction = cobra.NoFileCompletions
// Apply optional overrides to this command.
for _, fn := range listQuotasOverrides {
fn(cmd, &listQuotasReq)
}
return cmd
}
// end service ResourceQuotas
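The list-quotas command renders a paginated iterator; consuming the same iterator directly through the SDK looks roughly like this (a sketch; authentication is assumed to be configured):

package main

import (
	"context"
	"fmt"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/catalog"
)

func main() {
	ctx := context.Background()
	w := databricks.Must(databricks.NewWorkspaceClient())
	it := w.ResourceQuotas.ListQuotas(ctx, catalog.ListQuotasRequest{})
	// cmdio.RenderIterator does this loop internally.
	for it.HasNext(ctx) {
		quota, err := it.Next(ctx)
		if err != nil {
			panic(err)
		}
		fmt.Printf("%+v\n", quota)
	}
}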

2
go.mod
View File

@ -5,7 +5,7 @@ go 1.22
require (
github.com/Masterminds/semver/v3 v3.2.1 // MIT
github.com/briandowns/spinner v1.23.1 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.44.0 // Apache 2.0
github.com/databricks/databricks-sdk-go v0.45.0 // Apache 2.0
github.com/fatih/color v1.17.0 // MIT
github.com/ghodss/yaml v1.0.0 // MIT + NOTICE
github.com/google/uuid v1.6.0 // BSD-3-Clause

4
go.sum generated
View File

@ -32,8 +32,8 @@ github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGX
github.com/cpuguy83/go-md2man/v2 v2.0.4/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
github.com/cyphar/filepath-securejoin v0.2.4 h1:Ugdm7cg7i6ZK6x3xDF1oEu1nfkyfH53EtKeQYTC3kyg=
github.com/cyphar/filepath-securejoin v0.2.4/go.mod h1:aPGpWjXOXUn2NCNjFvBE6aRxGGx79pTxQpKOJNYHHl4=
github.com/databricks/databricks-sdk-go v0.44.0 h1:9/FZACv4EFQIOYxfwYVKnY7v46xio9FKCw9tpKB2O/s=
github.com/databricks/databricks-sdk-go v0.44.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/databricks/databricks-sdk-go v0.45.0 h1:wdx5Wm/ESrahdHeq62WrjLeGjV4r722LLanD8ahI0Mo=
github.com/databricks/databricks-sdk-go v0.45.0/go.mod h1:ds+zbv5mlQG7nFEU5ojLtgN/u0/9YzZmKQES/CfedzU=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=

View File

@ -6,9 +6,9 @@ import (
"github.com/databricks/cli/libs/dyn"
)
const VariableRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}`
const ReferenceRegex = `\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}`
var re = regexp.MustCompile(VariableRegex)
var re = regexp.MustCompile(ReferenceRegex)
// ref represents a variable reference.
// It is a string [dyn.Value] contained in a larger [dyn.Value].
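To see what the renamed constant accepts, here is a quick standalone check; the pattern string is copied verbatim from the constant above:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	re := regexp.MustCompile(`\$\{([a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\.[a-zA-Z]+([-_]?[a-zA-Z0-9]+)*(\[[0-9]+\])*)*(\[[0-9]+\])*)\}`)
	for _, s := range []string{"${var.foo}", "${resources.jobs.my_job.id}", "plain string"} {
		fmt.Printf("%-30s matches: %v\n", s, re.MatchString(s))
	}
}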

View File

@ -3,17 +3,13 @@ package jsonschema
import (
"container/list"
"fmt"
"maps"
"path"
"reflect"
"slices"
"strings"
)
// TODO: Maybe can be removed?
var InvalidSchema = Schema{
Type: InvalidType,
}
// Fields tagged "readonly" should not be emitted in the schema as they are
// computed at runtime, and should not be assigned a value by the bundle author.
const readonlyTag = "readonly"
@ -26,19 +22,17 @@ const internalTag = "internal"
// Fields tagged as "deprecated" are removed/omitted from the generated schema.
const deprecatedTag = "deprecated"
// TODO: Test what happens with invalid cycles? Do integration tests fail?
// TODO: Call out in the PR description that recursive types like "for_each_task"
// are now supported.
type constructor struct {
// Map of typ.PkgPath() + "." + typ.Name() to the schema for that type.
// Example key: github.com/databricks/databricks-sdk-go/service/jobs.JobSettings
definitions map[string]Schema
seen map[string]struct{}
// Map of typ.PkgPath() + "." + typ.Name() to the corresponding type. Used to
// track types that have been seen to avoid infinite recursion.
seen map[string]reflect.Type
// Transformation function to apply after generating a node in the schema.
fn func(s Schema) Schema
// The root type for which the schema is being generated.
root reflect.Type
}
// The $defs block in a JSON schema cannot contain "/", otherwise it will not be
@ -47,13 +41,19 @@ type constructor struct {
//
// For example:
// {"a/b/c": "value"} is converted to {"a": {"b": {"c": "value"}}}
func (c *constructor) nestedDefinitions() any {
if len(c.definitions) == 0 {
func (c *constructor) Definitions() any {
defs := maps.Clone(c.definitions)
// Remove the root type from the definitions. No need to include it in the
// definitions.
delete(defs, typePath(c.root))
if len(defs) == 0 {
return nil
}
res := make(map[string]any)
for k, v := range c.definitions {
for k, v := range defs {
parts := strings.Split(k, "/")
cur := res
for i, p := range parts {
@ -72,47 +72,76 @@ func (c *constructor) nestedDefinitions() any {
return res
}
// TODO: Skip generating schema for interface fields.
func FromType(typ reflect.Type, fn func(s Schema) Schema) (Schema, error) {
// FromType converts a reflect.Type to a jsonschema.Schema. Nodes in the final JSON
// schema are guaranteed to be one level deep, which is achieved by defining $defs
// for every Go type and referring to them using $ref in the corresponding node in
// the JSON schema.
//
// fns is a list of transformation functions that will be applied to all $defs
// in the schema.
func FromType(typ reflect.Type, fns []func(typ reflect.Type, s Schema) Schema) (Schema, error) {
c := constructor{
definitions: make(map[string]Schema),
seen: make(map[string]struct{}),
fn: fn,
seen: make(map[string]reflect.Type),
root: typ,
}
err := c.walk(typ)
if err != nil {
return InvalidSchema, err
return Schema{}, err
}
for _, fn := range fns {
for k, v := range c.definitions {
c.definitions[k] = fn(c.seen[k], v)
}
}
res := c.definitions[typePath(typ)]
// No need to include the root type in the definitions.
delete(c.definitions, typePath(typ))
res.Definitions = c.nestedDefinitions()
res.Definitions = c.Definitions()
return res, nil
}
// typePath computes a unique string representation of the type. $ref in the generated
// JSON schema will refer to this path. See TestTypePath for example outputs.
func typePath(typ reflect.Type) string {
// Pointers have a typ.Name() of "". Dereference them to get the underlying type.
for typ.Kind() == reflect.Ptr {
typ = typ.Elem()
}
// typ.Name() resolves to "" for the any (interface{}) type.
if typ.Kind() == reflect.Interface {
return "interface"
}
// For built-in types, return the type name directly.
if typ.PkgPath() == "" {
return typ.Name()
// Recursively call typePath, to handle slices of slices / maps.
if typ.Kind() == reflect.Slice {
return path.Join("slice", typePath(typ.Elem()))
}
return strings.Join([]string{typ.PkgPath(), typ.Name()}, ".")
if typ.Kind() == reflect.Map {
if typ.Key().Kind() != reflect.String {
panic(fmt.Sprintf("found map with non-string key: %v", typ.Key()))
}
// Recursively call typePath, to handle maps of maps / slices.
return path.Join("map", typePath(typ.Elem()))
}
switch {
case typ.PkgPath() != "" && typ.Name() != "":
return typ.PkgPath() + "." + typ.Name()
case typ.Name() != "":
return typ.Name()
default:
// Invariant. This function should return a non-empty string
// for all types.
panic("unexpected empty type name for type: " + typ.String())
}
}
// TODO: would a worker-based model fit better here? Is this internal API
// the right fit?
// Walk the Go type, generating $defs for every type encountered, and populating
// the corresponding $ref in the JSON schema.
func (c *constructor) walk(typ reflect.Type) error {
// Dereference pointers if necessary.
for typ.Kind() == reflect.Ptr {
@ -121,10 +150,11 @@ func (c *constructor) walk(typ reflect.Type) error {
typPath := typePath(typ)
// Keep track of seen types to avoid infinite recursion.
if _, ok := c.seen[typPath]; !ok {
c.seen[typPath] = struct{}{}
// Return early if the type has already been seen, to avoid infinite recursion.
if _, ok := c.seen[typPath]; ok {
return nil
}
c.seen[typPath] = typ
// Return early directly if it's already been processed.
if _, ok := c.definitions[typPath]; ok {
@ -134,7 +164,6 @@ func (c *constructor) walk(typ reflect.Type) error {
var s Schema
var err error
// TODO: Narrow / widen down the number of Go types handled here.
switch typ.Kind() {
case reflect.Struct:
s, err = c.fromTypeStruct(typ)
@ -142,20 +171,20 @@ func (c *constructor) walk(typ reflect.Type) error {
s, err = c.fromTypeSlice(typ)
case reflect.Map:
s, err = c.fromTypeMap(typ)
// TODO: Should the primitive functions below be inlined?
case reflect.String:
s = Schema{Type: StringType}
case reflect.Bool:
s = Schema{Type: BooleanType}
// TODO: Add comment about reduced coverage of primitive Go types in the code paths here.
case reflect.Int:
case reflect.Int, reflect.Int32, reflect.Int64:
s = Schema{Type: IntegerType}
case reflect.Float32, reflect.Float64:
s = Schema{Type: NumberType}
case reflect.Interface:
// An interface value can never be serialized from text, and thus is explicitly
// set to null and disallowed in the schema.
s = Schema{Type: NullType}
// Interface or any types are not serialized to JSON by the default JSON
// unmarshaller (json.Unmarshal). They are thus likely parsed by the
// dynamic configuration tree, and we should support arbitrary values here.
// E.g.: variables.default can be anything.
s = Schema{}
default:
return fmt.Errorf("unsupported type: %s", typ.Kind())
}
@ -163,13 +192,7 @@ func (c *constructor) walk(typ reflect.Type) error {
return err
}
if c.fn != nil {
s = c.fn(s)
}
// Store definition for the type if it's part of a Go package and not a built-in type.
// TODO: Apply transformation at the end, to all definitions instead of
// during recursive traversal?
// Store the computed JSON schema for the type.
c.definitions[typPath] = s
return nil
}
@ -206,20 +229,15 @@ func getStructFields(typ reflect.Type) []reflect.StructField {
return fields
}
// TODO: get rid of the errors here and panic instead?
func (c *constructor) fromTypeStruct(typ reflect.Type) (Schema, error) {
if typ.Kind() != reflect.Struct {
return InvalidSchema, fmt.Errorf("expected struct, got %s", typ.Kind())
return Schema{}, fmt.Errorf("expected struct, got %s", typ.Kind())
}
res := Schema{
Type: ObjectType,
Properties: make(map[string]*Schema),
// TODO: Confirm that empty arrays are not serialized.
Required: []string{},
Type: ObjectType,
Properties: make(map[string]*Schema),
Required: []string{},
AdditionalProperties: false,
}
@ -240,6 +258,7 @@ func (c *constructor) fromTypeStruct(typ reflect.Type) (Schema, error) {
if jsonTags[0] == "" || jsonTags[0] == "-" || !structField.IsExported() {
continue
}
// "omitempty" tags in the Go SDK structs represent fields that not are
// required to be present in the API payload. Thus its absence in the
// tags list indicates that the field is required.
@ -247,19 +266,16 @@ func (c *constructor) fromTypeStruct(typ reflect.Type) (Schema, error) {
res.Required = append(res.Required, jsonTags[0])
}
// Walk the fields of the struct.
typPath := typePath(structField.Type)
// Only walk if the type has not been seen yet.
if _, ok := c.seen[typPath]; !ok {
// Trigger call to fromType, to recursively generate definitions for
// the struct field.
err := c.walk(structField.Type)
if err != nil {
return InvalidSchema, err
}
err := c.walk(structField.Type)
if err != nil {
return Schema{}, err
}
// For every property in the struct, add a $ref to the corresponding
// $defs block.
refPath := path.Join("#/$defs", typPath)
// For non-built-in types, refer to the definition.
res.Properties[jsonTags[0]] = &Schema{
Reference: &refPath,
}
@ -268,11 +284,9 @@ func (c *constructor) fromTypeStruct(typ reflect.Type) (Schema, error) {
return res, nil
}
// TODO: Add comments explaining the translation between struct, map, slice and
// the JSON schema representation.
func (c *constructor) fromTypeSlice(typ reflect.Type) (Schema, error) {
if typ.Kind() != reflect.Slice {
return InvalidSchema, fmt.Errorf("expected slice, got %s", typ.Kind())
return Schema{}, fmt.Errorf("expected slice, got %s", typ.Kind())
}
res := Schema{
@ -280,19 +294,16 @@ func (c *constructor) fromTypeSlice(typ reflect.Type) (Schema, error) {
}
typPath := typePath(typ.Elem())
// Only walk if the type has not been seen yet.
if _, ok := c.seen[typPath]; !ok {
// Trigger call to fromType, to recursively generate definitions for
// the slice element.
err := c.walk(typ.Elem())
if err != nil {
return InvalidSchema, err
}
// Walk the slice element type.
err := c.walk(typ.Elem())
if err != nil {
return Schema{}, err
}
refPath := path.Join("#/$defs", typPath)
// For non-built-in types, refer to the definition
// Add a $ref to the corresponding $defs block for the slice element type.
res.Items = &Schema{
Reference: &refPath,
}
@ -301,11 +312,11 @@ func (c *constructor) fromTypeSlice(typ reflect.Type) (Schema, error) {
func (c *constructor) fromTypeMap(typ reflect.Type) (Schema, error) {
if typ.Kind() != reflect.Map {
return InvalidSchema, fmt.Errorf("expected map, got %s", typ.Kind())
return Schema{}, fmt.Errorf("expected map, got %s", typ.Kind())
}
if typ.Key().Kind() != reflect.String {
return InvalidSchema, fmt.Errorf("found map with non-string key: %v", typ.Key())
return Schema{}, fmt.Errorf("found map with non-string key: %v", typ.Key())
}
res := Schema{
@ -313,19 +324,16 @@ func (c *constructor) fromTypeMap(typ reflect.Type) (Schema, error) {
}
typPath := typePath(typ.Elem())
// Only walk if the type has not been seen yet.
if _, ok := c.seen[typPath]; !ok {
// Trigger call to fromType, to recursively generate definitions for
// the map value.
err := c.walk(typ.Elem())
if err != nil {
return InvalidSchema, err
}
// Walk the map value type.
err := c.walk(typ.Elem())
if err != nil {
return Schema{}, err
}
refPath := path.Join("#/$defs", typPath)
// For non-built-in types, refer to the definition
// Add a $ref to the corresponding $defs block for the map value type.
res.AdditionalProperties = &Schema{
Reference: &refPath,
}

View File

@ -67,9 +67,7 @@ func TestFromTypeBasic(t *testing.T) {
expected: Schema{
Type: "object",
Definitions: map[string]any{
"interface": Schema{
Type: "null",
},
"interface": Schema{},
"string": Schema{
Type: "string",
},
@ -129,15 +127,6 @@ func TestFromTypeBasic(t *testing.T) {
s, err := FromType(tc.typ, nil)
assert.NoError(t, err)
assert.Equal(t, tc.expected, s)
// jsonSchema, err := json.MarshalIndent(s, " ", " ")
// assert.NoError(t, err)
// expectedJson, err := json.MarshalIndent(tc.expected, " ", " ")
// assert.NoError(t, err)
// t.Log("[DEBUG] actual: ", string(jsonSchema))
// t.Log("[DEBUG] expected: ", string(expectedJson))
})
}
}
@ -222,7 +211,7 @@ func TestFromTypeNested(t *testing.T) {
},
{
name: "struct as a map value",
typ: reflect.TypeOf(map[string]Inner{}),
typ: reflect.TypeOf(map[string]*Inner{}),
expected: Schema{
Type: "object",
Definitions: expectedDefinitions,
@ -252,7 +241,6 @@ func TestFromTypeNested(t *testing.T) {
}
}
// TODO: Call out in the PR description that recursive Go types are supported.
func TestFromTypeRecursive(t *testing.T) {
fooRef := "#/$defs/github.com/databricks/cli/libs/jsonschema/test_types.Foo"
barRef := "#/$defs/github.com/databricks/cli/libs/jsonschema/test_types.Bar"
@ -354,7 +342,122 @@ func TestFromTypeSelfReferential(t *testing.T) {
}
func TestFromTypeError(t *testing.T) {
// Maps with non-string keys should panic.
type mapOfInts map[int]int
_, err := FromType(reflect.TypeOf(mapOfInts{}), nil)
assert.EqualError(t, err, "found map with non-string key: int")
assert.PanicsWithValue(t, "found map with non-string key: int", func() {
FromType(reflect.TypeOf(mapOfInts{}), nil)
})
// Unsupported types should return an error.
_, err := FromType(reflect.TypeOf(complex64(0)), nil)
assert.EqualError(t, err, "unsupported type: complex64")
}
func TestFromTypeFunctionsArg(t *testing.T) {
type myStruct struct {
S string `json:"s"`
}
strRef := "#/$defs/string"
expected := Schema{
Type: "object",
Definitions: map[string]any{
"string": Schema{
Type: "string",
Description: "a string",
Enum: []any{"a", "b", "c"},
},
},
Properties: map[string]*Schema{
"s": {
Reference: &strRef,
},
},
AdditionalProperties: false,
Required: []string{"s"},
}
addDescription := func(typ reflect.Type, s Schema) Schema {
if typ.Kind() != reflect.String {
return s
}
s.Description = "a string"
return s
}
addEnums := func(typ reflect.Type, s Schema) Schema {
if typ.Kind() != reflect.String {
return s
}
s.Enum = []any{"a", "b", "c"}
return s
}
s, err := FromType(reflect.TypeOf(myStruct{}), []func(reflect.Type, Schema) Schema{
addDescription,
addEnums,
})
assert.NoError(t, err)
assert.Equal(t, expected, s)
}
func TestTypePath(t *testing.T) {
type myStruct struct{}
tcases := []struct {
typ reflect.Type
path string
}{
{
typ: reflect.TypeOf(""),
path: "string",
},
{
typ: reflect.TypeOf(int(0)),
path: "int",
},
{
typ: reflect.TypeOf(true),
path: "bool",
},
{
typ: reflect.TypeOf(float64(0)),
path: "float64",
},
{
typ: reflect.TypeOf(myStruct{}),
path: "github.com/databricks/cli/libs/jsonschema.myStruct",
},
{
typ: reflect.TypeOf([]int{}),
path: "slice/int",
},
{
typ: reflect.TypeOf(map[string]int{}),
path: "map/int",
},
{
typ: reflect.TypeOf([]myStruct{}),
path: "slice/github.com/databricks/cli/libs/jsonschema.myStruct",
},
{
typ: reflect.TypeOf([][]map[string]map[string]myStruct{}),
path: "slice/slice/map/map/github.com/databricks/cli/libs/jsonschema.myStruct",
},
{
typ: reflect.TypeOf(map[string]myStruct{}),
path: "map/github.com/databricks/cli/libs/jsonschema.myStruct",
},
}
for _, tc := range tcases {
t.Run(tc.typ.String(), func(t *testing.T) {
assert.Equal(t, tc.path, typePath(tc.typ))
})
}
// Maps with non-string keys should panic.
assert.PanicsWithValue(t, "found map with non-string key: int", func() {
typePath(reflect.TypeOf(map[int]int{}))
})
}