databricks-cli/bundle/trampoline/python_dbr_warning.go


package trampoline

import (
    "context"
    "strconv"
    "strings"

    "github.com/databricks/cli/bundle"
    "github.com/databricks/cli/bundle/libraries"
    "github.com/databricks/cli/libs/diag"
    "github.com/databricks/cli/libs/log"
    "github.com/databricks/databricks-sdk-go"
    "golang.org/x/mod/semver"
)

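// wrapperWarning is a mutator that checks that tasks using local (wheel) libraries
// target compute new enough to install them. When they do not, it reports an error
// pointing at the experimental 'python_wheel_wrapper' setting, which can be enabled
// in the bundle configuration, for example (assuming a typical databricks.yml):
//
//	experimental:
//	  python_wheel_wrapper: true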
type wrapperWarning struct {
}

// WrapperWarning returns a mutator that performs this check.
func WrapperWarning() bundle.Mutator {
    return &wrapperWarning{}
}

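// Apply returns an error diagnostic when tasks with local libraries run on clusters
// whose DBR version is too old to install them, unless the Python wheel wrapper is on.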
func (m *wrapperWarning) Apply(ctx context.Context, b *bundle.Bundle) diag.Diagnostics {
    if isPythonWheelWrapperOn(b) {
        return nil
    }

    if hasIncompatibleWheelTasks(ctx, b) {
        return diag.Errorf("Python wheel tasks require compute with DBR 13.3+ to include local libraries. Please change your cluster configuration or use the experimental 'python_wheel_wrapper' setting. See https://docs.databricks.com/dev-tools/bundles/python-wheel.html for more information.")
    }
    return nil
}

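// isPythonWheelWrapperOn reports whether the experimental python_wheel_wrapper
// setting is enabled in the bundle configuration.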
func isPythonWheelWrapperOn(b *bundle.Bundle) bool {
    return b.Config.Experimental != nil && b.Config.Experimental.PythonWheelWrapper
}

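// hasIncompatibleWheelTasks reports whether any task with local libraries targets
// compute (new cluster, job cluster, or existing cluster) whose Spark version is
// below the supported minimum.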
func hasIncompatibleWheelTasks(ctx context.Context, b *bundle.Bundle) bool {
    tasks := libraries.FindTasksWithLocalLibraries(b)
    for _, task := range tasks {
        if task.NewCluster != nil {
            if lowerThanExpectedVersion(task.NewCluster.SparkVersion) {
                return true
            }
        }

        if task.JobClusterKey != "" {
            for _, job := range b.Config.Resources.Jobs {
                for _, cluster := range job.JobClusters {
                    if task.JobClusterKey == cluster.JobClusterKey && cluster.NewCluster.SparkVersion != "" {
                        if lowerThanExpectedVersion(cluster.NewCluster.SparkVersion) {
                            return true
                        }
                    }
                }
            }
        }

        if task.ExistingClusterId != "" {
            version, err := getSparkVersionForCluster(ctx, b.WorkspaceClient(), task.ExistingClusterId)
            // If there is an error getting the Spark version for the cluster, do not mark it as incompatible.
            if err != nil {
                log.Warnf(ctx, "unable to get spark version for cluster %s, err: %s", task.ExistingClusterId, err.Error())
                return false
            }

            if lowerThanExpectedVersion(version) {
                return true
            }
        }
    }

    return false
}

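// lowerThanExpectedVersion reports whether the given Spark version string is below
// the minimum this check compares against (13.1). Versions such as "13.x" are treated
// as the latest minor of that major release. For example, "12.2.x-scala2.12" is
// considered too low, while "13.3.x-scala2.12" is not.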
func lowerThanExpectedVersion(sparkVersion string) bool {
    parts := strings.Split(sparkVersion, ".")
    if len(parts) < 2 {
        return false
    }

    if parts[1][0] == 'x' { // treat versions like 13.x as the very latest minor (13.99)
        parts[1] = "99"
    }

    // If any of the version parts are not numbers, we can't compare,
    // so consider it a compatible version.
    if _, err := strconv.Atoi(parts[0]); err != nil {
        return false
    }

    if _, err := strconv.Atoi(parts[1]); err != nil {
        return false
    }

    v := "v" + parts[0] + "." + parts[1]
    return semver.Compare(v, "v13.1") < 0
}

// Name implements bundle.Mutator.
func (m *wrapperWarning) Name() string {
    return "PythonWrapperWarning"
}

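// getSparkVersionForCluster looks up an existing cluster by ID through the workspace
// API and returns its configured Spark version.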
func getSparkVersionForCluster(ctx context.Context, w *databricks.WorkspaceClient, clusterId string) (string, error) {
    details, err := w.Clusters.GetByClusterId(ctx, clusterId)
    if err != nil {
        return "", err
    }

    return details.SparkVersion, nil
}