From 5906276aefe1c2db684c8f3c1287e1c57974c7b5 Mon Sep 17 00:00:00 2001
From: Philip Laine <philip.laine@gmail.com>
Date: Thu, 16 Jan 2025 17:30:45 +0100
Subject: [PATCH] Refactor deploy package
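
Moves package deployment in packager2 from Packager methods to plain
functions that receive their dependencies (cluster client, package
layout, variable config, Zarf state) as explicit arguments, and adds a
packager2-local helm package. Deploy now returns the components it
deployed.

Illustrative call pattern for the new entry point (assuming the package
is imported as packager2; the argument values are examples only):

    components, err := packager2.Deploy(ctx, packager2.DeployOptions{
        Path:               "zarf-package-example-amd64.tar.zst",
        OptionalComponents: "optional-a,optional-b",
        SetVariables:       map[string]string{"DOMAIN": "example.com"},
    })

Charts and manifests are installed through the new helm.New options
instead of Packager state, roughly:

    helmCfg := helm.New(chart, chartDir, valuesDir,
        helm.WithDeployInfo(variableConfig, state, c, valuesOverrides,
            adoptExistingResources, pkgLayout.Pkg.Metadata.YOLO,
            timeout, retries))
    connectStrings, releaseName, err := helmCfg.InstallOrUpgradeChart(ctx)

Still commented out in this WIP: init-package component handling, file
processing, registry HPA scale-down handling, and values overrides from
deploy opts.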

---
 src/internal/packager2/deploy.go           | 594 ++++++++++++++-------
 src/internal/packager2/helm/chart.go       | 512 ++++++++++++++++++
 src/internal/packager2/helm/common.go      | 178 ++++++
 src/internal/packager2/helm/destroy.go     |  75 +++
 src/internal/packager2/helm/images.go      |  57 ++
 src/internal/packager2/helm/post-render.go | 291 ++++++++++
 src/internal/packager2/helm/repo.go        | 390 ++++++++++++++
 src/internal/packager2/helm/utils.go       |  87 +++
 src/internal/packager2/helm/zarf.go        | 169 ++++++
 src/pkg/cluster/data.go                    | 122 +++++
 10 files changed, 2277 insertions(+), 198 deletions(-)
 create mode 100644 src/internal/packager2/helm/chart.go
 create mode 100644 src/internal/packager2/helm/common.go
 create mode 100644 src/internal/packager2/helm/destroy.go
 create mode 100644 src/internal/packager2/helm/images.go
 create mode 100644 src/internal/packager2/helm/post-render.go
 create mode 100644 src/internal/packager2/helm/repo.go
 create mode 100644 src/internal/packager2/helm/utils.go
 create mode 100644 src/internal/packager2/helm/zarf.go

diff --git a/src/internal/packager2/deploy.go b/src/internal/packager2/deploy.go
index cde73de4f1..fc32730ed9 100644
--- a/src/internal/packager2/deploy.go
+++ b/src/internal/packager2/deploy.go
@@ -5,6 +5,7 @@ import (
 	"errors"
 	"fmt"
 	"os"
+	"path/filepath"
 	"regexp"
 	"runtime"
 	"slices"
@@ -12,20 +13,27 @@ import (
 	"time"
 
 	"github.com/Masterminds/semver"
+	"github.com/defenseunicorns/pkg/helpers/v2"
 	"golang.org/x/sync/errgroup"
+	corev1 "k8s.io/api/core/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
 	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
 
 	"github.com/zarf-dev/zarf/src/api/v1alpha1"
 	"github.com/zarf-dev/zarf/src/config"
 	"github.com/zarf-dev/zarf/src/config/lang"
 	"github.com/zarf-dev/zarf/src/internal/healthchecks"
+	"github.com/zarf-dev/zarf/src/internal/packager/template"
+	actions2 "github.com/zarf-dev/zarf/src/internal/packager2/actions"
+	"github.com/zarf-dev/zarf/src/internal/packager2/helm"
 	layout2 "github.com/zarf-dev/zarf/src/internal/packager2/layout"
 	"github.com/zarf-dev/zarf/src/pkg/cluster"
 	"github.com/zarf-dev/zarf/src/pkg/logger"
 	"github.com/zarf-dev/zarf/src/pkg/message"
-	"github.com/zarf-dev/zarf/src/pkg/packager/actions"
 	"github.com/zarf-dev/zarf/src/pkg/packager/deprecated"
 	"github.com/zarf-dev/zarf/src/pkg/packager/filters"
+	"github.com/zarf-dev/zarf/src/pkg/utils"
+	"github.com/zarf-dev/zarf/src/pkg/variables"
 	"github.com/zarf-dev/zarf/src/types"
 )
 
@@ -35,114 +43,76 @@ var (
 )
 
 type DeployOptions struct {
+	Path               string
 	OptionalComponents string
+	SetVariables       map[string]string
 }
 
-func Deploy(ctx context.Context, opt DeployOptions) error {
+func Deploy(ctx context.Context, opt DeployOptions) ([]types.DeployedComponent, error) {
 	l := logger.From(ctx)
 	start := time.Now()
-	isInteractive := !config.CommonOptions.Confirm
-
-	deployFilter := filters.Combine(
-		filters.ByLocalOS(runtime.GOOS),
-		filters.ForDeploy(opt.OptionalComponents, isInteractive),
-	)
 
-	var pkgLayout layout2.PackageLayout
+	isInteractive := !config.CommonOptions.Confirm
+	deployFilter := filters.Combine(
+		filters.ByLocalOS(runtime.GOOS),
+		filters.ForDeploy(opt.OptionalComponents, isInteractive),
+	)
+	// Interactive runs load everything so the deploy filter can prompt after confirmation.
+	loadOpts := LoadOptions{Filter: deployFilter}
+	if isInteractive {
+		loadOpts.Filter = filters.Empty()
+	}
+	pkgLayout, err := LoadPackage(ctx, loadOpts)
+	if err != nil {
+		return nil, err
+	}
 
 	warnings := []string{}
-	// if isInteractive {
-	// 	filter := filters.Empty()
-	// 	pkg, loadWarnings, err := p.source.LoadPackage(ctx, p.layout, filter, true)
-	// 	if err != nil {
-	// 		return fmt.Errorf("unable to load the package: %w", err)
-	// 	}
-	// 	p.cfg.Pkg = pkg
-	// 	warnings = append(warnings, loadWarnings...)
-	// } else {
-	// 	pkg, loadWarnings, err := p.source.LoadPackage(ctx, p.layout, deployFilter, true)
-	// 	if err != nil {
-	// 		return fmt.Errorf("unable to load the package: %w", err)
-	// 	}
-	// 	p.cfg.Pkg = pkg
-	// 	warnings = append(warnings, loadWarnings...)
-	// 	if err := p.populatePackageVariableConfig(); err != nil {
-	// 		return fmt.Errorf("unable to set the active variables: %w", err)
-	// 	}
-	// }
-
-	// validateWarnings, err := validateLastNonBreakingVersion(config.CLIVersion, p.cfg.Pkg.Build.LastNonBreakingVersion)
-	// if err != nil {
-	// 	return err
-	// }
-	// warnings = append(warnings, validateWarnings...)
-
-	// sbomViewFiles, sbomWarnings, err := p.layout.SBOMs.StageSBOMViewFiles()
-	// if err != nil {
-	// 	return err
-	// }
-	// warnings = append(warnings, sbomWarnings...)
-
-	// Confirm the overall package deployment
-	// if !p.confirmAction(ctx, config.ZarfDeployStage, warnings, sbomViewFiles) {
-	// 	return fmt.Errorf("deployment cancelled")
-	// }
+	validateWarnings, err := validateLastNonBreakingVersion(config.CLIVersion, pkgLayout.Pkg.Build.LastNonBreakingVersion)
+	if err != nil {
+		return nil, err
+	}
+	warnings = append(warnings, validateWarnings...)
 
 	if isInteractive {
-		p.cfg.Pkg.Components, err = deployFilter.Apply(p.cfg.Pkg)
+		var err error
+		pkgLayout.Pkg.Components, err = deployFilter.Apply(pkgLayout.Pkg)
 		if err != nil {
-			return err
-		}
-
-		// Set variables and prompt if --confirm is not set
-		if err := p.populatePackageVariableConfig(); err != nil {
-			return fmt.Errorf("unable to set the active variables: %w", err)
+			return nil, err
 		}
 	}
 
+	variableConfig := template.GetZarfVariableConfig(ctx)
+	variableConfig.SetConstants(pkgLayout.Pkg.Constants)
+	variableConfig.PopulateVariables(pkgLayout.Pkg.Variables, opt.SetVariables)
+
 	// p.hpaModified = false
-	// // Reset registry HPA scale down whether an error occurs or not
+	// Reset registry HPA scale down whether an error occurs or not
 	// defer p.resetRegistryHPA(ctx)
 
-	// Get a list of all the components we are deploying and actually deploy them
-	deployedComponents, err := deployComponents(ctx)
-	if err != nil {
-		return err
-	}
-	if len(deployedComponents) == 0 {
-		message.Warn("No components were selected for deployment.  Inspect the package to view the available components and select components interactively or by name with \"--components\"")
-		l.Warn("no components were selected for deployment. Inspect the package to view the available components and select components interactively or by name with \"--components\"")
-	}
-
-	// Notify all the things about the successful deployment
-	message.Successf("Zarf deployment complete")
-	l.Debug("Zarf deployment complete", "duration", time.Since(start))
-
-	// err = p.printTablesForDeployment(ctx, deployedComponents)
-	// if err != nil {
-	// 	return err
-	// }
-
-	return nil
-}
-
-// deployComponents loops through a list of ZarfComponents and deploys them.
-func deployComponents(ctx context.Context) ([]types.DeployedComponent, error) {
-	l := logger.From(ctx)
-	deployedComponents := []types.DeployedComponent{}
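+	// The cluster connection and Zarf state are initialized lazily by the first component that requires them.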
+	var deployedComponents []types.DeployedComponent
+	var c *cluster.Cluster
+	var state *types.ZarfState
 
 	// Process all the components we are deploying
-	for _, component := range p.cfg.Pkg.Components {
+	for _, component := range pkgLayout.Pkg.Components {
 		// Connect to cluster if a component requires it.
-		if component.RequiresCluster() {
+		if component.RequiresCluster() && c == nil {
 			timeout := cluster.DefaultTimeout
-			if p.cfg.Pkg.IsInitConfig() {
+			if pkgLayout.Pkg.IsInitConfig() {
 				timeout = 5 * time.Minute
 			}
 			connectCtx, cancel := context.WithTimeout(ctx, timeout)
 			defer cancel()
-			if err := p.connectToCluster(connectCtx); err != nil {
-				return nil, fmt.Errorf("unable to connect to the Kubernetes cluster: %w", err)
+			var err error
+			c, err = cluster.NewClusterWithWait(connectCtx)
+			if err != nil {
+				return nil, err
+			}
+
+			if state == nil {
+				state, err = setupState(ctx, c, pkgLayout.Pkg)
+				if err != nil {
+					return nil, err
+				}
 			}
 		}
 
@@ -151,8 +121,8 @@ func deployComponents(ctx context.Context) ([]types.DeployedComponent, error) {
 		}
 
 		// Ensure we don't overwrite any installedCharts data when updating the package secret
-		if p.isConnectedToCluster() {
-			installedCharts, err := p.cluster.GetInstalledChartsForComponent(ctx, p.cfg.Pkg.Metadata.Name, component)
+		if c != nil {
+			installedCharts, err := c.GetInstalledChartsForComponent(ctx, pkgLayout.Pkg.Metadata.Name, component)
 			if err != nil {
 				message.Debugf("Unable to fetch installed Helm charts for component '%s': %s", component.Name, err.Error())
 				l.Debug("unable to fetch installed Helm charts", "component", component.Name, "error", err.Error())
@@ -166,16 +136,16 @@ func deployComponents(ctx context.Context) ([]types.DeployedComponent, error) {
 		// Deploy the component
 		var charts []types.InstalledChart
 		var deployErr error
-		if p.cfg.Pkg.IsInitConfig() {
-			charts, deployErr = p.deployInitComponent(ctx, component)
+		if pkgLayout.Pkg.IsInitConfig() {
+			// charts, deployErr = deployInitComponent(ctx, c, component)
 		} else {
-			charts, deployErr = p.deployComponent(ctx, component, false, false)
+			charts, deployErr = deployComponent(ctx, c, pkgLayout, component, variableConfig, state)
 		}
 
 		onDeploy := component.Actions.OnDeploy
 
 		onFailure := func() {
-			if err := actions.Run(ctx, onDeploy.Defaults, onDeploy.OnFailure, p.variableConfig); err != nil {
+			if err := actions2.Run(ctx, opt.Path, onDeploy.Defaults, onDeploy.OnFailure, variableConfig); err != nil {
 				message.Debugf("unable to run component failure action: %s", err.Error())
 				l.Debug("unable to run component failure action", "error", err.Error())
 			}
@@ -183,9 +153,8 @@ func deployComponents(ctx context.Context) ([]types.DeployedComponent, error) {
 
 		if deployErr != nil {
 			onFailure()
-
-			if p.isConnectedToCluster() {
-				if _, err := p.cluster.RecordPackageDeployment(ctx, p.cfg.Pkg, deployedComponents); err != nil {
+			if c != nil {
+				if _, err := c.RecordPackageDeployment(ctx, pkgLayout.Pkg, deployedComponents); err != nil {
 					message.Debugf("Unable to record package deployment for component %q: this will affect features like `zarf package remove`: %s", component.Name, err.Error())
 					l.Debug("unable to record package deployment", "component", component.Name, "error", err.Error())
 				}
@@ -195,139 +164,78 @@ func deployComponents(ctx context.Context) ([]types.DeployedComponent, error) {
 
 		// Update the package secret to indicate that we successfully deployed this component
 		deployedComponents[idx].InstalledCharts = charts
-		if p.isConnectedToCluster() {
-			if _, err := p.cluster.RecordPackageDeployment(ctx, p.cfg.Pkg, deployedComponents); err != nil {
+		if c != nil {
+			if _, err := c.RecordPackageDeployment(ctx, pkgLayout.Pkg, deployedComponents); err != nil {
 				message.Debugf("Unable to record package deployment for component %q: this will affect features like `zarf package remove`: %s", component.Name, err.Error())
 				l.Debug("unable to record package deployment", "component", component.Name, "error", err.Error())
 			}
 		}
 
-		if err := actions.Run(ctx, onDeploy.Defaults, onDeploy.OnSuccess, p.variableConfig); err != nil {
+		if err := actions2.Run(ctx, opt.Path, onDeploy.Defaults, onDeploy.OnSuccess, variableConfig); err != nil {
 			onFailure()
 			return nil, fmt.Errorf("unable to run component success action: %w", err)
 		}
 	}
 
-	return deployedComponents, nil
-}
-
-func deployInitComponent(ctx context.Context, component v1alpha1.ZarfComponent) ([]types.InstalledChart, error) {
-	l := logger.From(ctx)
-	hasExternalRegistry := p.cfg.InitOpts.RegistryInfo.Address != ""
-	isSeedRegistry := component.Name == "zarf-seed-registry"
-	isRegistry := component.Name == "zarf-registry"
-	isInjector := component.Name == "zarf-injector"
-	isAgent := component.Name == "zarf-agent"
-	isK3s := component.Name == "k3s"
-
-	if isK3s {
-		p.cfg.InitOpts.ApplianceMode = true
-	}
-
-	// Always init the state before the first component that requires the cluster (on most deployments, the zarf-seed-registry)
-	if component.RequiresCluster() && p.state == nil {
-		err := p.cluster.InitZarfState(ctx, p.cfg.InitOpts)
-		if err != nil {
-			return nil, fmt.Errorf("unable to initialize Zarf state: %w", err)
-		}
-	}
-
-	if hasExternalRegistry && (isSeedRegistry || isInjector || isRegistry) {
-		message.Notef("Not deploying the component (%s) since external registry information was provided during `zarf init`", component.Name)
-		l.Info("skipping init package component since external registry information was provided", "component", component.Name)
-		return nil, nil
-	}
-
-	if isRegistry {
-		// If we are deploying the registry then mark the HPA as "modified" to set it to Min later
-		p.hpaModified = true
-	}
-
-	// Before deploying the seed registry, start the injector
-	if isSeedRegistry {
-		err := p.cluster.StartInjection(ctx, p.layout.Base, p.layout.Images.Base, component.Images)
-		if err != nil {
-			return nil, err
-		}
-	}
-
-	// Skip image checksum if component is agent.
-	// Skip image push if component is seed registry.
-	charts, err := p.deployComponent(ctx, component, isAgent, isSeedRegistry)
-	if err != nil {
-		return nil, err
+	if len(deployedComponents) == 0 {
+		message.Warn("No components were selected for deployment.  Inspect the package to view the available components and select components interactively or by name with \"--components\"")
+		l.Warn("no components were selected for deployment. Inspect the package to view the available components and select components interactively or by name with \"--components\"")
 	}
 
-	// Do cleanup for when we inject the seed registry during initialization
-	if isSeedRegistry {
-		if err := p.cluster.StopInjection(ctx); err != nil {
-			return nil, fmt.Errorf("failed to delete injector resources: %w", err)
-		}
-	}
+	// Notify all the things about the successful deployment
+	message.Successf("Zarf deployment complete")
+	l.Debug("Zarf deployment complete", "duration", time.Since(start))
 
-	return charts, nil
+	return deployedComponents, nil
 }
 
-func deployComponent(ctx context.Context, component v1alpha1.ZarfComponent, noImgChecksum bool, noImgPush bool) ([]types.InstalledChart, error) {
+func deployComponent(ctx context.Context, c *cluster.Cluster, pkgLayout *layout2.PackageLayout, component v1alpha1.ZarfComponent, variableConfig *variables.VariableConfig, state *types.ZarfState) ([]types.InstalledChart, error) {
+	retries := 3
+	noImgChecksum := false
+	noImgPush := false
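+	// TODO: Make configurable - the init flow previously set noImgChecksum for the agent and noImgPush for the seed registry.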
+
 	l := logger.From(ctx)
 	start := time.Now()
-	// Toggles for general deploy operations
-	componentPath := p.layout.Components.Dirs[component.Name]
 
 	message.HeaderInfof("📦 %s COMPONENT", strings.ToUpper(component.Name))
 	l.Info("deploying component", "name", component.Name)
 
-	hasImages := len(component.Images) > 0 && !noImgPush
-	hasCharts := len(component.Charts) > 0
-	hasManifests := len(component.Manifests) > 0
-	hasRepos := len(component.Repos) > 0
-	hasFiles := len(component.Files) > 0
-
 	onDeploy := component.Actions.OnDeploy
 
-	if component.RequiresCluster() {
-		// Setup the state in the config
-		if p.state == nil {
-			err := p.setupState(ctx)
-			if err != nil {
-				return nil, err
-			}
-		}
-
-		// Disable the registry HPA scale down if we are deploying images and it is not already disabled
-		if hasImages && !p.hpaModified && p.state.RegistryInfo.IsInternal() {
-			if err := p.cluster.DisableRegHPAScaleDown(ctx); err != nil {
-				message.Debugf("unable to disable the registry HPA scale down: %s", err.Error())
-				l.Debug("unable to disable the registry HPA scale down", "error", err.Error())
-			} else {
-				p.hpaModified = true
-			}
-		}
-	}
+	// if component.RequiresCluster() {
+	// Disable the registry HPA scale down if we are deploying images and it is not already disabled
+	// if hasImages && !p.hpaModified && p.state.RegistryInfo.IsInternal() {
+	// 	if err := p.cluster.DisableRegHPAScaleDown(ctx); err != nil {
+	// 		message.Debugf("unable to disable the registry HPA scale down: %s", err.Error())
+	// 		l.Debug("unable to disable the registry HPA scale down", "error", err.Error())
+	// 	} else {
+	// 		p.hpaModified = true
+	// 	}
+	// }
+	// }
 
-	err := p.populateComponentAndStateTemplates(ctx, component.Name)
+	applicationTemplates, err := template.GetZarfTemplates(ctx, component.Name, state)
 	if err != nil {
 		return nil, err
 	}
+	variableConfig.SetApplicationTemplates(applicationTemplates)
 
-	if err = actions.Run(ctx, onDeploy.Defaults, onDeploy.Before, p.variableConfig); err != nil {
+	if err = actions2.Run(ctx, "", onDeploy.Defaults, onDeploy.Before, variableConfig); err != nil {
 		return nil, fmt.Errorf("unable to run component before action: %w", err)
 	}
 
-	if hasFiles {
-		if err := p.processComponentFiles(ctx, component, componentPath.Files); err != nil {
-			return nil, fmt.Errorf("unable to process the component files: %w", err)
-		}
-	}
-
-	if hasImages {
-		if err := p.pushImagesToRegistry(ctx, component.Images, noImgChecksum); err != nil {
+	// if len(component.Files) > 0 {
+	// 	if err := processComponentFiles(ctx, pkgLayout, component, variableConfig); err != nil {
+	// 		return nil, fmt.Errorf("unable to process the component files: %w", err)
+	// 	}
+	// }
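+	// NOTE: the zero-value RegistryInfo/GitServerInfo below are placeholders until connection details are wired in from Zarf state.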
+	if len(component.Images) > 0 && !noImgPush {
+		if err := pushImagesToRegistry(ctx, c, pkgLayout, filters.Empty(), types.RegistryInfo{}, noImgChecksum, retries); err != nil {
 			return nil, fmt.Errorf("unable to push images to the registry: %w", err)
 		}
 	}
-
-	if hasRepos {
-		if err = p.pushReposToRepository(ctx, componentPath.Repos, component.Repos); err != nil {
+	if len(component.Repos) > 0 {
+		if err = pushReposToRepository(ctx, c, pkgLayout, filters.Empty(), types.GitServerInfo{}, retries); err != nil {
 			return nil, fmt.Errorf("unable to push the repos to the repository: %w", err)
 		}
 	}
@@ -335,29 +243,40 @@ func deployComponent(ctx context.Context, component v1alpha1.ZarfComponent, noIm
 	g, gCtx := errgroup.WithContext(ctx)
 	for idx, data := range component.DataInjections {
 		g.Go(func() error {
-			return p.cluster.HandleDataInjection(gCtx, data, componentPath, idx)
+			tmp, err := utils.MakeTempDir(config.CommonOptions.TempDirectory)
+			if err != nil {
+				return err
+			}
+			defer os.RemoveAll(tmp)
+			dataPath, err := pkgLayout.GetComponentDir(tmp, component.Name, layout2.DataComponentDir)
+			if err != nil {
+				return err
+			}
+			return c.InjectData(gCtx, data, dataPath, idx)
 		})
 	}
 
 	charts := []types.InstalledChart{}
-	if hasCharts || hasManifests {
-		charts, err = p.installChartAndManifests(ctx, componentPath, component)
+	if len(component.Charts) > 0 || len(component.Manifests) > 0 {
+		charts, err = installChartAndManifests(ctx, c, pkgLayout, component, variableConfig, state)
 		if err != nil {
 			return nil, err
 		}
 	}
 
-	if err = actions.Run(ctx, onDeploy.Defaults, onDeploy.After, p.variableConfig); err != nil {
+	if err = actions2.Run(ctx, "", onDeploy.Defaults, onDeploy.After, variableConfig); err != nil {
 		return nil, fmt.Errorf("unable to run component after action: %w", err)
 	}
 
 	if len(component.HealthChecks) > 0 {
-		healthCheckContext, cancel := context.WithTimeout(ctx, p.cfg.DeployOpts.Timeout)
+		// TODO: Make configurable
+		deployTimeout := 5 * time.Minute
+		healthCheckContext, cancel := context.WithTimeout(ctx, deployTimeout)
 		defer cancel()
 		spinner := message.NewProgressSpinner("Running health checks")
 		l.Info("running health checks")
 		defer spinner.Stop()
-		if err = healthchecks.Run(healthCheckContext, p.cluster.Watcher, component.HealthChecks); err != nil {
+		if err = healthchecks.Run(healthCheckContext, c.Watcher, component.HealthChecks); err != nil {
 			return nil, fmt.Errorf("health checks failed: %w", err)
 		}
 		spinner.Success()
@@ -452,3 +371,282 @@ func validateLastNonBreakingVersion(cliVersion, lastNonBreakingVersion string) (
 	}
 	return nil, nil
 }
+
+func installChartAndManifests(ctx context.Context, c *cluster.Cluster, pkgLayout *layout2.PackageLayout, component v1alpha1.ZarfComponent, variableConfig *variables.VariableConfig, state *types.ZarfState) ([]types.InstalledChart, error) {
+	timeout := 10 * time.Second
+	retries := 3
+	adoptExistingResources := true
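+	// TODO: Make configurable once deploy options are plumbed through; hardcoded defaults for now.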
+
+	tmp, err := utils.MakeTempDir(config.CommonOptions.TempDirectory)
+	if err != nil {
+		return nil, err
+	}
+	defer os.RemoveAll(tmp)
+	valuesDir, err := pkgLayout.GetComponentDir(tmp, component.Name, layout2.ValuesComponentDir)
+	if err != nil {
+		return nil, err
+	}
+	chartDir, err := pkgLayout.GetComponentDir(tmp, component.Name, layout2.ChartsComponentDir)
+	if err != nil {
+		return nil, err
+	}
+	manifestsDir, err := pkgLayout.GetComponentDir(tmp, component.Name, layout2.ManifestsComponentDir)
+	if err != nil {
+		return nil, err
+	}
+
+	installedCharts := []types.InstalledChart{}
+	for _, chart := range component.Charts {
+		// Do not wait for the chart to be ready if data injections are present.
+		if len(component.DataInjections) > 0 {
+			chart.NoWait = true
+		}
+
+		// Template Zarf variables into each values file before handing it to Helm.
+		for idx := range chart.ValuesFiles {
+			valueFilePath := helm.StandardValuesName(valuesDir, chart, idx)
+			if err := variableConfig.ReplaceTextTemplate(valueFilePath); err != nil {
+				return nil, err
+			}
+		}
+
+		// Create a Helm values overrides map from set Zarf `variables` and DeployOpts library inputs
+		// Values overrides are to be applied in order of Helm Chart Defaults -> Zarf `valuesFiles` -> Zarf `variables` -> DeployOpts overrides
+		valuesOverrides, err := generateValuesOverrides(chart, variableConfig, component.Name)
+		if err != nil {
+			return nil, err
+		}
+
+		helmCfg := helm.New(
+			chart,
+			chartDir,
+			valuesDir,
+			helm.WithDeployInfo(
+				variableConfig,
+				state,
+				c,
+				valuesOverrides,
+				adoptExistingResources,
+				pkgLayout.Pkg.Metadata.YOLO,
+				timeout,
+				retries,
+			),
+		)
+		connectStrings, installedChartName, err := helmCfg.InstallOrUpgradeChart(ctx)
+		if err != nil {
+			return nil, err
+		}
+		installedCharts = append(installedCharts, types.InstalledChart{Namespace: chart.Namespace, ChartName: installedChartName, ConnectStrings: connectStrings})
+	}
+
+	for _, manifest := range component.Manifests {
+		for idx := range manifest.Files {
+			if helpers.InvalidPath(filepath.Join(manifestsDir, manifest.Files[idx])) {
+				// The path is likely invalid because of how we compose OCI components, add an index suffix to the filename
+				manifest.Files[idx] = fmt.Sprintf("%s-%d.yaml", manifest.Name, idx)
+				if helpers.InvalidPath(filepath.Join(manifestsDir, manifest.Files[idx])) {
+					return nil, fmt.Errorf("unable to find manifest file %s", manifest.Files[idx])
+				}
+			}
+		}
+		// Move kustomizations to files now
+		for idx := range manifest.Kustomizations {
+			kustomization := fmt.Sprintf("kustomization-%s-%d.yaml", manifest.Name, idx)
+			manifest.Files = append(manifest.Files, kustomization)
+		}
+
+		if manifest.Namespace == "" {
+			// Helm gets sad when you don't provide a namespace even though we aren't using helm templating
+			manifest.Namespace = corev1.NamespaceDefault
+		}
+
+		// Create a chart and helm cfg from a given Zarf Manifest.
+		helmCfg, err := helm.NewFromZarfManifest(
+			manifest,
+			manifestsDir,
+			pkgLayout.Pkg.Metadata.Name,
+			component.Name,
+			helm.WithDeployInfo(
+				variableConfig,
+				state,
+				c,
+				nil,
+				adoptExistingResources,
+				pkgLayout.Pkg.Metadata.YOLO,
+				timeout,
+				retries,
+			),
+		)
+		if err != nil {
+			return nil, err
+		}
+
+		// Install the chart.
+		connectStrings, installedChartName, err := helmCfg.InstallOrUpgradeChart(ctx)
+		if err != nil {
+			return nil, err
+		}
+		installedCharts = append(installedCharts, types.InstalledChart{Namespace: manifest.Namespace, ChartName: installedChartName, ConnectStrings: connectStrings})
+	}
+
+	return installedCharts, nil
+}
+
+func generateValuesOverrides(chart v1alpha1.ZarfChart, variableConfig *variables.VariableConfig, componentName string) (map[string]any, error) {
+	valuesOverrides := make(map[string]any)
+	chartOverrides := make(map[string]any)
+
+	for _, variable := range chart.Variables {
+		if setVar, ok := variableConfig.GetSetVariable(variable.Name); ok && setVar != nil {
+			// Use the variable's path as a key to ensure unique entries for variables with the same name but different paths.
+			if err := helpers.MergePathAndValueIntoMap(chartOverrides, variable.Path, setVar.Value); err != nil {
+				return nil, fmt.Errorf("unable to merge path and value into map: %w", err)
+			}
+		}
+	}
+
+	// Apply any direct overrides specified in the deployment options for this component and chart
+	// if componentOverrides, ok := p.cfg.DeployOpts.ValuesOverridesMap[componentName]; ok {
+	// 	if chartSpecificOverrides, ok := componentOverrides[chart.Name]; ok {
+	// 		valuesOverrides = chartSpecificOverrides
+	// 	}
+	// }
+
+	// Merge the variable-driven chartOverrides with valuesOverrides so both sets of overrides reach the chart.
+	return helpers.MergeMapRecursive(chartOverrides, valuesOverrides), nil
+}
+
+// Move files onto the host of the machine performing the deployment.
+// func processComponentFiles(ctx context.Context, pkgLayout *layout2.PackageLayout, component v1alpha1.ZarfComponent, variableConfig *variables.VariableConfig) error {
+// 	l := logger.From(ctx)
+// 	spinner := message.NewProgressSpinner("Copying %d files", len(component.Files))
+// 	start := time.Now()
+// 	l.Info("copying files", "count", len(component.Files))
+// 	defer spinner.Stop()
+
+// 	for fileIdx, file := range component.Files {
+// 		spinner.Updatef("Loading %s", file.Target)
+// 		l.Info("loading file", "name", file.Target)
+
+// 		fileLocation := filepath.Join(pkgLocation, strconv.Itoa(fileIdx), filepath.Base(file.Target))
+// 		if helpers.InvalidPath(fileLocation) {
+// 			fileLocation = filepath.Join(pkgLocation, strconv.Itoa(fileIdx))
+// 		}
+
+// 		// If a shasum is specified check it again on deployment as well
+// 		if file.Shasum != "" {
+// 			spinner.Updatef("Validating SHASUM for %s", file.Target)
+// 			l.Debug("Validating SHASUM", "file", file.Target)
+// 			if err := helpers.SHAsMatch(fileLocation, file.Shasum); err != nil {
+// 				return err
+// 			}
+// 		}
+
+// 		// Replace temp target directory and home directory
+// 		var err error
+// 		// target, err := config.GetAbsHomePath(strings.Replace(file.Target, "###ZARF_TEMP###", p.layout.Base, 1))
+// 		// if err != nil {
+// 		// 	return err
+// 		// }
+// 		// file.Target = target
+
+// 		fileList := []string{}
+// 		if helpers.IsDir(fileLocation) {
+// 			files, _ := helpers.RecursiveFileList(fileLocation, nil, false)
+// 			fileList = append(fileList, files...)
+// 		} else {
+// 			fileList = append(fileList, fileLocation)
+// 		}
+
+// 		for _, subFile := range fileList {
+// 			// Check if the file looks like a text file
+// 			isText, err := helpers.IsTextFile(subFile)
+// 			if err != nil {
+// 				return err
+// 			}
+
+// 			// If the file is a text file, template it
+// 			if isText {
+// 				spinner.Updatef("Templating %s", file.Target)
+// 				l.Debug("template file", "name", file.Target)
+// 				if err := variableConfig.ReplaceTextTemplate(subFile); err != nil {
+// 					return fmt.Errorf("unable to template file %s: %w", subFile, err)
+// 				}
+// 			}
+// 		}
+
+// 		// Copy the file to the destination
+// 		spinner.Updatef("Saving %s", file.Target)
+// 		l.Debug("saving file", "name", file.Target)
+// 		err = helpers.CreatePathAndCopy(fileLocation, file.Target)
+// 		if err != nil {
+// 			return fmt.Errorf("unable to copy file %s to %s: %w", fileLocation, file.Target, err)
+// 		}
+
+// 		// Loop over all symlinks and create them
+// 		for _, link := range file.Symlinks {
+// 			spinner.Updatef("Adding symlink %s->%s", link, file.Target)
+// 			// Try to remove the filepath if it exists
+// 			_ = os.RemoveAll(link)
+// 			// Make sure the parent directory exists
+// 			_ = helpers.CreateParentDirectory(link)
+// 			// Create the symlink
+// 			err := os.Symlink(file.Target, link)
+// 			if err != nil {
+// 				return fmt.Errorf("unable to create symlink %s->%s: %w", link, file.Target, err)
+// 			}
+// 		}
+
+// 		// Cleanup now to reduce disk pressure
+// 		_ = os.RemoveAll(fileLocation)
+// 	}
+
+// 	spinner.Success()
+// 	l.Debug("done copying files", "duration", time.Since(start))
+
+// 	return nil
+// }
+
+func setupState(ctx context.Context, c *cluster.Cluster, pkg v1alpha1.ZarfPackage) (*types.ZarfState, error) {
+	l := logger.From(ctx)
+	// If we are touching K8s, make sure we can talk to it once per deployment
+	spinner := message.NewProgressSpinner("Loading the Zarf State from the Kubernetes cluster")
+	defer spinner.Stop()
+	l.Debug("loading the Zarf State from the Kubernetes cluster")
+
+	state, err := c.LoadZarfState(ctx)
+	// Ignore a state load error in YOLO mode, since the cluster may not have been initialized by Zarf.
+	if err != nil && !pkg.Metadata.YOLO {
+		return nil, err
+	}
+	// Even in YOLO mode, only a missing state secret is tolerated; other load errors are still fatal.
+	if err != nil && !kerrors.IsNotFound(err) && pkg.Metadata.YOLO {
+		return nil, err
+	}
+	if state == nil && pkg.Metadata.YOLO {
+		state = &types.ZarfState{}
+		// YOLO mode, so minimal state needed
+		state.Distro = "YOLO"
+
+		spinner.Updatef("Creating the Zarf namespace")
+		l.Info("creating the Zarf namespace")
+		zarfNamespace := cluster.NewZarfManagedApplyNamespace(cluster.ZarfNamespaceName)
+		_, err = c.Clientset.CoreV1().Namespaces().Apply(ctx, zarfNamespace, metav1.ApplyOptions{Force: true, FieldManager: cluster.FieldManagerName})
+		if err != nil {
+			return nil, fmt.Errorf("unable to apply the Zarf namespace: %w", err)
+		}
+	}
+
+	if pkg.Metadata.YOLO && state.Distro != "YOLO" {
+		message.Warn("This package is in YOLO mode, but the cluster was already initialized with 'zarf init'. " +
+			"This may cause issues if the package does not exclude any charts or manifests from the Zarf Agent using " +
+			"the pod or namespace label `zarf.dev/agent: ignore'.")
+		l.Warn("This package is in YOLO mode, but the cluster was already initialized with 'zarf init'. " +
+			"This may cause issues if the package does not exclude any charts or manifests from the Zarf Agent using " +
+			"the pod or namespace label `zarf.dev/agent: ignore'.")
+	}
+
+	spinner.Success()
+	return state, nil
+}
diff --git a/src/internal/packager2/helm/chart.go b/src/internal/packager2/helm/chart.go
new file mode 100644
index 0000000000..56fb5e59a0
--- /dev/null
+++ b/src/internal/packager2/helm/chart.go
@@ -0,0 +1,512 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"bytes"
+	"context"
+	"errors"
+	"fmt"
+	"time"
+
+	"github.com/zarf-dev/zarf/src/pkg/logger"
+
+	"github.com/Masterminds/semver/v3"
+	"github.com/avast/retry-go/v4"
+	plutoversionsfile "github.com/fairwindsops/pluto/v5"
+	plutoapi "github.com/fairwindsops/pluto/v5/pkg/api"
+	goyaml "github.com/goccy/go-yaml"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/release"
+	"helm.sh/helm/v3/pkg/releaseutil"
+	"helm.sh/helm/v3/pkg/storage/driver"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+	"sigs.k8s.io/yaml"
+
+	"github.com/zarf-dev/zarf/src/config"
+	"github.com/zarf-dev/zarf/src/internal/healthchecks"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"github.com/zarf-dev/zarf/src/types"
+)
+
+// Use the same default history limit as the Helm CLI.
+const maxHelmHistory = 10
+
+// InstallOrUpgradeChart performs a helm install of the given chart.
+func (h *Helm) InstallOrUpgradeChart(ctx context.Context) (types.ConnectStrings, string, error) {
+	l := logger.From(ctx)
+	start := time.Now()
+	source := h.chart.URL
+	if source == "" {
+		source = "Zarf-generated"
+	}
+	spinner := message.NewProgressSpinner("Processing helm chart %s:%s source: %s",
+		h.chart.Name,
+		h.chart.Version,
+		source)
+	defer spinner.Stop()
+	l.Info("processing helm chart", "name", h.chart.Name, "version", h.chart.Version, "source", source)
+
+	// If no release name is specified, use the chart name.
+	if h.chart.ReleaseName == "" {
+		h.chart.ReleaseName = h.chart.Name
+	}
+
+	// Setup K8s connection.
+	err := h.createActionConfig(ctx, h.chart.Namespace, spinner)
+	if err != nil {
+		return nil, "", fmt.Errorf("unable to initialize the K8s client: %w", err)
+	}
+
+	postRender, err := h.newRenderer(ctx)
+	if err != nil {
+		return nil, "", fmt.Errorf("unable to create helm renderer: %w", err)
+	}
+
+	histClient := action.NewHistory(h.actionConfig)
+	var release *release.Release
+
+	helmCtx, helmCtxCancel := context.WithTimeout(ctx, h.timeout)
+	defer helmCtxCancel()
+
+	err = retry.Do(func() error {
+		var err error
+
+		releases, histErr := histClient.Run(h.chart.ReleaseName)
+
+		spinner.Updatef("Checking for existing helm deployment")
+		l.Debug("checking for existing helm deployment")
+
+		if errors.Is(histErr, driver.ErrReleaseNotFound) {
+			// No prior release, try to install it.
+			spinner.Updatef("Attempting chart installation")
+			l.Info("performing Helm install", "chart", h.chart.Name)
+
+			release, err = h.installChart(helmCtx, postRender)
+		} else if histErr == nil && len(releases) > 0 {
+			// Otherwise, there is a prior release so upgrade it.
+			spinner.Updatef("Attempting chart upgrade")
+			l.Info("performing Helm upgrade", "chart", h.chart.Name)
+
+			lastRelease := releases[len(releases)-1]
+
+			release, err = h.upgradeChart(helmCtx, lastRelease, postRender)
+		} else {
+			return fmt.Errorf("unable to verify the chart installation status: %w", histErr)
+		}
+
+		if err != nil {
+			return err
+		}
+
+		spinner.Success()
+		return nil
+	}, retry.Context(ctx), retry.Attempts(uint(h.retries)), retry.Delay(500*time.Millisecond))
+	if err != nil {
+		removeMsg := "if you need to remove the failed chart, use `zarf package remove`"
+		installErr := fmt.Errorf("unable to install chart after %d attempts: %w: %s", h.retries, err, removeMsg)
+
+		releases, _ := histClient.Run(h.chart.ReleaseName)
+		previouslyDeployedVersion := 0
+
+		// Check for previous releases that successfully deployed
+		for _, release := range releases {
+			if release.Info.Status == "deployed" {
+				previouslyDeployedVersion = release.Version
+			}
+		}
+
+		// No prior releases means this was an initial install.
+		if previouslyDeployedVersion == 0 {
+			return nil, "", installErr
+		}
+
+		// Attempt to rollback on a failed upgrade.
+		spinner.Updatef("Performing chart rollback")
+		l.Info("performing Helm rollback", "chart", h.chart.Name)
+		err = h.rollbackChart(h.chart.ReleaseName, previouslyDeployedVersion)
+		if err != nil {
+			return nil, "", fmt.Errorf("%w: unable to rollback: %w", installErr, err)
+		}
+		return nil, "", installErr
+	}
+
+	resourceList, err := h.actionConfig.KubeClient.Build(bytes.NewBufferString(release.Manifest), true)
+	if err != nil {
+		return nil, "", fmt.Errorf("unable to build the resource list: %w", err)
+	}
+
+	runtimeObjs := []runtime.Object{}
+	for _, resource := range resourceList {
+		runtimeObjs = append(runtimeObjs, resource.Object)
+	}
+	if !h.chart.NoWait {
+		// Ensure we don't go past the timeout by using a context initialized with the helm timeout
+		spinner.Updatef("Running health checks")
+		l.Info("running health checks", "chart", h.chart.Name)
+		if err := healthchecks.WaitForReadyRuntime(helmCtx, h.cluster.Watcher, runtimeObjs); err != nil {
+			return nil, "", err
+		}
+	}
+	spinner.Success()
+	l.Debug("done processing helm chart", "name", h.chart.Name, "duration", time.Since(start))
+
+	// return any collected connect strings for zarf connect.
+	return postRender.connectStrings, h.chart.ReleaseName, nil
+}
+
+// TemplateChart generates a helm template from a given chart.
+func (h *Helm) TemplateChart(ctx context.Context) (manifest string, chartValues chartutil.Values, err error) {
+	l := logger.From(ctx)
+	spinner := message.NewProgressSpinner("Templating helm chart %s", h.chart.Name)
+	defer spinner.Stop()
+	l.Debug("templating helm chart", "name", h.chart.Name)
+
+	// Set up the K8s connection.
+	err = h.createActionConfig(ctx, h.chart.Namespace, spinner)
+	if err != nil {
+		return "", nil, fmt.Errorf("unable to initialize the K8s client: %w", err)
+	}
+
+	// Bind the helm action.
+	client := action.NewInstall(h.actionConfig)
+
+	client.DryRun = true
+	client.Replace = true // Skip the name check.
+	client.ClientOnly = true
+	client.IncludeCRDs = true
+	// TODO: Further research this with regular/OCI charts
+	client.Verify = false
+	client.InsecureSkipTLSverify = config.CommonOptions.InsecureSkipTLSVerify
+	if h.kubeVersion != "" {
+		parsedKubeVersion, err := chartutil.ParseKubeVersion(h.kubeVersion)
+		if err != nil {
+			return "", nil, fmt.Errorf("invalid kube version %s: %w", h.kubeVersion, err)
+		}
+		client.KubeVersion = parsedKubeVersion
+	}
+	client.ReleaseName = h.chart.ReleaseName
+
+	// If no release name is specified, use the chart name.
+	if client.ReleaseName == "" {
+		client.ReleaseName = h.chart.Name
+	}
+
+	// Namespace must be specified.
+	client.Namespace = h.chart.Namespace
+
+	loadedChart, chartValues, err := h.loadChartData()
+	if err != nil {
+		return "", nil, fmt.Errorf("unable to load chart data: %w", err)
+	}
+
+	client.PostRenderer, err = h.newRenderer(ctx)
+	if err != nil {
+		return "", nil, fmt.Errorf("unable to create helm renderer: %w", err)
+	}
+
+	// Perform the loadedChart installation.
+	templatedChart, err := client.RunWithContext(ctx, loadedChart, chartValues)
+	if err != nil {
+		return "", nil, fmt.Errorf("error generating helm chart template: %w", err)
+	}
+
+	manifest = templatedChart.Manifest
+
+	for _, hook := range templatedChart.Hooks {
+		manifest += fmt.Sprintf("\n---\n%s", hook.Manifest)
+	}
+
+	spinner.Success()
+
+	return manifest, chartValues, nil
+}
+
+// RemoveChart removes a chart from the cluster.
+func (h *Helm) RemoveChart(ctx context.Context, namespace string, name string, spinner *message.Spinner) error {
+	// Establish a new actionConfig for the namespace.
+	_ = h.createActionConfig(ctx, namespace, spinner)
+	// Perform the uninstall.
+	response, err := h.uninstallChart(name)
+	message.Debug(response)
+	logger.From(ctx).Debug("chart uninstalled", "response", response)
+	return err
+}
+
+// UpdateReleaseValues updates values for a given chart release
+// (note: this only works on single-depth charts; charts with dependencies, like loki-stack, will not work)
+func (h *Helm) UpdateReleaseValues(ctx context.Context, updatedValues map[string]interface{}) error {
+	l := logger.From(ctx)
+	spinner := message.NewProgressSpinner("Updating values for helm release %s", h.chart.ReleaseName)
+	defer spinner.Stop()
+	l.Debug("updating values for helm release", "name", h.chart.ReleaseName)
+
+	err := h.createActionConfig(ctx, h.chart.Namespace, spinner)
+	if err != nil {
+		return fmt.Errorf("unable to initialize the K8s client: %w", err)
+	}
+
+	postRender, err := h.newRenderer(ctx)
+	if err != nil {
+		return fmt.Errorf("unable to create helm renderer: %w", err)
+	}
+
+	histClient := action.NewHistory(h.actionConfig)
+	histClient.Max = 1
+	releases, histErr := histClient.Run(h.chart.ReleaseName)
+	if histErr == nil && len(releases) > 0 {
+		lastRelease := releases[len(releases)-1]
+
+		// Setup a new upgrade action
+		client := action.NewUpgrade(h.actionConfig)
+
+		// Let each chart run for the default timeout.
+		client.Timeout = h.timeout
+
+		client.SkipCRDs = true
+
+		// Namespace must be specified.
+		client.Namespace = h.chart.Namespace
+
+		// Post-processing our manifests to apply vars and run zarf helm logic in cluster
+		client.PostRenderer = postRender
+
+		// Set reuse values to only override the values we are explicitly given
+		client.ReuseValues = true
+
+		// Wait for the update operation to successfully complete
+		client.Wait = true
+
+		// Perform the loadedChart upgrade.
+		_, err = client.RunWithContext(ctx, h.chart.ReleaseName, lastRelease.Chart, updatedValues)
+		if err != nil {
+			return err
+		}
+
+		spinner.Success()
+
+		return nil
+	}
+
+	return fmt.Errorf("unable to find the %s helm release", h.chart.ReleaseName)
+}
+
+func (h *Helm) installChart(ctx context.Context, postRender *renderer) (*release.Release, error) {
+	// Bind the helm action.
+	client := action.NewInstall(h.actionConfig)
+
+	// Let each chart run for the default timeout.
+	client.Timeout = h.timeout
+
+	// Default helm behavior for Zarf is to wait for the resources to deploy, NoWait overrides that for special cases (such as data-injection).
+	client.Wait = !h.chart.NoWait
+
+	// We need to include CRDs or operator installations will fail spectacularly.
+	client.SkipCRDs = false
+
+	// Must be unique per-namespace and < 53 characters. @todo: restrict helm release names to this.
+	client.ReleaseName = h.chart.ReleaseName
+
+	client.SkipSchemaValidation = !h.chart.ShouldRunSchemaValidation()
+
+	// Namespace must be specified.
+	client.Namespace = h.chart.Namespace
+
+	// Post-processing our manifests to apply vars and run zarf helm logic in cluster
+	client.PostRenderer = postRender
+
+	loadedChart, chartValues, err := h.loadChartData()
+	if err != nil {
+		return nil, fmt.Errorf("unable to load chart data: %w", err)
+	}
+
+	// Perform the loadedChart installation.
+	return client.RunWithContext(ctx, loadedChart, chartValues)
+}
+
+func (h *Helm) upgradeChart(ctx context.Context, lastRelease *release.Release, postRender *renderer) (*release.Release, error) {
+	// Migrate any deprecated APIs (if applicable)
+	err := h.migrateDeprecatedAPIs(ctx, lastRelease)
+	if err != nil {
+		return nil, fmt.Errorf("unable to check for API deprecations: %w", err)
+	}
+
+	// Setup a new upgrade action
+	client := action.NewUpgrade(h.actionConfig)
+
+	// Let each chart run for the default timeout.
+	client.Timeout = h.timeout
+
+	// Default helm behavior for Zarf is to wait for the resources to deploy, NoWait overrides that for special cases (such as data-injection).
+	client.Wait = !h.chart.NoWait
+
+	client.SkipCRDs = true
+
+	client.SkipSchemaValidation = !h.chart.ShouldRunSchemaValidation()
+
+	// Namespace must be specified.
+	client.Namespace = h.chart.Namespace
+
+	// Post-processing our manifests to apply vars and run zarf helm logic in cluster
+	client.PostRenderer = postRender
+
+	client.MaxHistory = maxHelmHistory
+
+	loadedChart, chartValues, err := h.loadChartData()
+	if err != nil {
+		return nil, fmt.Errorf("unable to load chart data: %w", err)
+	}
+
+	// Perform the loadedChart upgrade.
+	return client.RunWithContext(ctx, h.chart.ReleaseName, loadedChart, chartValues)
+}
+
+func (h *Helm) rollbackChart(name string, version int) error {
+	client := action.NewRollback(h.actionConfig)
+	client.CleanupOnFail = true
+	client.Force = true
+	client.Wait = true
+	client.Timeout = h.timeout
+	client.Version = version
+	client.MaxHistory = maxHelmHistory
+	return client.Run(name)
+}
+
+func (h *Helm) uninstallChart(name string) (*release.UninstallReleaseResponse, error) {
+	client := action.NewUninstall(h.actionConfig)
+	client.KeepHistory = false
+	client.Wait = true
+	client.Timeout = h.timeout
+	return client.Run(name)
+}
+
+func (h *Helm) loadChartData() (*chart.Chart, chartutil.Values, error) {
+	var (
+		loadedChart *chart.Chart
+		chartValues chartutil.Values
+		err         error
+	)
+
+	if h.chartOverride == nil {
+		// If there is no override, get the chart and values info.
+		loadedChart, err = h.loadChartFromTarball()
+		if err != nil {
+			return nil, nil, fmt.Errorf("unable to load chart tarball: %w", err)
+		}
+
+		chartValues, err = h.parseChartValues()
+		if err != nil {
+			return loadedChart, nil, fmt.Errorf("unable to parse chart values: %w", err)
+		}
+	} else {
+		// Otherwise, use the overrides instead.
+		loadedChart = h.chartOverride
+		chartValues = h.valuesOverrides
+	}
+
+	return loadedChart, chartValues, nil
+}
+
+func (h *Helm) migrateDeprecatedAPIs(ctx context.Context, latestRelease *release.Release) error {
+	// Get the Kubernetes version from the current cluster
+	kubeVersion, err := h.cluster.Clientset.Discovery().ServerVersion()
+	if err != nil {
+		return err
+	}
+
+	kubeGitVersion, err := semver.NewVersion(kubeVersion.String())
+	if err != nil {
+		return err
+	}
+
+	// Use helm to re-split the manifest bytes (same call used by helm to pass this data to postRender)
+	_, resources, err := releaseutil.SortManifests(map[string]string{"manifest": latestRelease.Manifest}, nil, releaseutil.InstallOrder)
+
+	if err != nil {
+		return fmt.Errorf("error re-rendering helm output: %w", err)
+	}
+
+	modifiedManifest := ""
+	modified := false
+
+	// Loop over the resources from the lastRelease manifest to check for deprecations
+	for _, resource := range resources {
+		// parse to unstructured to have access to more data than just the name
+		rawData := &unstructured.Unstructured{}
+		if err := yaml.Unmarshal([]byte(resource.Content), rawData); err != nil {
+			return fmt.Errorf("failed to unmarshal manifest: %w", err)
+		}
+
+		rawData, manifestModified, _ := handleDeprecations(rawData, *kubeGitVersion)
+		manifestContent, err := yaml.Marshal(rawData)
+		if err != nil {
+			return fmt.Errorf("failed to marshal raw manifest after deprecation check: %w", err)
+		}
+
+		// If this is not a bad object, place it back into the manifest
+		modifiedManifest += fmt.Sprintf("---\n# Source: %s\n%s\n", resource.Name, manifestContent)
+
+		if manifestModified {
+			modified = true
+		}
+	}
+
+	// If the release was modified in the above loop, save it back to the cluster
+	if modified {
+		message.Warnf("Zarf detected deprecated APIs for the '%s' helm release.  Attempting automatic upgrade.", latestRelease.Name)
+		logger.From(ctx).Warn("detected deprecated APIs for the helm release", "name", latestRelease.Name)
+
+		// Update current release version to be superseded (same as the helm mapkubeapis plugin)
+		latestRelease.Info.Status = release.StatusSuperseded
+		if err := h.actionConfig.Releases.Update(latestRelease); err != nil {
+			return err
+		}
+
+		// Use a shallow copy of current release version to update the object with the modification
+		// and then store this new version (same as the helm mapkubeapis plugin)
+		var newRelease = latestRelease
+		newRelease.Manifest = modifiedManifest
+		newRelease.Info.Description = "Kubernetes deprecated API upgrade - DO NOT rollback from this version"
+		newRelease.Info.LastDeployed = h.actionConfig.Now()
+		newRelease.Version = latestRelease.Version + 1
+		newRelease.Info.Status = release.StatusDeployed
+		if err := h.actionConfig.Releases.Create(newRelease); err != nil {
+			return err
+		}
+	}
+
+	return nil
+}
+
+// handleDeprecations takes in an unstructured object and the k8s version and returns the latest version of the object and if it was modified.
+func handleDeprecations(rawData *unstructured.Unstructured, kubernetesVersion semver.Version) (*unstructured.Unstructured, bool, error) {
+	deprecatedVersionContent := &plutoapi.VersionFile{}
+	err := goyaml.Unmarshal(plutoversionsfile.Content(), deprecatedVersionContent)
+	if err != nil {
+		return rawData, false, err
+	}
+	for _, deprecation := range deprecatedVersionContent.DeprecatedVersions {
+		if deprecation.Component == "k8s" && deprecation.Kind == rawData.GetKind() && deprecation.Name == rawData.GetAPIVersion() {
+			removedVersion, err := semver.NewVersion(deprecation.RemovedIn)
+			if err != nil {
+				return rawData, false, err
+			}
+
+			if removedVersion.LessThan(&kubernetesVersion) {
+				if deprecation.ReplacementAPI != "" {
+					rawData.SetAPIVersion(deprecation.ReplacementAPI)
+					return rawData, true, nil
+				}
+
+				return nil, true, nil
+			}
+		}
+	}
+	return rawData, false, nil
+}
diff --git a/src/internal/packager2/helm/common.go b/src/internal/packager2/helm/common.go
new file mode 100644
index 0000000000..b555bafd25
--- /dev/null
+++ b/src/internal/packager2/helm/common.go
@@ -0,0 +1,178 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"crypto/sha1"
+	"encoding/hex"
+	"fmt"
+	"os"
+	"path"
+	"path/filepath"
+	"strconv"
+	"time"
+
+	"github.com/zarf-dev/zarf/src/api/v1alpha1"
+	"github.com/zarf-dev/zarf/src/config"
+	"github.com/zarf-dev/zarf/src/pkg/cluster"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"github.com/zarf-dev/zarf/src/pkg/variables"
+	"github.com/zarf-dev/zarf/src/types"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/cli"
+)
+
+// Helm is a config object for working with helm charts.
+type Helm struct {
+	chart      v1alpha1.ZarfChart
+	chartPath  string
+	valuesPath string
+
+	adoptExistingResources bool
+	yolo                   bool
+	cluster                *cluster.Cluster
+	timeout                time.Duration
+	retries                int
+
+	kubeVersion string
+
+	chartOverride   *chart.Chart
+	valuesOverrides map[string]any
+
+	settings       *cli.EnvSettings
+	actionConfig   *action.Configuration
+	variableConfig *variables.VariableConfig
+	state          *types.ZarfState
+}
+
+// Modifier is a function that modifies the Helm config.
+type Modifier func(*Helm)
+
+// New returns a new Helm config struct.
+func New(chart v1alpha1.ZarfChart, chartPath string, valuesPath string, mods ...Modifier) *Helm {
+	h := &Helm{
+		chart:      chart,
+		chartPath:  chartPath,
+		valuesPath: valuesPath,
+		timeout:    config.ZarfDefaultTimeout,
+	}
+
+	for _, mod := range mods {
+		mod(h)
+	}
+
+	return h
+}
+
+// NewClusterOnly returns a new Helm config struct geared toward interacting with the cluster (not packages)
+func NewClusterOnly(variableConfig *variables.VariableConfig, state *types.ZarfState, cluster *cluster.Cluster, adoptExistingResources, yolo bool) *Helm {
+	return &Helm{
+		adoptExistingResources: adoptExistingResources,
+		yolo:                   yolo,
+		variableConfig:         variableConfig,
+		state:                  state,
+		cluster:                cluster,
+		timeout:                config.ZarfDefaultTimeout,
+		retries:                config.ZarfDefaultRetries,
+	}
+}
+
+// NewFromZarfManifest generates a helm chart and config from a given Zarf manifest.
+func NewFromZarfManifest(manifest v1alpha1.ZarfManifest, manifestPath, packageName, componentName string, mods ...Modifier) (h *Helm, err error) {
+	spinner := message.NewProgressSpinner("Starting helm chart generation %s", manifest.Name)
+	defer spinner.Stop()
+
+	// Generate a new chart.
+	tmpChart := new(chart.Chart)
+	tmpChart.Metadata = new(chart.Metadata)
+
+	// Generate a hashed chart name.
+	rawChartName := fmt.Sprintf("raw-%s-%s-%s", packageName, componentName, manifest.Name)
+	hasher := sha1.New()
+	hasher.Write([]byte(rawChartName))
+	tmpChart.Metadata.Name = rawChartName
+	sha1ReleaseName := hex.EncodeToString(hasher.Sum(nil))
+
+	// This is fun, increment forward in a semver-way using epoch so helm doesn't cry.
+	tmpChart.Metadata.Version = fmt.Sprintf("0.1.%d", config.GetStartTime())
+	tmpChart.Metadata.APIVersion = chart.APIVersionV1
+
+	// Add the manifest files so helm does its thing.
+	for _, file := range manifest.Files {
+		spinner.Updatef("Processing %s", file)
+		manifest := path.Join(manifestPath, file)
+		data, err := os.ReadFile(manifest)
+		if err != nil {
+			return h, fmt.Errorf("unable to read manifest file %s: %w", manifest, err)
+		}
+
+		// Escape all chars and then wrap in {{ }}.
+		txt := strconv.Quote(string(data))
+		data = []byte("{{" + txt + "}}")
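+		// Quoting the contents and wrapping them in {{ }} makes helm emit the manifest verbatim instead of templating it.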
+
+		tmpChart.Templates = append(tmpChart.Templates, &chart.File{Name: manifest, Data: data})
+	}
+
+	// Generate the struct to pass to InstallOrUpgradeChart().
+	h = &Helm{
+		chart: v1alpha1.ZarfChart{
+			Name: tmpChart.Metadata.Name,
+			// Preserve the zarf prefix for chart names to match v0.22.x and earlier behavior.
+			ReleaseName: fmt.Sprintf("zarf-%s", sha1ReleaseName),
+			Version:     tmpChart.Metadata.Version,
+			Namespace:   manifest.Namespace,
+			NoWait:      manifest.NoWait,
+		},
+		chartOverride: tmpChart,
+		timeout:       config.ZarfDefaultTimeout,
+	}
+
+	for _, mod := range mods {
+		mod(h)
+	}
+
+	spinner.Success()
+
+	return h, nil
+}
+
+// WithDeployInfo adds the necessary information to deploy a given chart
+func WithDeployInfo(variableConfig *variables.VariableConfig, state *types.ZarfState, cluster *cluster.Cluster, valuesOverrides map[string]any, adoptExistingResources, yolo bool, timeout time.Duration, retries int) Modifier {
+	return func(h *Helm) {
+		h.adoptExistingResources = adoptExistingResources
+		h.yolo = yolo
+		h.variableConfig = variableConfig
+		h.state = state
+		h.cluster = cluster
+		h.valuesOverrides = valuesOverrides
+		h.timeout = timeout
+		h.retries = retries
+	}
+}
+
+// WithKubeVersion sets the Kube version for templating the chart
+func WithKubeVersion(kubeVersion string) Modifier {
+	return func(h *Helm) {
+		h.kubeVersion = kubeVersion
+	}
+}
+
+// WithVariableConfig sets the variable config for the chart
+func WithVariableConfig(variableConfig *variables.VariableConfig) Modifier {
+	return func(h *Helm) {
+		h.variableConfig = variableConfig
+	}
+}
+
+// StandardName generates a predictable full path for a helm chart for Zarf.
+func StandardName(destination string, chart v1alpha1.ZarfChart) string {
+	return filepath.Join(destination, chart.Name+"-"+chart.Version)
+}
+
+// StandardValuesName generates a predictable full path for the values file for a helm chart for zarf
+func StandardValuesName(destination string, chart v1alpha1.ZarfChart, idx int) string {
+	return fmt.Sprintf("%s-%d", StandardName(destination, chart), idx)
+}
diff --git a/src/internal/packager2/helm/destroy.go b/src/internal/packager2/helm/destroy.go
new file mode 100644
index 0000000000..86e0b63ce6
--- /dev/null
+++ b/src/internal/packager2/helm/destroy.go
@@ -0,0 +1,75 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"context"
+	"regexp"
+	"time"
+
+	"github.com/zarf-dev/zarf/src/pkg/cluster"
+	"github.com/zarf-dev/zarf/src/pkg/logger"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"helm.sh/helm/v3/pkg/action"
+)
+
+// Destroy removes ZarfInitPackage charts from the cluster and optionally all Zarf-installed charts.
+func Destroy(ctx context.Context, purgeAllZarfInstallations bool) {
+	start := time.Now()
+	l := logger.From(ctx)
+	spinner := message.NewProgressSpinner("Removing Zarf-installed charts")
+	defer spinner.Stop()
+	l.Info("removing Zarf-installed charts")
+
+	h := Helm{}
+
+	// Initially load the actionConfig without a namespace
+	err := h.createActionConfig(ctx, "", spinner)
+	if err != nil {
+		// Don't fatal since this is a removal action
+		spinner.Errorf(err, "Unable to initialize the K8s client")
+		l.Error("unable to initialize the K8s client", "error", err.Error())
+		return
+	}
+
+	// Match a name that begins with "zarf-"
+	// Explanation: https://regex101.com/r/3yzKZy/1
+	zarfPrefix := regexp.MustCompile(`(?m)^zarf-`)
+
+	// Get a list of all releases in all namespaces
+	list := action.NewList(h.actionConfig)
+	list.All = true
+	list.AllNamespaces = true
+	// Uninstall in reverse order
+	list.ByDate = true
+	list.SortReverse = true
+	releases, err := list.Run()
+	if err != nil {
+		// Don't fatal since this is a removal action
+		spinner.Errorf(err, "Unable to get the list of installed charts")
+		l.Error("unable to get the list of installed charts", "error", err.Error())
+	}
+
+	// Iterate over all releases
+	for _, release := range releases {
+		if !purgeAllZarfInstallations && release.Namespace != cluster.ZarfNamespaceName {
+			// Don't process releases outside the zarf namespace unless purge all is true
+			continue
+		}
+		// Filter on zarf releases
+		if zarfPrefix.MatchString(release.Name) {
+			spinner.Updatef("Uninstalling helm chart %s/%s", release.Namespace, release.Name)
+			l.Info("uninstalling helm chart", "namespace", release.Namespace, "name", release.Name)
+			if err = h.RemoveChart(ctx, release.Namespace, release.Name, spinner); err != nil {
+				// Don't fatal since this is a removal action
+				spinner.Errorf(err, "Unable to uninstall the chart")
+				l.Error("unable to uninstall the chart", "error", err.Error())
+			}
+		}
+	}
+
+	spinner.Success()
+	l.Debug("done uninstalling charts", "duration", time.Since(start))
+}
diff --git a/src/internal/packager2/helm/images.go b/src/internal/packager2/helm/images.go
new file mode 100644
index 0000000000..b5399f2590
--- /dev/null
+++ b/src/internal/packager2/helm/images.go
@@ -0,0 +1,57 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+package helm
+
+import (
+	"github.com/defenseunicorns/pkg/helpers/v2"
+	"github.com/goccy/go-yaml"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/chartutil"
+)
+
+// ChartImages captures the structure of the helm.sh/images annotation within the Helm chart.
+type ChartImages []struct {
+	// Name of the image.
+	Name string `yaml:"name"`
+	// Image with tag.
+	Image string `yaml:"image"`
+	// Condition specifies the values to determine if the image is included or not.
+	Condition string `yaml:"condition"`
+	// Dependency is the subchart that contains the image; if empty, it is the parent chart.
+	Dependency string `yaml:"dependency"`
+}
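+
+// A sketch of the helm.sh/images annotation this type models (illustrative values):
+//
+//	annotations:
+//	  helm.sh/images: |
+//	    - name: nginx
+//	      image: docker.io/library/nginx:1.25.0
+//	    - name: metrics
+//	      image: quay.io/example/metrics:0.1.0
+//	      condition: metrics.enabled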
+
+// FindAnnotatedImagesForChart attempts to parse any image annotations found in a chart archive or directory.
+func FindAnnotatedImagesForChart(chartPath string, values chartutil.Values) (images []string, err error) {
+	// Load a new chart.
+	chart, err := loader.Load(chartPath)
+	if err != nil {
+		return images, err
+	}
+	values = helpers.MergeMapRecursive(chart.Values, values)
+
+	imageAnnotation := chart.Metadata.Annotations["helm.sh/images"]
+
+	var chartImages ChartImages
+
+	err = yaml.Unmarshal([]byte(imageAnnotation), &chartImages)
+	if err != nil {
+		return images, err
+	}
+
+	for _, i := range chartImages {
+		// Only include the image if the current values/condition specify it should be included
+		if i.Condition != "" {
+			value, err := values.PathValue(i.Condition)
+			// We intentionally ignore the error here because the key could be missing from the values.yaml
+			if err == nil && value == true {
+				images = append(images, i.Image)
+			}
+		} else {
+			images = append(images, i.Image)
+		}
+	}
+
+	return images, nil
+}
diff --git a/src/internal/packager2/helm/post-render.go b/src/internal/packager2/helm/post-render.go
new file mode 100644
index 0000000000..bd1121709d
--- /dev/null
+++ b/src/internal/packager2/helm/post-render.go
@@ -0,0 +1,291 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"bytes"
+	"context"
+	"fmt"
+	"os"
+	"path/filepath"
+	"slices"
+
+	"github.com/defenseunicorns/pkg/helpers/v2"
+	"github.com/zarf-dev/zarf/src/config"
+	"github.com/zarf-dev/zarf/src/pkg/cluster"
+	"github.com/zarf-dev/zarf/src/pkg/logger"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"github.com/zarf-dev/zarf/src/pkg/utils"
+	"github.com/zarf-dev/zarf/src/types"
+	"helm.sh/helm/v3/pkg/releaseutil"
+	"k8s.io/client-go/dynamic"
+	"k8s.io/client-go/restmapper"
+	"sigs.k8s.io/yaml"
+
+	corev1 "k8s.io/api/core/v1"
+	kerrors "k8s.io/apimachinery/pkg/api/errors"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/apis/meta/v1/unstructured"
+	"k8s.io/apimachinery/pkg/runtime"
+)
+
+type renderer struct {
+	*Helm
+	connectStrings types.ConnectStrings
+	namespaces     map[string]*corev1.Namespace
+}
+
+func (h *Helm) newRenderer(ctx context.Context) (*renderer, error) {
+	rend := &renderer{
+		Helm:           h,
+		connectStrings: types.ConnectStrings{},
+		namespaces:     map[string]*corev1.Namespace{},
+	}
+	if h.cluster == nil {
+		return rend, nil
+	}
+
+	namespace, err := h.cluster.Clientset.CoreV1().Namespaces().Get(ctx, h.chart.Namespace, metav1.GetOptions{})
+	if err != nil && !kerrors.IsNotFound(err) {
+		return nil, fmt.Errorf("unable to check for existing namespace %q in cluster: %w", h.chart.Namespace, err)
+	}
+	if kerrors.IsNotFound(err) {
+		rend.namespaces[h.chart.Namespace] = cluster.NewZarfManagedNamespace(h.chart.Namespace)
+	} else if h.adoptExistingResources {
+		namespace.Labels = cluster.AdoptZarfManagedLabels(namespace.Labels)
+		rend.namespaces[h.chart.Namespace] = namespace
+	}
+
+	return rend, nil
+}
+
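+// Run implements helm's PostRenderer interface: it receives the fully rendered
+// manifests from helm, applies Zarf variable templating and resource edits, and
+// returns the modified manifests for helm to apply.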
+func (r *renderer) Run(renderedManifests *bytes.Buffer) (*bytes.Buffer, error) {
+	// Writing to a temp file is low cost, consistent with how we template elsewhere, and useful for debugging
+	tempDir, err := utils.MakeTempDir(r.chartPath)
+	if err != nil {
+		return nil, fmt.Errorf("unable to create tmpdir:  %w", err)
+	}
+	path := filepath.Join(tempDir, "chart.yaml")
+
+	if err := os.WriteFile(path, renderedManifests.Bytes(), helpers.ReadWriteUser); err != nil {
+		return nil, fmt.Errorf("unable to write the post-render file for the helm chart")
+	}
+
+	// Run the template engine against the chart output
+	if err := r.variableConfig.ReplaceTextTemplate(path); err != nil {
+		return nil, fmt.Errorf("error templating the helm chart: %w", err)
+	}
+
+	// Read back the templated file contents
+	buff, err := os.ReadFile(path)
+	if err != nil {
+		return nil, fmt.Errorf("error reading temporary post-rendered helm chart: %w", err)
+	}
+
+	// Use helm to re-split the manifest byte (same call used by helm to pass this data to postRender)
+	_, resources, err := releaseutil.SortManifests(map[string]string{path: string(buff)},
+		r.actionConfig.Capabilities.APIVersions,
+		releaseutil.InstallOrder,
+	)
+
+	if err != nil {
+		return nil, fmt.Errorf("error re-rendering helm output: %w", err)
+	}
+
+	finalManifestsOutput := bytes.NewBuffer(nil)
+
+	if r.cluster != nil {
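+		// Helm's PostRenderer interface does not carry a context, so fall back to a background context here.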
+		ctx := context.Background()
+
+		if err := r.editHelmResources(ctx, resources, finalManifestsOutput); err != nil {
+			return nil, err
+		}
+
+		if err := r.adoptAndUpdateNamespaces(ctx); err != nil {
+			return nil, err
+		}
+	} else {
+		for _, resource := range resources {
+			fmt.Fprintf(finalManifestsOutput, "---\n# Source: %s\n%s\n", resource.Name, resource.Content)
+		}
+	}
+
+	// Send the bytes back to helm
+	return finalManifestsOutput, nil
+}
+
+func (r *renderer) adoptAndUpdateNamespaces(ctx context.Context) error {
+	l := logger.From(ctx)
+	c := r.cluster
+	namespaceList, err := r.cluster.Clientset.CoreV1().Namespaces().List(ctx, metav1.ListOptions{})
+	if err != nil {
+		return err
+	}
+	for name, namespace := range r.namespaces {
+		// Check to see if this namespace already exists
+		var existingNamespace bool
+		for _, serverNamespace := range namespaceList.Items {
+			if serverNamespace.Name == name {
+				existingNamespace = true
+			}
+		}
+
+		if !existingNamespace {
+			// This is a new namespace, add it
+			_, err := c.Clientset.CoreV1().Namespaces().Create(ctx, namespace, metav1.CreateOptions{})
+			if err != nil {
+				return fmt.Errorf("unable to create the missing namespace %s", name)
+			}
+		} else if r.adoptExistingResources {
+			// Refuse to adopt namespace if it is one of four initial Kubernetes namespaces.
+			// https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/#initial-namespaces
+			if slices.Contains([]string{"default", "kube-node-lease", "kube-public", "kube-system"}, name) {
+				message.Warnf("Refusing to adopt the initial namespace: %s", name)
+				l.Warn("refusing to adopt initial namespace", "name", name)
+			} else {
+				// This is an existing namespace to adopt
+				_, err := c.Clientset.CoreV1().Namespaces().Update(ctx, namespace, metav1.UpdateOptions{})
+				if err != nil {
+					return fmt.Errorf("unable to adopt the existing namespace %s", name)
+				}
+			}
+		}
+
+		// If the package is marked as YOLO and the state is empty, skip the secret creation for this namespace
+		if r.yolo && r.state.Distro == "YOLO" {
+			continue
+		}
+
+		// Create the secret
+		validRegistrySecret, err := c.GenerateRegistryPullCreds(ctx, name, config.ZarfImagePullSecretName, r.state.RegistryInfo)
+		if err != nil {
+			return err
+		}
+		_, err = c.Clientset.CoreV1().Secrets(*validRegistrySecret.Namespace).Apply(ctx, validRegistrySecret, metav1.ApplyOptions{Force: true, FieldManager: cluster.FieldManagerName})
+		if err != nil {
+			return fmt.Errorf("problem applying registry secret for the %s namespace: %w", name, err)
+		}
+		gitServerSecret := c.GenerateGitPullCreds(name, config.ZarfGitServerSecretName, r.state.GitServer)
+		_, err = c.Clientset.CoreV1().Secrets(*gitServerSecret.Namespace).Apply(ctx, gitServerSecret, metav1.ApplyOptions{Force: true, FieldManager: cluster.FieldManagerName})
+		if err != nil {
+			return fmt.Errorf("problem applying git server secret for the %s namespace: %w", name, err)
+		}
+	}
+	return nil
+}
+
+func (r *renderer) editHelmResources(ctx context.Context, resources []releaseutil.Manifest, finalManifestsOutput *bytes.Buffer) error {
+	l := logger.From(ctx)
+	dc, err := dynamic.NewForConfig(r.cluster.RestConfig)
+	if err != nil {
+		return err
+	}
+	groupResources, err := restmapper.GetAPIGroupResources(r.cluster.Clientset.Discovery())
+	if err != nil {
+		return err
+	}
+	mapper := restmapper.NewDiscoveryRESTMapper(groupResources)
+
+	for _, resource := range resources {
+		// parse to unstructured to have access to more data than just the name
+		rawData := &unstructured.Unstructured{}
+		if err := yaml.Unmarshal([]byte(resource.Content), rawData); err != nil {
+			return fmt.Errorf("failed to unmarshal manifest: %w", err)
+		}
+
+		switch rawData.GetKind() {
+		case "Namespace":
+			namespace := &corev1.Namespace{}
+			// parse the namespace resource so it can be applied out-of-band by zarf instead of helm to avoid helm ns shenanigans
+			if err := runtime.DefaultUnstructuredConverter.FromUnstructured(rawData.UnstructuredContent(), namespace); err != nil {
+				message.WarnErrf(err, "could not parse namespace %s", rawData.GetName())
+				l.Warn("failed to parse namespace", "name", rawData.GetName(), "error", err)
+			} else {
+				message.Debugf("Matched helm namespace %s for zarf annotation", namespace.Name)
+				l.Debug("matched helm namespace for zarf annotation", "name", namespace.Name)
+				namespace.Labels = cluster.AdoptZarfManagedLabels(namespace.Labels)
+				// Add it to the stack
+				r.namespaces[namespace.Name] = namespace
+			}
+			// skip so we can strip namespaces from helm's brain
+			continue
+
+		case "Service":
+			// Check service resources for the zarf-connect label
+			labels := rawData.GetLabels()
+			if labels == nil {
+				labels = map[string]string{}
+			}
+			annotations := rawData.GetAnnotations()
+			if annotations == nil {
+				annotations = map[string]string{}
+			}
+			if key, keyExists := labels[cluster.ZarfConnectLabelName]; keyExists {
+				// If there is a zarf-connect label
+				message.Debugf("Match helm service %s for zarf connection %s", rawData.GetName(), key)
+				l.Debug("match helm service for zarf connection", "service", rawData.GetName(), "connection-key", key)
+
+				// Add the connectString for processing later in the deployment
+				r.connectStrings[key] = types.ConnectString{
+					Description: annotations[cluster.ZarfConnectAnnotationDescription],
+					URL:         annotations[cluster.ZarfConnectAnnotationURL],
+				}
+			}
+		}
+
+		namespace := rawData.GetNamespace()
+		if _, exists := r.namespaces[namespace]; !exists && namespace != "" {
+			// if this is the first time seeing this ns, we need to track that to create it as well
+			r.namespaces[namespace] = cluster.NewZarfManagedNamespace(namespace)
+		}
+
+		// If we have been asked to adopt existing resources, process those now as well
+		if r.adoptExistingResources {
+			deployedNamespace := namespace
+			if deployedNamespace == "" {
+				deployedNamespace = r.chart.Namespace
+			}
+
+			err := func() error {
+				mapping, err := mapper.RESTMapping(rawData.GroupVersionKind().GroupKind())
+				if err != nil {
+					return err
+				}
+				resource, err := dc.Resource(mapping.Resource).Namespace(deployedNamespace).Get(ctx, rawData.GetName(), metav1.GetOptions{})
+				// Ignore resources that are yet to be created
+				if kerrors.IsNotFound(err) {
+					return nil
+				}
+				if err != nil {
+					return err
+				}
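+				// Helm only considers a pre-existing resource part of a release when it
+				// carries these ownership labels and annotations, so set them to adopt it.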
+				labels := resource.GetLabels()
+				if labels == nil {
+					labels = map[string]string{}
+				}
+				labels["app.kubernetes.io/managed-by"] = "Helm"
+				resource.SetLabels(labels)
+				annotations := resource.GetAnnotations()
+				if annotations == nil {
+					annotations = map[string]string{}
+				}
+				annotations["meta.helm.sh/release-name"] = r.chart.ReleaseName
+				annotations["meta.helm.sh/release-namespace"] = r.chart.Namespace
+				resource.SetAnnotations(annotations)
+				_, err = dc.Resource(mapping.Resource).Namespace(deployedNamespace).Update(ctx, resource, metav1.UpdateOptions{})
+				if err != nil {
+					return err
+				}
+				return nil
+			}()
+			if err != nil {
+				return fmt.Errorf("unable to adopt the resource %s: %w", rawData.GetName(), err)
+			}
+		}
+		// Finally place this back onto the output buffer
+		fmt.Fprintf(finalManifestsOutput, "---\n# Source: %s\n%s\n", resource.Name, resource.Content)
+	}
+	return nil
+}
diff --git a/src/internal/packager2/helm/repo.go b/src/internal/packager2/helm/repo.go
new file mode 100644
index 0000000000..42991809dc
--- /dev/null
+++ b/src/internal/packager2/helm/repo.go
@@ -0,0 +1,390 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"log/slog"
+	"os"
+	"path/filepath"
+	"strings"
+	"time"
+
+	"github.com/zarf-dev/zarf/src/pkg/logger"
+
+	"github.com/defenseunicorns/pkg/helpers/v2"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chart/loader"
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/downloader"
+	"helm.sh/helm/v3/pkg/getter"
+	"helm.sh/helm/v3/pkg/registry"
+	"helm.sh/helm/v3/pkg/repo"
+	"k8s.io/client-go/util/homedir"
+
+	"github.com/zarf-dev/zarf/src/config"
+	"github.com/zarf-dev/zarf/src/config/lang"
+	"github.com/zarf-dev/zarf/src/internal/git"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"github.com/zarf-dev/zarf/src/pkg/transform"
+	"github.com/zarf-dev/zarf/src/pkg/utils"
+)
+
+// PackageChart creates a chart archive from a path to a chart on the host OS and builds chart dependencies.
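+// The chart URL may be a helm repo URL, an OCI reference, or a git URL with an
+// optional ref, e.g. (illustrative) "https://example.com/charts.git@v1.2.3".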
+func (h *Helm) PackageChart(ctx context.Context, cosignKeyPath string) error {
+	if len(h.chart.URL) > 0 {
+		url, refPlain, err := transform.GitURLSplitRef(h.chart.URL)
+		// check if the chart is a git url with a ref (if an error is returned, url will be empty)
+		isGitURL := strings.HasSuffix(url, ".git")
+		if err != nil {
+			// TODO(mkcp): Remove message on logger release
+			message.Debugf("unable to parse the url, continuing with %s", h.chart.URL)
+			logger.From(ctx).Debug("unable to parse the url, continuing", "url", h.chart.URL)
+		}
+
+		if isGitURL {
+			// if it is a git url, append the chart version as if it were a tag
+			if refPlain == "" {
+				h.chart.URL = fmt.Sprintf("%s@%s", h.chart.URL, h.chart.Version)
+			}
+
+			err = h.PackageChartFromGit(ctx, cosignKeyPath)
+			if err != nil {
+				return fmt.Errorf("unable to pull the chart %q from git: %w", h.chart.Name, err)
+			}
+		} else {
+			err = h.DownloadPublishedChart(ctx, cosignKeyPath)
+			if err != nil {
+				return fmt.Errorf("unable to download the published chart %q: %w", h.chart.Name, err)
+			}
+		}
+	} else {
+		err := h.PackageChartFromLocalFiles(ctx, cosignKeyPath)
+		if err != nil {
+			return fmt.Errorf("unable to package the %q chart: %w", h.chart.Name, err)
+		}
+	}
+	return nil
+}
+
+// PackageChartFromLocalFiles creates a chart archive from a path to a chart on the host OS.
+func (h *Helm) PackageChartFromLocalFiles(ctx context.Context, cosignKeyPath string) error {
+	l := logger.From(ctx)
+	l.Info("processing local helm chart",
+		"name", h.chart.Name,
+		"version", h.chart.Version,
+		"path", h.chart.LocalPath,
+	)
+	// TODO(mkcp): Remove message on logger release
+	spinner := message.NewProgressSpinner("Processing helm chart %s:%s from %s", h.chart.Name, h.chart.Version, h.chart.LocalPath)
+	defer spinner.Stop()
+
+	// Load and validate the chart
+	cl, _, err := h.loadAndValidateChart(h.chart.LocalPath)
+	if err != nil {
+		return err
+	}
+
+	// Handle the chart directory or tarball
+	var saved string
+	temp := filepath.Join(h.chartPath, "temp")
+	if _, ok := cl.(loader.DirLoader); ok {
+		err = h.buildChartDependencies()
+		if err != nil {
+			return fmt.Errorf("unable to build dependencies for the chart: %w", err)
+		}
+
+		client := action.NewPackage()
+
+		client.Destination = temp
+		saved, err = client.Run(h.chart.LocalPath, nil)
+	} else {
+		saved = filepath.Join(temp, filepath.Base(h.chart.LocalPath))
+		err = helpers.CreatePathAndCopy(h.chart.LocalPath, saved)
+	}
+	defer func(l *slog.Logger) {
+		err := os.RemoveAll(temp)
+		if err != nil {
+			l.Error(err.Error())
+		}
+	}(l)
+
+	if err != nil {
+		return fmt.Errorf("unable to save the archive and create the package %s: %w", saved, err)
+	}
+
+	// Finalize the chart
+	err = h.finalizeChartPackage(ctx, saved, cosignKeyPath)
+	if err != nil {
+		return err
+	}
+
+	spinner.Success()
+
+	l.Debug("done processing local helm chart",
+		"name", h.chart.Name,
+		"version", h.chart.Version,
+		"path", h.chart.LocalPath,
+	)
+	return nil
+}
+
+// PackageChartFromGit is a special implementation of chart archiving that supports the https://p1.dso.mil/#/products/big-bang/ model.
+func (h *Helm) PackageChartFromGit(ctx context.Context, cosignKeyPath string) error {
+	l := logger.From(ctx)
+	l.Info("processing helm chart", "name", h.chart.Name)
+	// TODO(mkcp): Remove message on logger release
+	spinner := message.NewProgressSpinner("Processing helm chart %s", h.chart.Name)
+	defer spinner.Stop()
+
+	// Retrieve the repo containing the chart
+	gitPath, err := DownloadChartFromGitToTemp(ctx, h.chart.URL)
+	if err != nil {
+		return err
+	}
+	defer func(l *slog.Logger) {
+		if err := os.RemoveAll(gitPath); err != nil {
+			l.Error(err.Error())
+		}
+	}(l)
+
+	// Set the directory for the chart and package it
+	h.chart.LocalPath = filepath.Join(gitPath, h.chart.GitPath)
+	return h.PackageChartFromLocalFiles(ctx, cosignKeyPath)
+}
+
+// DownloadPublishedChart loads a specific chart version from a remote repo.
+func (h *Helm) DownloadPublishedChart(ctx context.Context, cosignKeyPath string) error {
+	l := logger.From(ctx)
+	l.Info("processing helm chart",
+		"name", h.chart.Name,
+		"version", h.chart.Version,
+		"repo", h.chart.URL,
+	)
+	start := time.Now()
+	// TODO(mkcp): Remove message on logger release
+	spinner := message.NewProgressSpinner("Processing helm chart %s:%s from repo %s", h.chart.Name, h.chart.Version, h.chart.URL)
+	defer spinner.Stop()
+
+	// Set up the helm pull config
+	pull := action.NewPull()
+	pull.Settings = cli.New()
+
+	var (
+		regClient *registry.Client
+		chartURL  string
+		err       error
+	)
+	repoFile, err := repo.LoadFile(pull.Settings.RepositoryConfig)
+
+	// Not returning the error here since the repo file is only needed if we are pulling from a repo that requires authentication
+	if err != nil {
+		// TODO(mkcp): Remove message on logger release
+		message.Debugf("Unable to load the repo file at %q: %s", pull.Settings.RepositoryConfig, err.Error())
+		l.Debug("unable to load the repo file",
+			"path", pull.Settings.RepositoryConfig,
+			"error", err.Error(),
+		)
+	}
+
+	var username string
+	var password string
+
+	// Handle OCI registries
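+	// e.g. (illustrative) oci://ghcr.io/example/charts/podinfo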
+	if registry.IsOCI(h.chart.URL) {
+		regClient, err = registry.NewClient(registry.ClientOptEnableCache(true))
+		if err != nil {
+			return fmt.Errorf("unable to create the new registry client: %w", err)
+		}
+		chartURL = h.chart.URL
+		// Explicitly set the pull version for OCI
+		pull.Version = h.chart.Version
+	} else {
+		chartName := h.chart.Name
+		if h.chart.RepoName != "" {
+			chartName = h.chart.RepoName
+		}
+
+		if repoFile != nil {
+			// TODO: @AustinAbro321 Currently this selects the last repo with the same url
+			// We should introduce a new field in zarf to allow users to specify the local repo they want
+			for _, repo := range repoFile.Repositories {
+				if repo.URL == h.chart.URL {
+					username = repo.Username
+					password = repo.Password
+				}
+			}
+		}
+
+		chartURL, err = repo.FindChartInAuthRepoURL(h.chart.URL, username, password, chartName, h.chart.Version, pull.CertFile, pull.KeyFile, pull.CaFile, getter.All(pull.Settings))
+		if err != nil {
+			return fmt.Errorf("unable to pull the helm chart: %w", err)
+		}
+	}
+
+	// Set up the chartDownloader
+	chartDownloader := downloader.ChartDownloader{
+		Out:            spinner,
+		RegistryClient: regClient,
+		// TODO: Further research this with regular/OCI charts
+		Verify:  downloader.VerifyNever,
+		Getters: getter.All(pull.Settings),
+		Options: []getter.Option{
+			getter.WithInsecureSkipVerifyTLS(config.CommonOptions.InsecureSkipTLSVerify),
+			getter.WithBasicAuth(username, password),
+		},
+	}
+
+	// Download the file into a temp directory since we don't control what name helm creates here
+	temp := filepath.Join(h.chartPath, "temp")
+	if err = helpers.CreateDirectory(temp, helpers.ReadWriteExecuteUser); err != nil {
+		return fmt.Errorf("unable to create helm chart temp directory: %w", err)
+	}
+	defer func(l *slog.Logger) {
+		err := os.RemoveAll(temp)
+		if err != nil {
+			l.Error(err.Error())
+		}
+	}(l)
+
+	saved, _, err := chartDownloader.DownloadTo(chartURL, pull.Version, temp)
+	if err != nil {
+		return fmt.Errorf("unable to download the helm chart: %w", err)
+	}
+
+	// Validate the chart
+	_, _, err = h.loadAndValidateChart(saved)
+	if err != nil {
+		return err
+	}
+
+	// Finalize the chart
+	err = h.finalizeChartPackage(ctx, saved, cosignKeyPath)
+	if err != nil {
+		return err
+	}
+
+	spinner.Success()
+	l.Debug("done downloading helm chart",
+		"name", h.chart.Name,
+		"version", h.chart.Version,
+		"repo", h.chart.URL,
+		"duration", time.Since(start),
+	)
+	return nil
+}
+
+// DownloadChartFromGitToTemp downloads a chart from git into a temp directory
+func DownloadChartFromGitToTemp(ctx context.Context, url string) (string, error) {
+	path, err := utils.MakeTempDir(config.CommonOptions.TempDirectory)
+	if err != nil {
+		return "", fmt.Errorf("unable to create tmpdir: %w", err)
+	}
+	repository, err := git.Clone(ctx, path, url, true)
+	if err != nil {
+		return "", err
+	}
+	return repository.Path(), nil
+}
+
+func (h *Helm) finalizeChartPackage(ctx context.Context, saved, cosignKeyPath string) error {
+	// Ensure the name is consistent for deployments
+	destinationTarball := StandardName(h.chartPath, h.chart) + ".tgz"
+	err := os.Rename(saved, destinationTarball)
+	if err != nil {
+		return fmt.Errorf("unable to save the final chart tarball: %w", err)
+	}
+
+	err = h.packageValues(ctx, cosignKeyPath)
+	if err != nil {
+		return fmt.Errorf("unable to process the values for the package: %w", err)
+	}
+	return nil
+}
+
+func (h *Helm) packageValues(ctx context.Context, cosignKeyPath string) error {
+	for valuesIdx, path := range h.chart.ValuesFiles {
+		dst := StandardValuesName(h.valuesPath, h.chart, valuesIdx)
+
+		if helpers.IsURL(path) {
+			if err := utils.DownloadToFile(ctx, path, dst, cosignKeyPath); err != nil {
+				return fmt.Errorf(lang.ErrDownloading, path, err.Error())
+			}
+		} else {
+			if err := helpers.CreatePathAndCopy(path, dst); err != nil {
+				return fmt.Errorf("unable to copy chart values file %s: %w", path, err)
+			}
+		}
+	}
+
+	return nil
+}
+
+// buildChartDependencies builds the helm chart dependencies
+func (h *Helm) buildChartDependencies() error {
+	// Download and build the specified dependencies
+	regClient, err := registry.NewClient(registry.ClientOptEnableCache(true))
+	if err != nil {
+		return fmt.Errorf("unable to create a new registry client: %w", err)
+	}
+
+	h.settings = cli.New()
+	defaultKeyring := filepath.Join(homedir.HomeDir(), ".gnupg", "pubring.gpg")
+	if v, ok := os.LookupEnv("GNUPGHOME"); ok {
+		defaultKeyring = filepath.Join(v, "pubring.gpg")
+	}
+
+	man := &downloader.Manager{
+		Out:            &message.DebugWriter{},
+		ChartPath:      h.chart.LocalPath,
+		Getters:        getter.All(h.settings),
+		RegistryClient: regClient,
+
+		RepositoryConfig: h.settings.RepositoryConfig,
+		RepositoryCache:  h.settings.RepositoryCache,
+		Debug:            false,
+		Verify:           downloader.VerifyIfPossible,
+		Keyring:          defaultKeyring,
+	}
+
+	// Build the deps from the helm chart
+	err = man.Build()
+	var notFoundErr *downloader.ErrRepoNotFound
+	if errors.As(err, &notFoundErr) {
+		// If we encounter a repo not found error point the user to `zarf tools helm repo add`
+		// TODO(mkcp): Remove message on logger release
+		message.Warnf("%s. Please add the missing repo(s) via the following:", notFoundErr.Error())
+		for _, repository := range notFoundErr.Repos {
+			// TODO(mkcp): Remove message on logger release
+			message.ZarfCommand(fmt.Sprintf("tools helm repo add <your-repo-name> %s", repository))
+		}
+		return err
+	}
+	if err != nil {
+		// TODO(mkcp): Remove message on logger release
+		message.ZarfCommand("tools helm dependency build --verify")
+		message.Warnf("Unable to perform a rebuild of Helm dependencies: %s", err.Error())
+		return err
+	}
+	return nil
+}
+
+func (h *Helm) loadAndValidateChart(location string) (loader.ChartLoader, *chart.Chart, error) {
+	// Validate the chart
+	cl, err := loader.Loader(location)
+	if err != nil {
+		return cl, nil, fmt.Errorf("unable to load the chart from %s: %w", location, err)
+	}
+
+	chart, err := cl.Load()
+	if err != nil {
+		return cl, chart, fmt.Errorf("validation failed for chart from %s: %w", location, err)
+	}
+
+	return cl, chart, nil
+}
diff --git a/src/internal/packager2/helm/utils.go b/src/internal/packager2/helm/utils.go
new file mode 100644
index 0000000000..38b9e1f889
--- /dev/null
+++ b/src/internal/packager2/helm/utils.go
@@ -0,0 +1,87 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"context"
+	"fmt"
+	"log/slog"
+
+	"github.com/defenseunicorns/pkg/helpers/v2"
+	"github.com/zarf-dev/zarf/src/pkg/logger"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"helm.sh/helm/v3/pkg/action"
+	"helm.sh/helm/v3/pkg/chart"
+	"helm.sh/helm/v3/pkg/chartutil"
+	"helm.sh/helm/v3/pkg/cli"
+	"helm.sh/helm/v3/pkg/cli/values"
+	"helm.sh/helm/v3/pkg/getter"
+
+	"helm.sh/helm/v3/pkg/chart/loader"
+)
+
+// loadChartFromTarball returns a helm chart from a tarball.
+func (h *Helm) loadChartFromTarball() (*chart.Chart, error) {
+	// Get the path to the temporary helm chart tarball
+	sourceFile := StandardName(h.chartPath, h.chart) + ".tgz"
+
+	// Load the loadedChart tarball
+	loadedChart, err := loader.Load(sourceFile)
+	if err != nil {
+		return nil, fmt.Errorf("unable to load helm chart archive: %w", err)
+	}
+
+	if err = loadedChart.Validate(); err != nil {
+		return nil, fmt.Errorf("unable to validate loaded helm chart: %w", err)
+	}
+
+	return loadedChart, nil
+}
+
+// parseChartValues reads the contents of the chart values files into chartutil.Values, if any exist.
+func (h *Helm) parseChartValues() (chartutil.Values, error) {
+	valueOpts := &values.Options{}
+
+	for idx := range h.chart.ValuesFiles {
+		path := StandardValuesName(h.valuesPath, h.chart, idx)
+		valueOpts.ValueFiles = append(valueOpts.ValueFiles, path)
+	}
+
+	httpProvider := getter.Provider{
+		Schemes: []string{"http", "https"},
+		New:     getter.NewHTTPGetter,
+	}
+
+	providers := getter.Providers{httpProvider}
+	chartValues, err := valueOpts.MergeValues(providers)
+	if err != nil {
+		return chartValues, err
+	}
+
+	return helpers.MergeMapRecursive(chartValues, h.valuesOverrides), nil
+}
+
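+// createActionConfig initializes a helm SDK action.Configuration scoped to the given namespace.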
+func (h *Helm) createActionConfig(ctx context.Context, namespace string, spinner *message.Spinner) error {
+	// Initialize helm SDK
+	actionConfig := new(action.Configuration)
+	// Set the settings for the helm SDK
+	h.settings = cli.New()
+
+	// Set the namespace for helm
+	h.settings.SetNamespace(namespace)
+
+	// Setup K8s connection
+	helmLogger := spinner.Updatef
+	if logger.Enabled(ctx) {
+		l := logger.From(ctx)
+		helmLogger = slog.NewLogLogger(l.Handler(), slog.LevelDebug).Printf
+	}
+	err := actionConfig.Init(h.settings.RESTClientGetter(), namespace, "", helmLogger)
+
+	// Set the actionConfig on the receiving Helm struct
+	h.actionConfig = actionConfig
+
+	return err
+}
diff --git a/src/internal/packager2/helm/zarf.go b/src/internal/packager2/helm/zarf.go
new file mode 100644
index 0000000000..76ac55c90c
--- /dev/null
+++ b/src/internal/packager2/helm/zarf.go
@@ -0,0 +1,169 @@
+// SPDX-License-Identifier: Apache-2.0
+// SPDX-FileCopyrightText: 2021-Present The Zarf Authors
+
+// Package helm contains operations for working with helm charts.
+package helm
+
+import (
+	"context"
+	"fmt"
+	"time"
+
+	"helm.sh/helm/v3/pkg/action"
+	"k8s.io/apimachinery/pkg/runtime/schema"
+	"sigs.k8s.io/cli-utils/pkg/object"
+
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+
+	"github.com/zarf-dev/zarf/src/api/v1alpha1"
+	"github.com/zarf-dev/zarf/src/internal/healthchecks"
+	"github.com/zarf-dev/zarf/src/internal/packager/template"
+	"github.com/zarf-dev/zarf/src/pkg/cluster"
+	"github.com/zarf-dev/zarf/src/pkg/logger"
+	"github.com/zarf-dev/zarf/src/pkg/message"
+	"github.com/zarf-dev/zarf/src/pkg/transform"
+	"github.com/zarf-dev/zarf/src/pkg/utils"
+)
+
+// UpdateZarfRegistryValues updates the Zarf registry deployment with the new state values
+func (h *Helm) UpdateZarfRegistryValues(ctx context.Context) error {
+	pushUser, err := utils.GetHtpasswdString(h.state.RegistryInfo.PushUsername, h.state.RegistryInfo.PushPassword)
+	if err != nil {
+		return fmt.Errorf("error generating htpasswd string: %w", err)
+	}
+	pullUser, err := utils.GetHtpasswdString(h.state.RegistryInfo.PullUsername, h.state.RegistryInfo.PullPassword)
+	if err != nil {
+		return fmt.Errorf("error generating htpasswd string: %w", err)
+	}
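+	// GetHtpasswdString produces standard htpasswd entries; joined they look like
+	// (illustrative): "zarf-push:$2a$10$..." and "zarf-pull:$2a$10$...", one per line.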
+	registryValues := map[string]interface{}{
+		"secrets": map[string]interface{}{
+			"htpasswd": fmt.Sprintf("%s\n%s", pushUser, pullUser),
+		},
+	}
+	h.chart = v1alpha1.ZarfChart{
+		Namespace:   "zarf",
+		ReleaseName: "zarf-docker-registry",
+	}
+	err = h.UpdateReleaseValues(ctx, registryValues)
+	if err != nil {
+		return fmt.Errorf("error updating the release values: %w", err)
+	}
+
+	objs := []object.ObjMetadata{
+		{
+			GroupKind: schema.GroupKind{
+				Group: "apps",
+				Kind:  "Deployment",
+			},
+			Namespace: "zarf",
+			Name:      "zarf-docker-registry",
+		},
+	}
+	waitCtx, waitCancel := context.WithTimeout(ctx, 60*time.Second)
+	defer waitCancel()
+	err = healthchecks.WaitForReady(waitCtx, h.cluster.Watcher, objs)
+	if err != nil {
+		return err
+	}
+	return nil
+}
+
+// UpdateZarfAgentValues updates the Zarf agent deployment with the new state values
+func (h *Helm) UpdateZarfAgentValues(ctx context.Context) error {
+	l := logger.From(ctx)
+	spinner := message.NewProgressSpinner("Gathering information to update Zarf Agent TLS")
+	defer spinner.Stop()
+
+	deployment, err := h.cluster.Clientset.AppsV1().Deployments(cluster.ZarfNamespaceName).Get(ctx, "agent-hook", metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	agentImage, err := transform.ParseImageRef(deployment.Spec.Template.Spec.Containers[0].Image)
+	if err != nil {
+		return err
+	}
+
+	err = h.createActionConfig(ctx, cluster.ZarfNamespaceName, spinner)
+	if err != nil {
+		return err
+	}
+
+	// List the releases to find the current agent release name.
+	listClient := action.NewList(h.actionConfig)
+	releases, err := listClient.Run()
+	if err != nil {
+		return fmt.Errorf("unable to list helm releases: %w", err)
+	}
+	spinner.Success()
+
+	for _, release := range releases {
+		// Update the Zarf Agent release with the new values
+		if release.Chart.Name() == "raw-init-zarf-agent-zarf-agent" {
+			h.chart = v1alpha1.ZarfChart{
+				Namespace:   "zarf",
+				ReleaseName: release.Name,
+			}
+			h.variableConfig.SetConstants([]v1alpha1.Constant{
+				{
+					Name:  "AGENT_IMAGE",
+					Value: agentImage.Path,
+				},
+				{
+					Name:  "AGENT_IMAGE_TAG",
+					Value: agentImage.Tag,
+				},
+			})
+			applicationTemplates, err := template.GetZarfTemplates(ctx, "zarf-agent", h.state)
+			if err != nil {
+				return fmt.Errorf("error setting up the templates: %w", err)
+			}
+			h.variableConfig.SetApplicationTemplates(applicationTemplates)
+
+			err = h.UpdateReleaseValues(ctx, map[string]interface{}{})
+			if err != nil {
+				return fmt.Errorf("error updating the release values: %w", err)
+			}
+		}
+	}
+
+	// Trigger a rolling update for the TLS secret update to take effect.
+	// https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#updating-a-deployment
+	spinner = message.NewProgressSpinner("Performing a rolling update for the Zarf Agent deployment")
+	defer spinner.Stop()
+	l.Info("performing a rolling update for the Zarf Agent deployment")
+
+	// Re-fetch the agent deployment before we update since the resourceVersion has changed after updating the Helm release values.
+	// Avoids this error: https://github.com/kubernetes/kubernetes/issues/28149
+	deployment, err = h.cluster.Clientset.AppsV1().Deployments(cluster.ZarfNamespaceName).Get(ctx, "agent-hook", metav1.GetOptions{})
+	if err != nil {
+		return err
+	}
+	if deployment.Spec.Template.Annotations == nil {
+		deployment.Spec.Template.Annotations = map[string]string{}
+	}
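+	// This mirrors kubectl rollout restart, which sets the
+	// kubectl.kubernetes.io/restartedAt annotation on the pod template.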
+	deployment.Spec.Template.Annotations["zarf.dev/restartedAt"] = time.Now().UTC().Format(time.RFC3339)
+	_, err = h.cluster.Clientset.AppsV1().Deployments(cluster.ZarfNamespaceName).Update(ctx, deployment, metav1.UpdateOptions{})
+	if err != nil {
+		return err
+	}
+
+	objs := []object.ObjMetadata{
+		{
+			GroupKind: schema.GroupKind{
+				Group: "apps",
+				Kind:  "Deployment",
+			},
+			Namespace: cluster.ZarfNamespaceName,
+			Name:      "agent-hook",
+		},
+	}
+	waitCtx, waitCancel := context.WithTimeout(ctx, 60*time.Second)
+	defer waitCancel()
+	err = healthchecks.WaitForReady(waitCtx, h.cluster.Watcher, objs)
+	if err != nil {
+		return err
+	}
+
+	spinner.Success()
+	return nil
+}
diff --git a/src/pkg/cluster/data.go b/src/pkg/cluster/data.go
index a85f52c7ab..8e2b564e48 100644
--- a/src/pkg/cluster/data.go
+++ b/src/pkg/cluster/data.go
@@ -31,6 +31,128 @@ import (
 	"github.com/zarf-dev/zarf/src/pkg/utils/exec"
 )
 
+// InjectData waits for the target pod(s) to come up and injects the data from dataPath into them.
+func (c *Cluster) InjectData(ctx context.Context, data v1alpha1.ZarfDataInjection, dataPath string, dataIdx int) error {
+	l := logger.From(ctx)
+
+	injectionCompletionMarker := filepath.Join(dataPath, config.GetDataInjectionMarker())
+	if err := os.WriteFile(injectionCompletionMarker, []byte("🦄"), helpers.ReadWriteUser); err != nil {
+		return fmt.Errorf("unable to create the data injection completion marker: %w", err)
+	}
+
+	tarCompressFlag := ""
+	if data.Compress {
+		tarCompressFlag = "-z"
+	}
+
+	// Pod filter to ensure we only use the current deployment's pods
+	podFilterByInitContainer := func(pod corev1.Pod) bool {
+		b, err := json.Marshal(pod)
+		if err != nil {
+			return false
+		}
+		// Look everywhere in the pod for a matching data injection marker
+		return strings.Contains(string(b), config.GetDataInjectionMarker())
+	}
+
+	// Get the OS shell to execute commands in
+	shell, shellArgs := exec.GetOSShell(v1alpha1.Shell{Windows: "cmd"})
+
+	if _, _, err := exec.Cmd(shell, append(shellArgs, "tar --version")...); err != nil {
+		return fmt.Errorf("unable to execute tar, ensure it is installed in the $PATH: %w", err)
+	}
+
+	message.Debugf("Attempting to inject data into %s", data.Target)
+	l.Debug("performing data injection", "target", data.Target)
+
+	source := filepath.Join(dataPath, filepath.Base(data.Target.Path))
+	if helpers.InvalidPath(source) {
+		// The path is likely invalid because of how we compose OCI components, add an index suffix to the filename
+		source = filepath.Join(dataPath, strconv.Itoa(dataIdx), filepath.Base(data.Target.Path))
+		if helpers.InvalidPath(source) {
+			return fmt.Errorf("could not find the data injection source path %s", source)
+		}
+	}
+
+	// Wait until the pod we are injecting data into becomes available
+	target := podLookup{
+		Namespace: data.Target.Namespace,
+		Selector:  data.Target.Selector,
+		Container: data.Target.Container,
+	}
+	waitCtx, waitCancel := context.WithTimeout(ctx, 90*time.Second)
+	defer waitCancel()
+	pods, err := waitForPodsAndContainers(waitCtx, c.Clientset, target, podFilterByInitContainer)
+	if err != nil {
+		return err
+	}
+
+	// Inject into all the pods
+	for _, pod := range pods {
+		// Try to use the embedded kubectl if we can
+		zarfCommand, err := utils.GetFinalExecutableCommand()
+		kubectlBinPath := "kubectl"
+		if err != nil {
+			message.Warnf("Unable to get the zarf executable path, falling back to host kubectl: %s", err)
+			l.Warn("unable to get the zarf executable path, falling back to host kubectl", "error", err)
+		} else {
+			kubectlBinPath = fmt.Sprintf("%s tools kubectl", zarfCommand)
+		}
+		kubectlCmd := fmt.Sprintf("%s exec -i -n %s %s -c %s ", kubectlBinPath, data.Target.Namespace, pod.Name, data.Target.Container)
+
+		// Note that each command flag is separated to provide the widest cross-platform tar support
+		tarCmd := fmt.Sprintf("tar -c %s -f -", tarCompressFlag)
+		untarCmd := fmt.Sprintf("tar -x %s -v -f - -C %s", tarCompressFlag, data.Target.Path)
+
+		// Must create the target directory before trying to change to it for untar
+		mkdirCmd := fmt.Sprintf("%s -- mkdir -p %s", kubectlCmd, data.Target.Path)
+		if err := exec.CmdWithPrint(shell, append(shellArgs, mkdirCmd)...); err != nil {
+			return fmt.Errorf("unable to create the data injection target directory %s in pod %s: %w", data.Target.Path, pod.Name, err)
+		}
+
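+		// The composed command streams a tarball over the exec session, shaped like
+		// (illustrative): tar -c -z -C <src> . | zarf tools kubectl exec -i -n <ns> <pod> -c <ctr> -- tar -x -z -v -f - -C <dst>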
+		cpPodCmd := fmt.Sprintf("%s -C %s . | %s -- %s",
+			tarCmd,
+			source,
+			kubectlCmd,
+			untarCmd,
+		)
+
+		// Do the actual data injection
+		if err := exec.CmdWithPrint(shell, append(shellArgs, cpPodCmd)...); err != nil {
+			return fmt.Errorf("could not copy data into the pod %s: %w", pod.Name, err)
+		}
+
+		// Leave a marker in the target container for pods to track the sync action
+		cpPodCmd = fmt.Sprintf("%s -C %s %s | %s -- %s",
+			tarCmd,
+			dataPath,
+			config.GetDataInjectionMarker(),
+			kubectlCmd,
+			untarCmd,
+		)
+
+		if err := exec.CmdWithPrint(shell, append(shellArgs, cpPodCmd)...); err != nil {
+			return fmt.Errorf("could not save the Zarf sync completion file after injection into pod %s: %w", pod.Name, err)
+		}
+	}
+
+	// Do not look for a specific container after injection in case they are running an init container
+	podOnlyTarget := podLookup{
+		Namespace: data.Target.Namespace,
+		Selector:  data.Target.Selector,
+	}
+
+	// Block one final time to make sure at least one pod has come up and injected the data
+	// Using only the pod as the final selector because we don't know what the container name will be
+	// Still using the init container filter to make sure we have the right running pod
+	_, err = waitForPodsAndContainers(ctx, c.Clientset, podOnlyTarget, podFilterByInitContainer)
+	if err != nil {
+		return err
+	}
+
+	return nil
+}
+
 // HandleDataInjection waits for the target pod(s) to come up and inject the data into them
 // todo:  this currently requires kubectl but we should have enough k8s work to make this native now.
 func (c *Cluster) HandleDataInjection(ctx context.Context, data v1alpha1.ZarfDataInjection, componentPath *layout.ComponentPaths, dataIdx int) error {