From 308c6bc414d5c6c207bc021ca2949df602725e52 Mon Sep 17 00:00:00 2001
From: Mateusz Urbanek
Date: Thu, 30 Oct 2025 13:39:24 +0100
Subject: [PATCH] feat: add full disk volumes

When set to `disk`, a full block device is used for the volume.

When `volumeType = "disk"`:
- Size-specific settings are not allowed in the provisioning block (`minSize`, `maxSize`, `grow`).

Signed-off-by: Mateusz Urbanek
---
 hack/release.toml                             |  12 +-
 .../block/internal/volumes/format.go          |   6 +-
 .../block/internal/volumes/locate.go          |  21 +-
 .../pkg/controllers/block/user_volumes.go     |  58 +++-
 .../controllers/block/volume_config_test.go   |  92 ++++---
 internal/integration/api/volumes.go           | 253 +++++++++++++++---
 .../config/schemas/config.schema.json         |   5 +-
 pkg/machinery/config/types/block/block_doc.go |   5 +-
 .../config/types/block/raw_volume_config.go   |   2 +-
 .../config/types/block/swap_volume_config.go  |   2 +-
 .../config/types/block/user_volume_config.go  |  75 +++++-
 .../types/block/user_volume_config_test.go    |  32 +++
 .../config/types/block/volume_config.go       |  22 +-
 .../configuration/block/uservolumeconfig.md   |  54 +++-
 .../content/v1.12/schemas/config.schema.json  |   5 +-
 15 files changed, 519 insertions(+), 125 deletions(-)

diff --git a/hack/release.toml b/hack/release.toml
index 68f5e82caef..a5805ac7b3c 100644
--- a/hack/release.toml
+++ b/hack/release.toml
@@ -168,7 +168,17 @@ It should not be used for workloads requiring predictable storage quotas.
 title = "CRI Registry Configuration"
 description = """\
 The CRI registry configuration in v1apha1 legacy machine configuration under `.machine.registries` is now deprecated, but still supported for backwards compatibility.
-New configuration documents `RegistryMirrorConfig`, `RegistryAuthConfig` and `RegistryTLSConfig` should be used instead.
+New configuration documents `RegistryMirrorConfig`, `RegistryAuthConfig` and `RegistryTLSConfig` should be used instead.
+"""
+
+  [notes.disk-user-volumes]
+    title = "New User Volume Type: disk"
+    description = """\
+`volumeType` in `UserVolumeConfig` can now be set to `disk`.
+When set to `disk`, a full block device is used for the volume.
+
+When `volumeType = "disk"`:
+- Size-specific settings are not allowed in the provisioning block (`minSize`, `maxSize`, `grow`).
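+
+For example (this mirrors the `disk` example added to `uservolumeconfig.md` in this patch; the NVMe disk selector is illustrative):
+
+```yaml
+apiVersion: v1alpha1
+kind: UserVolumeConfig
+name: local-data
+volumeType: disk
+provisioning:
+  diskSelector:
+    match: disk.transport == "nvme"
+filesystem:
+  type: xfs
+```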
""" [make_deps] diff --git a/internal/app/machined/pkg/controllers/block/internal/volumes/format.go b/internal/app/machined/pkg/controllers/block/internal/volumes/format.go index 628f90c4258..b0b4fd90d5d 100644 --- a/internal/app/machined/pkg/controllers/block/internal/volumes/format.go +++ b/internal/app/machined/pkg/controllers/block/internal/volumes/format.go @@ -115,7 +115,7 @@ func Format(ctx context.Context, logger *zap.Logger, volumeContext ManagerContex makefsOptions = append(makefsOptions, makefs.WithConfigFile(quirks.New("").XFSMkfsConfig())) if err = makefs.XFS(volumeContext.Status.MountLocation, makefsOptions...); err != nil { - return fmt.Errorf("error formatting XFS: %w", err) + return xerrors.NewTaggedf[Retryable]("error formatting XFS: %w", err) } case block.FilesystemTypeEXT4: var makefsOptions []makefs.Option @@ -125,14 +125,14 @@ func Format(ctx context.Context, logger *zap.Logger, volumeContext ManagerContex } if err = makefs.Ext4(volumeContext.Status.MountLocation, makefsOptions...); err != nil { - return fmt.Errorf("error formatting ext4: %w", err) + return xerrors.NewTaggedf[Retryable]("error formatting ext4: %w", err) } case block.FilesystemTypeSwap: if err = swap.Format(volumeContext.Status.MountLocation, swap.FormatOptions{ Label: volumeContext.Cfg.TypedSpec().Provisioning.FilesystemSpec.Label, UUID: uuid.New(), }); err != nil { - return fmt.Errorf("error formatting swap: %w", err) + return xerrors.NewTaggedf[Retryable]("error formatting swap: %w", err) } default: return fmt.Errorf("unsupported filesystem type: %s", volumeContext.Cfg.TypedSpec().Provisioning.FilesystemSpec.Type) diff --git a/internal/app/machined/pkg/controllers/block/internal/volumes/locate.go b/internal/app/machined/pkg/controllers/block/internal/volumes/locate.go index 38ada7b7507..a6264571cd0 100644 --- a/internal/app/machined/pkg/controllers/block/internal/volumes/locate.go +++ b/internal/app/machined/pkg/controllers/block/internal/volumes/locate.go @@ -8,6 +8,7 @@ import ( "context" "fmt" + "github.com/google/cel-go/cel" "github.com/siderolabs/gen/value" "github.com/siderolabs/gen/xerrors" "github.com/siderolabs/go-blockdevice/v2/partitioning" @@ -44,8 +45,18 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M // attempt to discover the volume for _, dv := range volumeContext.DiscoveredVolumes { - matchContext := map[string]any{ - "volume": dv, + var locator *cel.Env + + matchContext := map[string]any{} + + switch volumeType { //nolint:exhaustive // we do not need to repeat exhaustive check here + case block.VolumeTypeDisk: + locator = celenv.DiskLocator() + + case block.VolumeTypePartition: + locator = celenv.VolumeLocator() + + matchContext["volume"] = dv } // add disk to the context, so we can use it in CEL expressions @@ -63,7 +74,7 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M } } - matches, err := volumeContext.Cfg.TypedSpec().Locator.Match.EvalBool(celenv.VolumeLocator(), matchContext) + matches, err := volumeContext.Cfg.TypedSpec().Locator.Match.EvalBool(locator, matchContext) if err != nil { return fmt.Errorf("error evaluating volume locator: %w", err) } @@ -127,6 +138,10 @@ func LocateAndProvision(ctx context.Context, logger *zap.Logger, volumeContext M return fmt.Errorf("no disks matched selector for volume") } + if volumeType == block.VolumeTypeDisk && len(matchedDisks) > 1 { + return fmt.Errorf("multiple disks matched selector for disk volume; matched disks: %v", matchedDisks) + } + logger.Debug("matched 
disks", zap.Strings("disks", matchedDisks)) // analyze each disk, until we find the one which is the best fit diff --git a/internal/app/machined/pkg/controllers/block/user_volumes.go b/internal/app/machined/pkg/controllers/block/user_volumes.go index c05e81a4aa5..bb46141c3ce 100644 --- a/internal/app/machined/pkg/controllers/block/user_volumes.go +++ b/internal/app/machined/pkg/controllers/block/user_volumes.go @@ -46,6 +46,48 @@ var ( } switch userVolumeConfig.Type().ValueOr(block.VolumeTypePartition) { + case block.VolumeTypeDirectory: + userVolumeResource.TransformFunc = newVolumeConfigBuilder(). + WithType(block.VolumeTypeDirectory). + WithMount(block.MountSpec{ + TargetPath: userVolumeConfig.Name(), + ParentID: constants.UserVolumeMountPoint, + SelinuxLabel: constants.EphemeralSelinuxLabel, + FileMode: 0o755, + UID: 0, + GID: 0, + BindTarget: pointer.To(userVolumeConfig.Name()), + }). + WriterFunc() + + case block.VolumeTypeDisk: + userVolumeResource.TransformFunc = newVolumeConfigBuilder(). + WithType(block.VolumeTypeDisk). + WithLocator(userVolumeConfig.Provisioning().DiskSelector().ValueOr(noMatch)). + WithProvisioning(block.ProvisioningSpec{ + Wave: block.WaveUserVolumes, + DiskSelector: block.DiskSelector{ + Match: userVolumeConfig.Provisioning().DiskSelector().ValueOr(noMatch), + }, + PartitionSpec: block.PartitionSpec{ + TypeUUID: partition.LinuxFilesystemData, + }, + FilesystemSpec: block.FilesystemSpec{ + Type: userVolumeConfig.Filesystem().Type(), + }, + }). + WithMount(block.MountSpec{ + TargetPath: userVolumeConfig.Name(), + ParentID: constants.UserVolumeMountPoint, + SelinuxLabel: constants.EphemeralSelinuxLabel, + FileMode: 0o755, + UID: 0, + GID: 0, + ProjectQuotaSupport: userVolumeConfig.Filesystem().ProjectQuotaSupport(), + }). + WithConvertEncryptionConfiguration(userVolumeConfig.Encryption()). + WriterFunc() + case block.VolumeTypePartition: userVolumeResource.TransformFunc = newVolumeConfigBuilder(). WithType(block.VolumeTypePartition). @@ -77,20 +119,8 @@ var ( }). WithConvertEncryptionConfiguration(userVolumeConfig.Encryption()). WriterFunc() - case block.VolumeTypeDirectory: - userVolumeResource.TransformFunc = newVolumeConfigBuilder(). - WithType(block.VolumeTypeDirectory). - WithMount(block.MountSpec{ - TargetPath: userVolumeConfig.Name(), - ParentID: constants.UserVolumeMountPoint, - SelinuxLabel: constants.EphemeralSelinuxLabel, - FileMode: 0o755, - UID: 0, - GID: 0, - BindTarget: pointer.To(userVolumeConfig.Name()), - }). 
- WriterFunc() - case block.VolumeTypeDisk, block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay: + + case block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay: fallthrough default: diff --git a/internal/app/machined/pkg/controllers/block/volume_config_test.go b/internal/app/machined/pkg/controllers/block/volume_config_test.go index d38447c163a..2ff31005698 100644 --- a/internal/app/machined/pkg/controllers/block/volume_config_test.go +++ b/internal/app/machined/pkg/controllers/block/volume_config_test.go @@ -404,18 +404,26 @@ func (suite *VolumeConfigSuite) TestReconcileUserRawVolumes() { } func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { - uv1 := blockcfg.NewUserVolumeConfigV1Alpha1() - uv1.MetaName = "data1" - suite.Require().NoError(uv1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`system_disk`))) - uv1.ProvisioningSpec.ProvisioningMinSize = blockcfg.MustByteSize("10GiB") - uv1.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("100GiB") - uv1.FilesystemSpec.FilesystemType = block.FilesystemTypeXFS - - uv2 := blockcfg.NewUserVolumeConfigV1Alpha1() - uv2.MetaName = "data2" - suite.Require().NoError(uv2.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`))) - uv2.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("1TiB") - uv2.EncryptionSpec = blockcfg.EncryptionSpec{ + userVolumeNames := []string{ + "data-part1", + "data-part2", + "data-dir1", + "data-disk1", + } + + uvPart1 := blockcfg.NewUserVolumeConfigV1Alpha1() + uvPart1.MetaName = userVolumeNames[0] + suite.Require().NoError(uvPart1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`system_disk`))) + uvPart1.ProvisioningSpec.ProvisioningMinSize = blockcfg.MustByteSize("10GiB") + uvPart1.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("100GiB") + uvPart1.FilesystemSpec.FilesystemType = block.FilesystemTypeXFS + + uvPart2 := blockcfg.NewUserVolumeConfigV1Alpha1() + uvPart2.MetaName = userVolumeNames[1] + uvPart2.VolumeType = pointer.To(block.VolumeTypePartition) + suite.Require().NoError(uvPart2.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`))) + uvPart2.ProvisioningSpec.ProvisioningMaxSize = blockcfg.MustByteSize("1TiB") + uvPart2.EncryptionSpec = blockcfg.EncryptionSpec{ EncryptionProvider: block.EncryptionProviderLUKS2, EncryptionKeys: []blockcfg.EncryptionKey{ { @@ -429,32 +437,45 @@ func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { }, } - uv3 := blockcfg.NewUserVolumeConfigV1Alpha1() - uv3.MetaName = "data3" - uv3.VolumeType = pointer.To(block.VolumeTypeDirectory) + uvDir1 := blockcfg.NewUserVolumeConfigV1Alpha1() + uvDir1.MetaName = userVolumeNames[2] + uvDir1.VolumeType = pointer.To(block.VolumeTypeDirectory) + + uvDisk1 := blockcfg.NewUserVolumeConfigV1Alpha1() + uvDisk1.MetaName = userVolumeNames[3] + suite.Require().NoError(uvDisk1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`!system_disk`))) + uvDisk1.EncryptionSpec = blockcfg.EncryptionSpec{ + EncryptionProvider: block.EncryptionProviderLUKS2, + EncryptionKeys: []blockcfg.EncryptionKey{ + { + KeySlot: 0, + KeyTPM: &blockcfg.EncryptionKeyTPM{}, + }, + { + KeySlot: 1, + KeyStatic: &blockcfg.EncryptionKeyStatic{KeyData: "secret"}, + }, + }, + } sv1 := blockcfg.NewSwapVolumeConfigV1Alpha1() sv1.MetaName = "swap" suite.Require().NoError(sv1.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`disk.transport == "nvme"`))) sv1.ProvisioningSpec.ProvisioningMaxSize = 
blockcfg.MustByteSize("2GiB") - ctr, err := container.New(uv1, uv2, uv3, sv1) + ctr, err := container.New(uvPart1, uvPart2, uvDir1, uvDisk1, sv1) suite.Require().NoError(err) cfg := config.NewMachineConfig(ctr) suite.Create(cfg) - userVolumes := []string{ - constants.UserVolumePrefix + "data1", - constants.UserVolumePrefix + "data2", - constants.UserVolumePrefix + "data3", - } + userVolumes := xslices.Map(userVolumeNames, func(in string) string { return constants.UserVolumePrefix + in }) ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) { asrt.Contains(vc.Metadata().Labels().Raw(), block.UserVolumeLabel) switch vc.Metadata().ID() { - case userVolumes[0], userVolumes[1]: + case userVolumes[0], userVolumes[1], userVolumes[3]: asrt.Equal(block.VolumeTypePartition, vc.TypedSpec().Type) asrt.Contains(userVolumes, vc.TypedSpec().Provisioning.PartitionSpec.Label) @@ -463,11 +484,12 @@ func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { asrt.NoError(err) asrt.Contains(string(locator), vc.TypedSpec().Provisioning.PartitionSpec.Label) + case userVolumes[2]: asrt.Equal(block.VolumeTypeDirectory, vc.TypedSpec().Type) } - asrt.Contains([]string{"data1", "data2", "data3"}, vc.TypedSpec().Mount.TargetPath) + asrt.Contains(userVolumeNames, vc.TypedSpec().Mount.TargetPath) asrt.Equal(constants.UserVolumeMountPoint, vc.TypedSpec().Mount.ParentID) switch vc.Metadata().ID() { @@ -506,8 +528,8 @@ func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { suite.AddFinalizer(block.NewVolumeMountRequest(block.NamespaceName, volumeID).Metadata(), "test") } - // drop the first volume - ctr, err = container.New(uv2) + // keep only the first volume + ctr, err = container.New(uvPart1) suite.Require().NoError(err) newCfg := config.NewMachineConfig(ctr) @@ -516,7 +538,7 @@ func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { // controller should tear down removed resources ctest.AssertResources(suite, userVolumes, func(vc *block.VolumeConfig, asrt *assert.Assertions) { - if vc.Metadata().ID() == userVolumes[1] { + if vc.Metadata().ID() == userVolumes[0] { asrt.Equal(resource.PhaseRunning, vc.Metadata().Phase()) } else { asrt.Equal(resource.PhaseTearingDown, vc.Metadata().Phase()) @@ -524,7 +546,7 @@ func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { }) ctest.AssertResources(suite, userVolumes, func(vmr *block.VolumeMountRequest, asrt *assert.Assertions) { - if vmr.Metadata().ID() == userVolumes[1] { + if vmr.Metadata().ID() == userVolumes[0] { asrt.Equal(resource.PhaseRunning, vmr.Metadata().Phase()) } else { asrt.Equal(resource.PhaseTearingDown, vmr.Metadata().Phase()) @@ -532,14 +554,14 @@ func (suite *VolumeConfigSuite) TestReconcileUserSwapVolumes() { }) // remove finalizers - suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolumes[0]).Metadata(), "test") - suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolumes[0]).Metadata(), "test") - suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolumes[2]).Metadata(), "test") - suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolumes[2]).Metadata(), "test") + for _, userVolume := range userVolumes[1:] { + suite.RemoveFinalizer(block.NewVolumeConfig(block.NamespaceName, userVolume).Metadata(), "test") + suite.RemoveFinalizer(block.NewVolumeMountRequest(block.NamespaceName, userVolume).Metadata(), "test") + } // now the resources should be removed - 
ctest.AssertNoResource[*block.VolumeConfig](suite, userVolumes[0]) - ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolumes[0]) - ctest.AssertNoResource[*block.VolumeConfig](suite, userVolumes[2]) - ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolumes[2]) + for _, userVolume := range userVolumes[1:] { + ctest.AssertNoResource[*block.VolumeConfig](suite, userVolume) + ctest.AssertNoResource[*block.VolumeMountRequest](suite, userVolume) + } } diff --git a/internal/integration/api/volumes.go b/internal/integration/api/volumes.go index 0b468f48467..fb6bb3ff800 100644 --- a/internal/integration/api/volumes.go +++ b/internal/integration/api/volumes.go @@ -265,32 +265,7 @@ func (suite *VolumesSuite) TestLVMActivation() { suite.Require().Contains(stdout, "Logical volume \"lv1\" created.") - defer func() { - suite.T().Logf("removing LVM volumes %s/%s", node, nodeName) - - deletePodDef, err := suite.NewPrivilegedPod("pv-destroy") - suite.Require().NoError(err) - - deletePodDef = deletePodDef.WithNodeName(nodeName) - - suite.Require().NoError(deletePodDef.Create(suite.ctx, 5*time.Minute)) - - defer deletePodDef.Delete(suite.ctx) //nolint:errcheck - - if _, _, err := deletePodDef.Exec( - suite.ctx, - "nsenter --mount=/proc/1/ns/mnt -- vgremove --nolocking --yes vg0", - ); err != nil { - suite.T().Logf("failed to remove pv vg0: %v", err) - } - - if _, _, err := deletePodDef.Exec( - suite.ctx, - fmt.Sprintf("nsenter --mount=/proc/1/ns/mnt -- pvremove --nolocking --yes %s", userDisksJoined), - ); err != nil { - suite.T().Logf("failed to remove pv backed by volumes %s: %v", userDisksJoined, err) - } - }() + defer suite.deleteLVMVolumes(node, nodeName, userDisksJoined) suite.T().Logf("rebooting node %s/%s", node, nodeName) @@ -309,6 +284,33 @@ func (suite *VolumesSuite) TestLVMActivation() { }, 5*time.Second, 1*time.Second, "LVM volume group was not activated after reboot") } +func (suite *VolumesSuite) deleteLVMVolumes(node, nodeName, userDisksJoined string) { + suite.T().Logf("removing LVM volumes %s/%s", node, nodeName) + + deletePodDef, err := suite.NewPrivilegedPod("pv-destroy") + suite.Require().NoError(err) + + deletePodDef = deletePodDef.WithNodeName(nodeName) + + suite.Require().NoError(deletePodDef.Create(suite.ctx, 5*time.Minute)) + + defer deletePodDef.Delete(suite.ctx) //nolint:errcheck + + if _, _, err := deletePodDef.Exec( + suite.ctx, + "nsenter --mount=/proc/1/ns/mnt -- vgremove --nolocking --yes vg0", + ); err != nil { + suite.T().Logf("failed to remove pv vg0: %v", err) + } + + if _, _, err := deletePodDef.Exec( + suite.ctx, + fmt.Sprintf("nsenter --mount=/proc/1/ns/mnt -- pvremove --nolocking --yes %s", userDisksJoined), + ); err != nil { + suite.T().Logf("failed to remove pv backed by volumes %s: %v", userDisksJoined, err) + } +} + func (suite *VolumesSuite) lvmVolumeExists(node string, expectedVolumes []string) bool { ctx := client.WithNode(suite.ctx, node) @@ -436,7 +438,7 @@ func (suite *VolumesSuite) TestUserVolumesStatus() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -468,7 +470,7 @@ func (suite *VolumesSuite) TestVolumesStatus() { } } -// TestUserVolumesPartition performs a series of operations on user volumes (partition type): creating, 
destroying, verifying, etc. +// TestUserVolumesPartition performs a series of operations on user volumes: creating, destroying, verifying, etc. func (suite *VolumesSuite) TestUserVolumesPartition() { if testing.Short() { suite.T().Skip("skipping test in short mode.") @@ -527,7 +529,7 @@ func (suite *VolumesSuite) TestUserVolumesPartition() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -612,7 +614,178 @@ func (suite *VolumesSuite) TestUserVolumesPartition() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) + }, + ) + + rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, + func(vs *block.MountStatus, asrt *assert.Assertions) { + if vs.Metadata().ID() == userVolumeIDs[0] { + // check that the project quota support is enabled + asrt.True(vs.TypedSpec().ProjectQuotaSupport, "project quota support should be enabled for %s", vs.Metadata().ID()) + } else { + // check that the project quota support is disabled + asrt.False(vs.TypedSpec().ProjectQuotaSupport, "project quota support should be disabled for %s", vs.Metadata().ID()) + } + }) + + // clean up + suite.RemoveMachineConfigDocumentsByName(ctx, blockcfg.UserVolumeConfigKind, volumeIDs...) + + for _, userVolumeID := range userVolumeIDs { + rtestutils.AssertNoResource[*block.VolumeStatus](ctx, suite.T(), suite.Client.COSI, userVolumeID) + } + + suite.Require().NoError(suite.Client.BlockDeviceWipe(ctx, &storage.BlockDeviceWipeRequest{ + Devices: []*storage.BlockDeviceWipeDescriptor{ + { + Device: filepath.Base(userDisks[0]), + Method: storage.BlockDeviceWipeDescriptor_FAST, + }, + }, + })) + + // wait for the discovered volume reflect wiped status + rtestutils.AssertResource(ctx, suite.T(), suite.Client.COSI, filepath.Base(userDisks[0]), + func(dv *block.DiscoveredVolume, asrt *assert.Assertions) { + asrt.Empty(dv.TypedSpec().Name, "expected discovered volume %s to be wiped", dv.Metadata().ID()) + }) +} + +// TestUserVolumesDisk performs a series of operations on user volumes: creating, destroying, verifying, etc. 
+func (suite *VolumesSuite) TestUserVolumesDisk() { + if testing.Short() { + suite.T().Skip("skipping test in short mode.") + } + + if suite.Cluster == nil || suite.Cluster.Provisioner() != base.ProvisionerQEMU { + suite.T().Skip("skipping test for non-qemu provisioner") + } + + node := suite.RandomDiscoveredNodeInternalIP(machine.TypeWorker) + + k8sNode, err := suite.GetK8sNodeByInternalIP(suite.ctx, node) + suite.Require().NoError(err) + + nodeName := k8sNode.Name + + userDisks := suite.UserDisks(suite.ctx, node) + + if len(userDisks) < 1 { + suite.T().Skipf("skipping test, not enough user disks available on node %s/%s: %q", node, nodeName, userDisks) + } + + suite.T().Logf("verifying user volumes on node %s/%s with disk %s", node, nodeName, userDisks[0]) + + ctx := client.WithNode(suite.ctx, node) + + disk, err := safe.StateGetByID[*block.Disk](ctx, suite.Client.COSI, filepath.Base(userDisks[0])) + suite.Require().NoError(err) + + volumeName := fmt.Sprintf("%04x", rand.Int31()) + "-" + + const numVolumes = 1 + + volumeIDs := make([]string, numVolumes) + + for i := range numVolumes { + volumeIDs[i] = volumeName + strconv.Itoa(i) + } + + userVolumeIDs := xslices.Map(volumeIDs, func(volumeID string) string { return constants.UserVolumePrefix + volumeID }) + + configDocs := xslices.Map(volumeIDs, func(volumeID string) any { + doc := blockcfg.NewUserVolumeConfigV1Alpha1() + doc.MetaName = volumeID + doc.VolumeType = pointer.To(block.VolumeTypeDisk) + doc.ProvisioningSpec.DiskSelectorSpec.Match = cel.MustExpression( + cel.ParseBooleanExpression(fmt.Sprintf("'%s' in disk.symlinks", disk.TypedSpec().Symlinks[0]), celenv.DiskLocator()), + ) + + return doc + }) + + // create user volumes + suite.PatchMachineConfig(ctx, configDocs...) + + rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, + func(vs *block.VolumeStatus, asrt *assert.Assertions) { + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) + }, + ) + + // check that the volumes are mounted + rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, + func(vs *block.MountStatus, _ *assert.Assertions) {}) + + // create a pod using user volumes + podDef, err := suite.NewPod("user-volume-test") + suite.Require().NoError(err) + + // using subdirectory here to test that the hostPath mount is properly propagated into the kubelet + podDef = podDef.WithNodeName(nodeName). + WithNamespace("kube-system"). 
+ WithHostVolumeMount(filepath.Join(constants.UserVolumeMountPoint, volumeIDs[0], "data"), "/mnt/data") + + suite.Require().NoError(podDef.Create(suite.ctx, 1*time.Minute)) + + _, _, err = podDef.Exec(suite.ctx, "mkdir -p /mnt/data/test") + suite.Require().NoError(err) + + suite.Require().NoError(podDef.Delete(suite.ctx)) + + // verify that directory exists + expectedPath := filepath.Join(constants.UserVolumeMountPoint, volumeIDs[0], "data", "test") + + stream, err := suite.Client.LS(ctx, &machineapi.ListRequest{ + Root: expectedPath, + Types: []machineapi.ListRequest_Type{machineapi.ListRequest_DIRECTORY}, + }) + + suite.Require().NoError(err) + + suite.Require().NoError(helpers.ReadGRPCStream(stream, func(info *machineapi.FileInfo, _ string, _ bool) error { + suite.T().Logf("found %s on node %s", info.Name, node) + suite.Require().Equal(expectedPath, info.Name, "expected %s to exist", expectedPath) + + return nil + })) + + // now, remove one of the volumes, wipe the partition and re-create the volume + vs, err := safe.ReaderGetByID[*block.VolumeStatus](ctx, suite.Client.COSI, userVolumeIDs[0]) + suite.Require().NoError(err) + + suite.RemoveMachineConfigDocumentsByName(ctx, blockcfg.UserVolumeConfigKind, volumeIDs[0]) + + rtestutils.AssertNoResource[*block.VolumeStatus](ctx, suite.T(), suite.Client.COSI, userVolumeIDs[0]) + + suite.Require().EventuallyWithT(func(collect *assert.CollectT) { + // a little retry loop, as the device might be considered busy for a little while after unmounting + asrt := assert.New(collect) + + asrt.NoError(suite.Client.BlockDeviceWipe(ctx, &storage.BlockDeviceWipeRequest{ + Devices: []*storage.BlockDeviceWipeDescriptor{ + { + Device: filepath.Base(vs.TypedSpec().Location), + Method: storage.BlockDeviceWipeDescriptor_FAST, + DropPartition: true, + }, + }, + })) + }, time.Minute, time.Second, "failed to wipe partition %s", vs.TypedSpec().Location) + + // wait for the discovered volume to lose filesystem + rtestutils.AssertResource(ctx, suite.T(), suite.Client.COSI, filepath.Base(vs.TypedSpec().Location), func(r *block.DiscoveredVolume, asrt *assert.Assertions) { + asrt.Empty(r.TypedSpec().Name) // no filesystem + }) + + // re-create the volume with project quota support + configDocs[0].(*blockcfg.UserVolumeConfigV1Alpha1).FilesystemSpec.ProjectQuotaSupportConfig = pointer.To(true) + suite.PatchMachineConfig(ctx, configDocs[0]) + + rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, + func(vs *block.VolumeStatus, asrt *assert.Assertions) { + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -650,8 +823,8 @@ func (suite *VolumesSuite) TestUserVolumesPartition() { }) } -// TestUserVolumesBind performs a series of operations on user volumes (bind type): creating, destroying, verifying, etc. -func (suite *VolumesSuite) TestUserVolumesBind() { +// TestUserVolumesDirectory performs a series of operations on user volumes: creating, destroying, verifying, etc. 
+func (suite *VolumesSuite) TestUserVolumesDirectory() { if testing.Short() { suite.T().Skip("skipping test in short mode.") } @@ -694,7 +867,7 @@ func (suite *VolumesSuite) TestUserVolumesBind() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -745,7 +918,7 @@ func (suite *VolumesSuite) TestUserVolumesBind() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, userVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -816,7 +989,7 @@ func (suite *VolumesSuite) TestRawVolumes() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, rawVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -862,7 +1035,7 @@ func (suite *VolumesSuite) TestRawVolumes() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, rawVolumeIDs, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -938,7 +1111,7 @@ func (suite *VolumesSuite) TestExistingVolumes() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, []resource.ID{userVolumeID}, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -964,7 +1137,7 @@ func (suite *VolumesSuite) TestExistingVolumes() { // wait for the existing volume to be discovered rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, []resource.ID{existingVolumeID}, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) asrt.Equal(userDisks[0], vs.TypedSpec().ParentLocation) }, ) @@ -1056,7 +1229,7 @@ func (suite *VolumesSuite) TestSwapStatus() { return sv.Metadata().ID() }), func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) @@ -1135,7 +1308,7 @@ func (suite *VolumesSuite) TestSwapOnOff() { rtestutils.AssertResources(ctx, suite.T(), suite.Client.COSI, []string{swapVolumeID}, func(vs *block.VolumeStatus, asrt *assert.Assertions) { - asrt.Equal(block.VolumePhaseReady, vs.TypedSpec().Phase) + asrt.Equalf(block.VolumePhaseReady, vs.TypedSpec().Phase, "Expected %q, but got %q (%s)", 
block.VolumePhaseReady, vs.TypedSpec().Phase, vs.Metadata().ID()) }, ) diff --git a/pkg/machinery/config/schemas/config.schema.json b/pkg/machinery/config/schemas/config.schema.json index bb1d862e496..d5865181c2f 100644 --- a/pkg/machinery/config/schemas/config.schema.json +++ b/pkg/machinery/config/schemas/config.schema.json @@ -451,8 +451,9 @@ }, "volumeType": { "enum": [ - "partition", - "directory" + "directory", + "disk", + "partition" ], "title": "volumeType", "description": "Volume type.\n", diff --git a/pkg/machinery/config/types/block/block_doc.go b/pkg/machinery/config/types/block/block_doc.go index f975d960a3e..13c436c1315 100644 --- a/pkg/machinery/config/types/block/block_doc.go +++ b/pkg/machinery/config/types/block/block_doc.go @@ -506,8 +506,9 @@ func (UserVolumeConfigV1Alpha1) Doc() *encoder.Doc { Description: "Volume type.", Comments: [3]string{"" /* encoder.HeadComment */, "Volume type." /* encoder.LineComment */, "" /* encoder.FootComment */}, Values: []string{ - "partition", "directory", + "disk", + "partition", }, }, { @@ -536,6 +537,8 @@ func (UserVolumeConfigV1Alpha1) Doc() *encoder.Doc { doc.AddExample("", exampleUserVolumeConfigV1Alpha1Directory()) + doc.AddExample("", exampleUserVolumeConfigV1Alpha1Disk()) + doc.AddExample("", exampleUserVolumeConfigV1Alpha1Partition()) return doc diff --git a/pkg/machinery/config/types/block/raw_volume_config.go b/pkg/machinery/config/types/block/raw_volume_config.go index ff3c696406c..d31b2bb2c8a 100644 --- a/pkg/machinery/config/types/block/raw_volume_config.go +++ b/pkg/machinery/config/types/block/raw_volume_config.go @@ -140,7 +140,7 @@ func (s *RawVolumeConfigV1Alpha1) Validate(validation.RuntimeMode, ...validation validationErrors = errors.Join(validationErrors, errors.New("name can only contain lowercase and uppercase ASCII letters, digits, and hyphens")) } - extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true) + extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true, true) warnings = append(warnings, extraWarnings...) validationErrors = errors.Join(validationErrors, extraErrors) diff --git a/pkg/machinery/config/types/block/swap_volume_config.go b/pkg/machinery/config/types/block/swap_volume_config.go index 257c55b9d3e..14c76c64c01 100644 --- a/pkg/machinery/config/types/block/swap_volume_config.go +++ b/pkg/machinery/config/types/block/swap_volume_config.go @@ -151,7 +151,7 @@ func (s *SwapVolumeConfigV1Alpha1) Validate(validation.RuntimeMode, ...validatio validationErrors = errors.Join(validationErrors, errors.New("name can only contain lowercase and uppercase ASCII letters, digits, and hyphens")) } - extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true) + extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true, true) warnings = append(warnings, extraWarnings...) validationErrors = errors.Join(validationErrors, extraErrors) diff --git a/pkg/machinery/config/types/block/user_volume_config.go b/pkg/machinery/config/types/block/user_volume_config.go index 85b61d3379b..93a852050b9 100644 --- a/pkg/machinery/config/types/block/user_volume_config.go +++ b/pkg/machinery/config/types/block/user_volume_config.go @@ -59,6 +59,7 @@ type VolumeType = block.VolumeType // The partition label is automatically generated as `u-`. 
// examples: // - value: exampleUserVolumeConfigV1Alpha1Directory() +// - value: exampleUserVolumeConfigV1Alpha1Disk() // - value: exampleUserVolumeConfigV1Alpha1Partition() // alias: UserVolumeConfig // schemaRoot: true @@ -75,8 +76,9 @@ type UserVolumeConfigV1Alpha1 struct { // description: | // Volume type. // values: - // - partition // - directory + // - disk + // - partition // schema: // type: string VolumeType *VolumeType `yaml:"volumeType,omitempty"` @@ -143,6 +145,37 @@ func exampleUserVolumeConfigV1Alpha1Directory() *UserVolumeConfigV1Alpha1 { return cfg } +func exampleUserVolumeConfigV1Alpha1Disk() *UserVolumeConfigV1Alpha1 { + cfg := NewUserVolumeConfigV1Alpha1() + cfg.MetaName = userVolumeName + cfg.VolumeType = pointer.To(block.VolumeTypeDisk) + cfg.ProvisioningSpec = ProvisioningSpec{ + DiskSelectorSpec: DiskSelector{ + Match: cel.MustExpression(cel.ParseBooleanExpression(`disk.transport == "nvme"`, celenv.DiskLocator())), + }, + } + cfg.FilesystemSpec = FilesystemSpec{ + FilesystemType: block.FilesystemTypeXFS, + } + cfg.EncryptionSpec = EncryptionSpec{ + EncryptionProvider: block.EncryptionProviderLUKS2, + EncryptionKeys: []EncryptionKey{ + { + KeySlot: 0, + KeyTPM: &EncryptionKeyTPM{}, + }, + { + KeySlot: 1, + KeyStatic: &EncryptionKeyStatic{ + KeyData: "topsecret", + }, + }, + }, + } + + return cfg +} + // Name implements config.NamedDocument interface. func (s *UserVolumeConfigV1Alpha1) Name() string { return s.MetaName @@ -198,8 +231,21 @@ func (s *UserVolumeConfigV1Alpha1) Validate(validation.RuntimeMode, ...validatio } switch vtype { - case block.VolumeTypePartition: - extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true) + case block.VolumeTypeDirectory: + if !s.ProvisioningSpec.IsZero() { + validationErrors = errors.Join(validationErrors, errors.New("provisioning spec is invalid for volumeType directory")) + } + + if !s.EncryptionSpec.IsZero() { + validationErrors = errors.Join(validationErrors, errors.New("encryption spec is invalid for volumeType directory")) + } + + if !s.FilesystemSpec.IsZero() { + validationErrors = errors.Join(validationErrors, errors.New("filesystem spec is invalid for volumeType directory")) + } + + case block.VolumeTypeDisk: + extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true, false) warnings = append(warnings, extraWarnings...) validationErrors = errors.Join(validationErrors, extraErrors) @@ -212,20 +258,21 @@ func (s *UserVolumeConfigV1Alpha1) Validate(validation.RuntimeMode, ...validatio warnings = append(warnings, extraWarnings...) validationErrors = errors.Join(validationErrors, extraErrors) - case block.VolumeTypeDirectory: - if !s.ProvisioningSpec.IsZero() { - validationErrors = errors.Join(validationErrors, errors.New("provisioning spec is invalid for volumeType directory")) - } + case block.VolumeTypePartition: + extraWarnings, extraErrors := s.ProvisioningSpec.Validate(true, true) - if !s.EncryptionSpec.IsZero() { - validationErrors = errors.Join(validationErrors, errors.New("encryption spec is invalid for volumeType directory")) - } + warnings = append(warnings, extraWarnings...) + validationErrors = errors.Join(validationErrors, extraErrors) - if !s.FilesystemSpec.IsZero() { - validationErrors = errors.Join(validationErrors, errors.New("filesystem spec is invalid for volumeType directory")) - } + extraWarnings, extraErrors = s.FilesystemSpec.Validate() + warnings = append(warnings, extraWarnings...) 
+ validationErrors = errors.Join(validationErrors, extraErrors) + + extraWarnings, extraErrors = s.EncryptionSpec.Validate() + warnings = append(warnings, extraWarnings...) + validationErrors = errors.Join(validationErrors, extraErrors) - case block.VolumeTypeDisk, block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay: + case block.VolumeTypeTmpfs, block.VolumeTypeSymlink, block.VolumeTypeOverlay: fallthrough default: diff --git a/pkg/machinery/config/types/block/user_volume_config_test.go b/pkg/machinery/config/types/block/user_volume_config_test.go index 6ec4faf457a..bf9b24f4088 100644 --- a/pkg/machinery/config/types/block/user_volume_config_test.go +++ b/pkg/machinery/config/types/block/user_volume_config_test.go @@ -340,6 +340,24 @@ func TestUserVolumeConfigValidate(t *testing.T) { expectedErrors: "encryption spec is invalid for volumeType directory", }, + { + name: "size for disk", + + cfg: func(t *testing.T) *block.UserVolumeConfigV1Alpha1 { + c := block.NewUserVolumeConfigV1Alpha1() + c.MetaName = constants.EphemeralPartitionLabel + c.VolumeType = pointer.To(blockres.VolumeTypeDisk) + + require.NoError(t, c.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`disk.size > 120u * GiB`))) + c.ProvisioningSpec.ProvisioningMaxSize = block.MustByteSize("2.5TiB") + c.ProvisioningSpec.ProvisioningMinSize = block.MustByteSize("10GiB") + c.FilesystemSpec.FilesystemType = blockres.FilesystemTypeEXT4 + + return c + }, + + expectedErrors: "min size, max size and grow are not supported", + }, { name: "filesystem spec for directory", @@ -421,6 +439,20 @@ func TestUserVolumeConfigValidate(t *testing.T) { return c }, }, + { + name: "valid disk", + + cfg: func(t *testing.T) *block.UserVolumeConfigV1Alpha1 { + c := block.NewUserVolumeConfigV1Alpha1() + c.MetaName = constants.EphemeralPartitionLabel + c.VolumeType = pointer.To(blockres.VolumeTypeDisk) + + require.NoError(t, c.ProvisioningSpec.DiskSelectorSpec.Match.UnmarshalText([]byte(`disk.size > 120u * GiB`))) + c.FilesystemSpec.FilesystemType = blockres.FilesystemTypeEXT4 + + return c + }, + }, { name: "valid encrypted", diff --git a/pkg/machinery/config/types/block/volume_config.go b/pkg/machinery/config/types/block/volume_config.go index 41cd87cbcf0..570d5e46ee1 100644 --- a/pkg/machinery/config/types/block/volume_config.go +++ b/pkg/machinery/config/types/block/volume_config.go @@ -190,7 +190,7 @@ func (s *VolumeConfigV1Alpha1) Validate(validation.RuntimeMode, ...validation.Op } } - extraWarnings, extraErrors := s.ProvisioningSpec.Validate(false) + extraWarnings, extraErrors := s.ProvisioningSpec.Validate(false, true) warnings = append(warnings, extraWarnings...) validationErrors = errors.Join(validationErrors, extraErrors) @@ -216,7 +216,9 @@ func (s *VolumeConfigV1Alpha1) Encryption() config.EncryptionConfig { } // Validate the provisioning spec. 
-func (s ProvisioningSpec) Validate(required bool) ([]string, error) { +// +//nolint:gocyclo +func (s ProvisioningSpec) Validate(required bool, sizeSupported bool) ([]string, error) { var validationErrors error if !s.DiskSelectorSpec.Match.IsZero() { @@ -227,12 +229,18 @@ func (s ProvisioningSpec) Validate(required bool) ([]string, error) { validationErrors = errors.Join(validationErrors, errors.New("disk selector is required")) } - if !s.ProvisioningMinSize.IsZero() && !s.ProvisioningMaxSize.IsZero() { - if s.ProvisioningMinSize.Value() > s.ProvisioningMaxSize.Value() { - validationErrors = errors.Join(validationErrors, errors.New("min size is greater than max size")) + if sizeSupported { + if !s.ProvisioningMinSize.IsZero() && !s.ProvisioningMaxSize.IsZero() { + if s.ProvisioningMinSize.Value() > s.ProvisioningMaxSize.Value() { + validationErrors = errors.Join(validationErrors, errors.New("min size is greater than max size")) + } + } else if required && s.ProvisioningMinSize.IsZero() && s.ProvisioningMaxSize.IsZero() { + validationErrors = errors.Join(validationErrors, errors.New("min size or max size is required")) + } + } else { + if !s.ProvisioningMinSize.IsZero() || !s.ProvisioningMaxSize.IsZero() || s.Grow().IsPresent() { + validationErrors = errors.Join(validationErrors, errors.New("min size, max size and grow are not supported")) } - } else if required && s.ProvisioningMinSize.IsZero() && s.ProvisioningMaxSize.IsZero() { - validationErrors = errors.Join(validationErrors, errors.New("min size or max size is required")) } return nil, validationErrors diff --git a/website/content/v1.12/reference/configuration/block/uservolumeconfig.md b/website/content/v1.12/reference/configuration/block/uservolumeconfig.md index 497a663082d..c6bb374b6d4 100644 --- a/website/content/v1.12/reference/configuration/block/uservolumeconfig.md +++ b/website/content/v1.12/reference/configuration/block/uservolumeconfig.md @@ -48,6 +48,58 @@ volumeType: directory # Volume type. # - no_write_workqueue {{< /highlight >}} +{{< highlight yaml >}} +apiVersion: v1alpha1 +kind: UserVolumeConfig +name: local-data # Name of the volume. +volumeType: disk # Volume type. +# The provisioning describes how the volume is provisioned. +provisioning: + # The disk selector expression. + diskSelector: + match: disk.transport == "nvme" # The Common Expression Language (CEL) expression to match the disk. + + # # The minimum size of the volume. + # minSize: 2.5GiB + + # # The maximum size of the volume, if not specified the volume can grow to the size of the + # maxSize: 50GiB +# The filesystem describes how the volume is formatted. +filesystem: + type: xfs # Filesystem type. Default is `xfs`. +# The encryption describes how the volume is encrypted. +encryption: + provider: luks2 # Encryption provider to use for the encryption. + # Defines the encryption keys generation and storage method. + keys: + - slot: 0 # Key slot number for LUKS2 encryption. + # Enable TPM based disk encryption. + tpm: {} + + # # KMS managed encryption key. + # kms: + # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key. + - slot: 1 # Key slot number for LUKS2 encryption. + # Key which value is stored in the configuration file. + static: + passphrase: topsecret # Defines the static passphrase value. + + # # KMS managed encryption key. + # kms: + # endpoint: https://192.168.88.21:4443 # KMS endpoint to Seal/Unseal the key. + + # # Cipher to use for the encryption. Depends on the encryption provider. 
+ # cipher: aes-xts-plain64 + + # # Defines the encryption sector size. + # blockSize: 4096 + + # # Additional --perf parameters for the LUKS2 encryption. + # options: + # - no_read_workqueue + # - no_write_workqueue +{{< /highlight >}} + {{< highlight yaml >}} apiVersion: v1alpha1 kind: UserVolumeConfig @@ -102,7 +154,7 @@ encryption: | Field | Type | Description | Value(s) | |-------|------|-------------|----------| |`name` |string |Name of the volume.
<br><br>Name might be between 1 and 34 characters long and can only contain:<br>lowercase and uppercase ASCII letters, digits, and hyphens. | |
-|`volumeType` |VolumeType |Volume type. |`partition`<br>`directory`<br> |
+|`volumeType` |VolumeType |Volume type. |`directory`<br>`disk`<br>`partition`<br> |
 |`provisioning` |ProvisioningSpec |The provisioning describes how the volume is provisioned. | |
 |`filesystem` |FilesystemSpec |The filesystem describes how the volume is formatted. | |
 |`encryption` |EncryptionSpec |The encryption describes how the volume is encrypted. | |
diff --git a/website/content/v1.12/schemas/config.schema.json b/website/content/v1.12/schemas/config.schema.json
index bb1d862e496..d5865181c2f 100644
--- a/website/content/v1.12/schemas/config.schema.json
+++ b/website/content/v1.12/schemas/config.schema.json
@@ -451,8 +451,9 @@
     },
     "volumeType": {
       "enum": [
-        "partition",
-        "directory"
+        "directory",
+        "disk",
+        "partition"
      ],
      "title": "volumeType",
      "description": "Volume type.\n",
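Conversely, a configuration the new validation rejects — a sketch assembled from the `size for disk` test case above (the volume name and sizes are illustrative):

```yaml
apiVersion: v1alpha1
kind: UserVolumeConfig
name: local-data
volumeType: disk
provisioning:
  diskSelector:
    match: disk.size > 120u * GiB
  # Size-specific settings are rejected for volumeType "disk":
  minSize: 10GiB
  maxSize: 2.5TiB
filesystem:
  type: ext4
```

With the two-argument `ProvisioningSpec.Validate(required, sizeSupported)` introduced here, disk volumes pass `sizeSupported = false`, so this document fails validation with `min size, max size and grow are not supported`.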