mirror of https://github.com/kairos-io/kairos-agent.git
Use the yip layout plugin (#72)
Co-authored-by: Dimitris Karakasilis <dimitris@spectrocloud.com>
Co-authored-by: Itxaka <itxaka.garcia@spectrocloud.com>
parent 68e4bd6358
commit e336c66b3f
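This change drops the agent's in-tree copy of the layout plugin and uses the Layout plugin that now ships with yip instead: github.com/mudler/yip is bumped to v1.3.1-0.20230704124832-e5812d0f5890, NewYipCloudInitRunner registers plugins.Layout in place of the local layoutPlugin, and the local plugin plus its unit tests are deleted. The cloud-init specs now run with a buffer-backed logger and assert on the logged layout warnings instead of expecting Run to fail, and Disk.ExpandLastPartition keeps the requested size in MiB separate from its sector equivalent (sizeSectors).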
go.mod (2 lines changed)
@@ -18,7 +18,7 @@ require (
    github.com/mudler/go-nodepair v0.0.0-20221223092639-ba399a66fdfb
    github.com/mudler/go-pluggable v0.0.0-20230126220627-7710299a0ae5
    github.com/mudler/go-processmanager v0.0.0-20220724164624-c45b5c61312d
    github.com/mudler/yip v1.2.1-0.20230621084401-972474b4ad72
    github.com/mudler/yip v1.3.1-0.20230704124832-e5812d0f5890
    github.com/nxadm/tail v1.4.8
    github.com/onsi/ginkgo/v2 v2.9.7
    github.com/onsi/gomega v1.27.8
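The go.sum hunks that follow are the checksum churn that accompanies the bump: the new yip checksums come in, while entries for superseded versions (older yip releases and snapshots, kairos-sdk v0.0.8, gojq v0.12.12, retry-go v2.7.0, linuxkit v1.0.0) drop out, which is what a go mod tidy run after the version change would produce.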
go.sum (22 lines changed)
@@ -95,8 +95,6 @@ github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/avast/retry-go v2.7.0+incompatible h1:XaGnzl7gESAideSjr+I8Hki/JBi+Yb9baHlMRPeSC84=
github.com/avast/retry-go v2.7.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/avast/retry-go v3.0.0+incompatible h1:4SOWQ7Qs+oroOTQOYnAHqelpCO0biHSxpiH9JdtuBj0=
github.com/avast/retry-go v3.0.0+incompatible/go.mod h1:XtSnn+n/sHqQIpZ10K1qAevBhOOCWBLXXy3hyiqqBrY=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59 h1:WWB576BN5zNSZc/M9d/10pqEx5VHNhaQ/yOVAkmj5Yo=
github.com/aybabtme/rgbterm v0.0.0-20170906152045-cc83f3b3ce59/go.mod h1:q/89r3U2H7sSsE2t6Kca0lfwTK8JdoNGS/yzM/4iH5I=
github.com/aymanbagabas/go-osc52 v1.0.3/go.mod h1:zT8H+Rk4VSabYN90pWyugflM3ZhpTZNC7cASDfUCdT4=
@@ -381,8 +379,6 @@ github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/itchyny/gojq v0.12.12 h1:x+xGI9BXqKoJQZkr95ibpe3cdrTbY8D9lonrK433rcA=
github.com/itchyny/gojq v0.12.12/go.mod h1:j+3sVkjxwd7A7Z5jrbKibgOLn0ZfLWkV+Awxr/pyzJE=
github.com/itchyny/gojq v0.12.13 h1:IxyYlHYIlspQHHTE0f3cJF0NKDMfajxViuhBLnHd/QU=
github.com/itchyny/gojq v0.12.13/go.mod h1:JzwzAqenfhrPUuwbmEz3nu3JQmFLlQTQMUcOdnu/Sf4=
github.com/itchyny/timefmt-go v0.1.5 h1:G0INE2la8S6ru/ZI5JecgyzbbJNs5lG1RcBqa7Jm6GE=
github.com/itchyny/timefmt-go v0.1.5/go.mod h1:nEP7L+2YmAbT2kZ2HfSs1d8Xtw9LY8D2stDBckWakZ8=
github.com/jaypipes/ghw v0.10.0 h1:UHu9UX08Py315iPojADFPOkmjTsNzHj4g4adsNKKteY=
@@ -403,8 +399,6 @@ github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kairos-io/kairos-sdk v0.0.8 h1:3yfxdmUuJoN7ePg+ogpH1PJvuMsLmLcxEXuWoiGdIrg=
github.com/kairos-io/kairos-sdk v0.0.8/go.mod h1:Z+1CLqMZq97bzwX2XSIArr8EoniMth3mMYkOOb8L3QY=
github.com/kairos-io/kairos-sdk v0.0.9-0.20230620064343-df990bf49a07 h1:WctdkLqZBl8bViFPoqnRtxU5Vf63G9c1lTLem6F3d4s=
github.com/kairos-io/kairos-sdk v0.0.9-0.20230620064343-df990bf49a07/go.mod h1:Z+1CLqMZq97bzwX2XSIArr8EoniMth3mMYkOOb8L3QY=
github.com/kbinani/screenshot v0.0.0-20210720154843-7d3a670d8329 h1:qq2nCpSrXrmvDGRxW0ruW9BVEV1CN2a9YDOExdt+U0o=
@@ -514,14 +508,10 @@ github.com/mudler/go-pluggable v0.0.0-20230126220627-7710299a0ae5 h1:FaZD86+A9mV
github.com/mudler/go-pluggable v0.0.0-20230126220627-7710299a0ae5/go.mod h1:WmKcT8ONmhDQIqQ+HxU+tkGWjzBEyY/KFO8LTGCu4AI=
github.com/mudler/go-processmanager v0.0.0-20220724164624-c45b5c61312d h1:/lAg9vPAAU+s35cDMCx1IyeMn+4OYfCBPqi08Q8vXDg=
github.com/mudler/go-processmanager v0.0.0-20220724164624-c45b5c61312d/go.mod h1:HGGAOJhipApckwNV8ZTliRJqxctUv3xRY+zbQEwuytc=
github.com/mudler/yip v1.1.0 h1:QQLQhD5FQ7ojaP7s7dIll6pSGnwnIplL1zGMSg5lsHQ=
github.com/mudler/yip v1.1.0/go.mod h1:GIzGnY6+tP7kaNBsmtisdyuo4cgn/4y6bEOS3GZNtkY=
github.com/mudler/yip v1.2.0 h1:hs6x2HDUq+0mwxKzY0SLixnv+VKYc4n3tDY/nXPVejU=
github.com/mudler/yip v1.2.0/go.mod h1:7fAek4ZV9SS8anO6drK+tn5eXA6w1mXKpPxI0wZT5u8=
github.com/mudler/yip v1.2.1-0.20230620134733-dcdc988cf703 h1:OnnvSX4hZPHLXLYCjXt4fVWvQLN6a4vBystwA9M/mRg=
github.com/mudler/yip v1.2.1-0.20230620134733-dcdc988cf703/go.mod h1:xZfjwk/7n2D5iaj6IjtHmjCR1IbEyySnnoH+t3GKh7M=
github.com/mudler/yip v1.2.1-0.20230621084401-972474b4ad72 h1:lNI5UGAHBkgInDgRYT8jC37YSg4+FGemsKf0E4wtCFs=
github.com/mudler/yip v1.2.1-0.20230621084401-972474b4ad72/go.mod h1:3WeDh6tGX1yYPJom05E7xEjw8dNVlkH2WFxLi7Gflzk=
github.com/mudler/yip v1.3.0 h1:MjVh4dDr/imwJ46qXGbftnLRKmDgzs0Y60WyVtXY4i4=
github.com/mudler/yip v1.3.0/go.mod h1:3WeDh6tGX1yYPJom05E7xEjw8dNVlkH2WFxLi7Gflzk=
github.com/mudler/yip v1.3.1-0.20230704124832-e5812d0f5890 h1:lNOwUTOH+WW8c2t1aQniYRiF8qFwRjB6XO6Nu+d9vlo=
github.com/mudler/yip v1.3.1-0.20230704124832-e5812d0f5890/go.mod h1:3WeDh6tGX1yYPJom05E7xEjw8dNVlkH2WFxLi7Gflzk=
github.com/muesli/ansi v0.0.0-20211018074035-2e021307bc4b/go.mod h1:fQuZ0gauxyBcmsdE3ZT4NasjaRdxmbCS0jRHsrWu3Ho=
github.com/muesli/ansi v0.0.0-20221106050444-61f0cd9a192a h1:jlDOeO5TU0pYlbc/y6PFguab5IjANI0Knrpg3u/ton4=
github.com/muesli/ansi v0.0.0-20221106050444-61f0cd9a192a/go.mod h1:CJlz5H+gyd6CUWT45Oy4q24RdLyn7Md9Vj2/ldJBSIo=
@@ -609,8 +599,6 @@ github.com/pterm/pterm v0.12.62 h1:Xjj5Wl6UR4Il9xOiDUOZRwReRTdO75if/JdWsn9I59s=
github.com/pterm/pterm v0.12.62/go.mod h1:+c3ujjE7N5qmNx6eKAa7YVSC6m/gCorJJKhzwYTbL90=
github.com/qeesung/image2ascii v1.0.1 h1:Fe5zTnX/v/qNC3OC4P/cfASOXS501Xyw2UUcgrLgtp4=
github.com/qeesung/image2ascii v1.0.1/go.mod h1:kZKhyX0h2g/YXa/zdJR3JnLnJ8avHjZ3LrvEKSYyAyU=
github.com/rancher-sandbox/linuxkit v1.0.0 h1:ejEKyLWfByMkwzpmcSQLc5/RL3FtiKRpIgY+TUjFpaM=
github.com/rancher-sandbox/linuxkit v1.0.0/go.mod h1:n6Fkjc5qoMeWrnLSA5oqUF8ZzFKMrM960CtBwfvH1ZM=
github.com/rancher-sandbox/linuxkit v1.0.1-0.20230517173613-432a87ba3e09 h1:/yNp//3ZC5J7KUaUPDmomQ78j8VUD/2T/uT+TvS4M0w=
github.com/rancher-sandbox/linuxkit v1.0.1-0.20230517173613-432a87ba3e09/go.mod h1:n6Fkjc5qoMeWrnLSA5oqUF8ZzFKMrM960CtBwfvH1ZM=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
@@ -755,8 +743,6 @@ github.com/yuin/goldmark v1.3.5/go.mod h1:mwnBkeHKe2W/ZEtQ+71ViKU8L12m81fl3OWwC1
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
github.com/zcalusic/sysinfo v0.9.5 h1:ivoHyj9aIAYkwzo1+8QgJ5s4oeE6Etx9FmZtqa4wJjQ=
github.com/zcalusic/sysinfo v0.9.5/go.mod h1:Z/gPVufBrFc8X5sef3m6kkw3r3nlNFp+I6bvASfvBZQ=
github.com/zloylos/grsync v1.7.0 h1:7JjxC4CdzA7Inh771VelfUWdxIiMcXCXF5qV1Vx+W6E=
github.com/zloylos/grsync v1.7.0/go.mod h1:0Ue43fnWwx3doC5GkfmwmUwCAvQ54h06FRHXXX3ZWls=
go.etcd.io/bbolt v1.3.2/go.mod h1:IbVyRI1SCnLcuJnV2u8VeU0CEYM7e686BmAb1XKL+uU=
go.etcd.io/etcd/api/v3 v3.5.0/go.mod h1:cbVKeC6lCfl7j/8jBhAK6aIYO9XOjdptoxU/nLQcPvs=
go.etcd.io/etcd/client/pkg/v3 v3.5.0/go.mod h1:IJHfcCEKxYu1Os13ZdwCwIUTUVGYTSAM3YSwc9/Ac1g=
@@ -59,7 +59,7 @@ func NewYipCloudInitRunner(l v1.Logger, r v1.Runner, fs vfs.FS) *YipCloudInitRunner
        plugins.Environment,
        plugins.SystemdFirstboot,
        plugins.DataSources,
        layoutPlugin,
        plugins.Layout,
    ),
)
return &YipCloudInitRunner{
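For context on the swap above: a yip plugin is just a function with the signature that the removed in-tree layoutPlugin (further down in this diff) implements, so handing yip's own plugins.Layout to NewYipCloudInitRunner is a drop-in replacement. A minimal sketch of that shape — noopPlugin is a hypothetical name used only for illustration, not something in the repo:

package cloudinit

import (
    "github.com/mudler/yip/pkg/logger"
    "github.com/mudler/yip/pkg/plugins"
    "github.com/mudler/yip/pkg/schema"
    "github.com/twpayne/go-vfs"
)

// noopPlugin has the same shape as yip's built-in plugins (and the removed
// layoutPlugin): it receives the stage being executed plus a logger, a
// filesystem and a console, and returns an error if the stage cannot be applied.
func noopPlugin(l logger.Interface, s schema.Stage, fs vfs.FS, console plugins.Console) error {
    l.Infof("stage %s: nothing to do", s.Name)
    return nil
}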
@@ -17,6 +17,7 @@ limitations under the License.
package cloudinit_test

import (
    "bytes"
    "errors"
    "fmt"
    "io/ioutil"
@@ -102,8 +103,12 @@ stages:
    var device, cmdFail string
    var partNum int
    var cleanup func()
    logger := v1.NewNullLogger()
    var logs *bytes.Buffer
    var logger v1.Logger
    BeforeEach(func() {
        logs = &bytes.Buffer{}
        logger = v1.NewBufferLogger(logs)

        afs, cleanup, _ = vfst.NewTestFS(nil)
        err := utils.MkdirAll(afs, "/some/yip", constants.DirPerm)
        Expect(err).To(BeNil())
@@ -231,7 +236,9 @@ stages:
`, device)), constants.FilePerm)
        Expect(err).To(BeNil())
        cloudRunner := NewYipCloudInitRunner(logger, runner, afs)
        Expect(cloudRunner.Run("test", "/some/yip")).NotTo(BeNil())
        err = cloudRunner.Run("test", "/some/yip")
        Expect(err).ToNot(HaveOccurred())
        Expect(logs.String()).To(MatchRegexp("Could not verify /dev/device is a block device"))
    })
    It("Fails to expand last partition", func() {
        partNum = 3
@@ -250,7 +257,10 @@ stages:
`, device)), constants.FilePerm)
        Expect(err).To(BeNil())
        cloudRunner := NewYipCloudInitRunner(logger, runner, afs)
        Expect(cloudRunner.Run("test", "/some/yip")).NotTo(BeNil())
        err = cloudRunner.Run("test", "/some/yip")
        Expect(err).ToNot(HaveOccurred())
        // TODO: Is this the error we should be expecting?
        Expect(logs.String()).To(MatchRegexp("Could not verify /dev/device is a block device"))
    })
    It("Fails to find device by path", func() {
        err := afs.WriteFile("/some/yip/layout.yaml", []byte(`
@@ -263,7 +273,9 @@ stages:
`), constants.FilePerm)
        Expect(err).To(BeNil())
        cloudRunner := NewYipCloudInitRunner(logger, runner, afs)
        Expect(cloudRunner.Run("test", "/some/yip")).NotTo(BeNil())
        err = cloudRunner.Run("test", "/some/yip")
        Expect(err).ToNot(HaveOccurred())
        Expect(logs.String()).To(MatchRegexp("Could not verify /whatever is a block device"))
    })
    It("Fails to find device by label", func() {
        err := afs.WriteFile("/some/yip/layout.yaml", []byte(`
@@ -276,7 +288,9 @@ stages:
`), constants.FilePerm)
        Expect(err).To(BeNil())
        cloudRunner := NewYipCloudInitRunner(logger, runner, afs)
        Expect(cloudRunner.Run("test", "/some/yip")).NotTo(BeNil())
        err = cloudRunner.Run("test", "/some/yip")
        Expect(err).ToNot(HaveOccurred())
        Expect(logs.String()).To(MatchRegexp("Could not find device for the given label"))
    })
})
})
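The spec updates above change what a layout failure looks like to the caller: instead of expecting Run to return an error, the tests hand the runner a logger backed by a bytes.Buffer and match the warning text that ends up in it. Stripped of the surrounding Ginkgo scaffolding (runner and afs are the mock runner and test filesystem set up elsewhere in the suite), the pattern is:

logs := &bytes.Buffer{}
logger := v1.NewBufferLogger(logs)

cloudRunner := NewYipCloudInitRunner(logger, runner, afs)
err := cloudRunner.Run("test", "/some/yip")
Expect(err).ToNot(HaveOccurred())
Expect(logs.String()).To(MatchRegexp("Could not verify /dev/device is a block device"))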
@@ -1,117 +0,0 @@
/*
Copyright © 2022 SUSE LLC

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cloudinit

import (
    "errors"
    "fmt"
    "strings"

    "github.com/kairos-io/kairos/v2/pkg/constants"
    "github.com/kairos-io/kairos/v2/pkg/partitioner"
    v1 "github.com/kairos-io/kairos/v2/pkg/types/v1"
    "github.com/kairos-io/kairos/v2/pkg/utils"
    "github.com/mudler/yip/pkg/logger"
    "github.com/mudler/yip/pkg/plugins"
    "github.com/mudler/yip/pkg/schema"
    "github.com/twpayne/go-vfs"
)

// layoutPlugin is elemental's implementation of yip's Layout plugin, based
// on the partitioner package.
func layoutPlugin(l logger.Interface, s schema.Stage, fs vfs.FS, console plugins.Console) (err error) {
    if s.Layout.Device == nil {
        return nil
    }

    var dev *partitioner.Disk
    elemConsole, ok := console.(*cloudInitConsole)
    if !ok {
        return errors.New("provided console is not an instance of 'cloudInitConsole' type")
    }
    runner := elemConsole.getRunner()
    log, ok := l.(v1.Logger)
    if !ok {
        return errors.New("provided logger is not implementing v1.Logger interface")
    }

    if len(strings.TrimSpace(s.Layout.Device.Label)) > 0 {
        partDevice, err := utils.GetFullDeviceByLabel(runner, s.Layout.Device.Label, 5)
        if err != nil {
            l.Errorf("Exiting, disk not found:\n %s", err.Error())
            return err
        }
        dev = partitioner.NewDisk(
            partDevice.Disk,
            partitioner.WithRunner(runner),
            partitioner.WithLogger(log),
            partitioner.WithFS(fs),
        )
    } else if len(strings.TrimSpace(s.Layout.Device.Path)) > 0 {
        dev = partitioner.NewDisk(
            s.Layout.Device.Path,
            partitioner.WithRunner(runner),
            partitioner.WithLogger(log),
            partitioner.WithFS(fs),
        )
    } else {
        l.Warnf("No target device defined, nothing to do")
        return nil
    }

    if !dev.Exists() {
        l.Errorf("Exiting, disk not found:\n %s", s.Layout.Device.Path)
        return errors.New("Target disk not found")
    }

    for _, part := range s.Layout.Parts {
        _, err := utils.GetFullDeviceByLabel(runner, part.FSLabel, 1)
        if err == nil {
            l.Warnf("Partition with FSLabel: %s already exists, ignoring", part.FSLabel)
            continue
        }

        // Set default filesystem
        if part.FileSystem == "" {
            part.FileSystem = constants.LinuxFs
        }

        l.Infof("Creating %s partition", part.FSLabel)
        partNum, err := dev.AddPartition(part.Size, part.FileSystem, part.PLabel)
        if err != nil {
            return fmt.Errorf("Failed creating partitions: %w", err)
        }

        if part.FileSystem != "-" {
            out, err := dev.FormatPartition(partNum, part.FileSystem, part.FSLabel)
            if err != nil {
                return fmt.Errorf("Formatting partition failed: %s\nError: %w", out, err)
            }
        }
    }

    if s.Layout.Expand != nil {
        l.Infof("Extending last partition up to %d MiB", s.Layout.Expand.Size)
        out, err := dev.ExpandLastPartition(s.Layout.Expand.Size)
        if err != nil {
            l.Error(out)
            return err
        }
    }

    return nil
}
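The deleted plugin above drives everything from the stage's Layout section: it locates the target disk by filesystem label (falling back to an explicit path), creates any partition whose FSLabel does not exist yet, formats it unless the filesystem is "-", and optionally expands the last partition. An example of the schema it consumed, mirroring the values the deleted tests below use (sizes are in MiB):

stage := schema.Stage{
    Layout: schema.Layout{
        Device: &schema.Device{
            Label: "FAKE",        // locate the disk via a partition's filesystem label...
            Path:  "/dev/device", // ...or fall back to an explicit device path
        },
        Parts: []schema.Partition{
            {Size: 100, FSLabel: "fs-label-part-1", FileSystem: "ext3", PLabel: "label-part-1"},
        },
        Expand: &schema.Expand{Size: 0}, // 0 expands the last partition to the end of the disk
    },
}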
@@ -1,403 +0,0 @@
package cloudinit

import (
    "fmt"
    "strconv"

    "github.com/jaypipes/ghw/pkg/block"
    "github.com/kairos-io/kairos/v2/pkg/partitioner"
    v1 "github.com/kairos-io/kairos/v2/pkg/types/v1"
    v1mock "github.com/kairos-io/kairos/v2/tests/mocks"
    "github.com/mudler/yip/pkg/schema"
    . "github.com/onsi/ginkgo/v2"
    . "github.com/onsi/gomega"
    "github.com/sirupsen/logrus"
    "github.com/twpayne/go-vfs"
    "github.com/twpayne/go-vfs/vfst"
)

var _ = Describe("Layout", Label("layout"), func() {
    // unit test stolen from yip
    var logger v1.Logger
    var stage schema.Stage
    var fs vfs.FS
    var console *cloudInitConsole
    var runner *v1mock.FakeRunner
    var ghwTest v1mock.GhwMock
    var defaultSizeForTest uint
    var device string

    BeforeEach(func() {
        device = "/dev/device"
        defaultSizeForTest = 100
        logger = v1.NewLogger()
        logger.SetLevel(logrus.DebugLevel)
        fs, _, _ = vfst.NewTestFS(map[string]interface{}{device: ""})
        runner = v1mock.NewFakeRunner()
        console = newCloudInitConsole(logger, runner)
        mainDisk := block.Disk{
            Name: "device",
            Partitions: []*block.Partition{
                {
                    Name:            "device1",
                    FilesystemLabel: "FAKE",
                    Type:            "ext4",
                    MountPoint:      "/mnt/fake",
                    SizeBytes:       0,
                },
                {
                    Name:            "device2",
                    FilesystemLabel: "FAKE",
                    Type:            "ext4",
                    MountPoint:      "/mnt/fake",
                    SizeBytes:       0,
                },
            },
        }
        ghwTest = v1mock.GhwMock{}
        ghwTest.AddDisk(mainDisk)
        ghwTest.CreateDevices()
    })

    Describe("Expand partition", Label("expand"), func() {
        BeforeEach(func() {
            partition := "/dev/device1"

            layout := schema.Layout{
                Device: &schema.Device{
                    Label: "FAKE",
                    Path:  device,
                },
                Expand: &schema.Expand{Size: defaultSizeForTest},
                Parts:  []schema.Partition{},
            }
            stage = schema.Stage{
                Layout: layout,
            }

            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    /*
                       Getting free sectors is called by running:
                       `parted --script --machine -- /dev/device unit s print`
                       And returns the following:
                       BYT;
                       /dev/nvme0n1:7814037168s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
                       1:2048s:206847s:204800s:fat32:EFI System Partition:boot, esp, no_automount;
                       2:206848s:239615s:32768s::Microsoft reserved partition:msftres, no_automount;
                       3:239616s:2046941183s:2046701568s:ntfs:Basic data partition:msftdata;
                       4:2046941184s:2048237567s:1296384s:ntfs::hidden, diag, no_automount;
                       5:2048237568s:2050334719s:2097152s:ext4::;
                       6:2050334720s:7814035455s:5763700736s:btrfs::;

                       So it's the device and its total sectors and picks the last partition and its final sector.
                       In this case:
                       /dev/nvme0n1:7814037168s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
                       ^device ^total sectors
                       6:2050334720s:7814035455s:5763700736s:btrfs::;
                       ^partition ^end sector

                       And you subtract (total - end sector of last partition) to know how many free sectors there are.
                       At least 20480 sectors are needed to expand properly
                    */
                    // Return 1.000.000 total sectors - 1000 used by the partition
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
1:0s:1000s:0s:ext4::;`
                    return []byte(rtn), nil
                }
                // removing the first partition and creating a new one
                if command == "parted" && len(args) == 13 {
                    if args[6] == "rm" && args[7] == "1" && args[8] == "mkpart" {
                        // Create the device
                        _, err := fs.Create(partition)
                        Expect(err).ToNot(HaveOccurred())
                        return nil, err
                    }
                }
                return nil, nil
            }
        })

        AfterEach(func() {
            ghwTest.Clean()
        })

        It("Expands latest partition", func() {
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).ToNot(HaveOccurred())
            // This is the size in sectors that is going to be passed to parted to increase the new partition size
            // Remember to remove 1 last sector, don't ask me why
            Sectors := partitioner.MiBToSectors(defaultSizeForTest, 512) - 1
            // Check that it tried to delete+create and check the new fs for the new partition and resize it
            Expect(runner.IncludesCmds([][]string{
                {"udevadm", "settle"},
                {"parted", "--script", "--machine", "--", "/dev/device", "unit", "s", "print"},
                {"parted", "--script", "--machine", "--", "/dev/device", "unit", "s", "rm", "1", "mkpart", "part1", "", "0", strconv.Itoa(int(Sectors))},
                {"e2fsck", "-fy", "/dev/device1"},
                {"resize2fs", "/dev/device1"},
            })).ToNot(HaveOccurred())
        })
        It("Fails if there is not enough space", func() {
            // Override runner side effect to return 0 sectors when asked
            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
1:0s:1000000s:0s:ext4::;`
                    return []byte(rtn), nil
                }
                return nil, nil
            }
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("not enough free space"))
        })
        It("Fails if device doesnt exists", func() {
            // Override runner side effect to return 0 sectors when asked
            _ = fs.RemoveAll("/dev/device")
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("Target disk not found"))
        })
        It("Fails if new device didnt get created", func() {
            // Override runner side effect to return error when partition is recreated
            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
1:0s:1000s:0s:ext4::;`
                    return []byte(rtn), nil
                }
                // removing the first partition and creating a new one
                if command == "parted" && len(args) == 13 {
                    if args[6] == "rm" && args[7] == "1" && args[8] == "mkpart" {
                        // return an error
                        return nil, fmt.Errorf("failed")
                    }
                }
                return nil, nil
            }
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("failed"))
        })
        It("Fails if new device didnt get created, even when command didnt return an error", func() {
            // Override runner side effect to return error when partition is recreated
            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
1:0s:1000s:0s:ext4::;`
                    return []byte(rtn), nil
                }
                // removing the first partition and creating a new one
                if command == "parted" && len(args) == 13 {
                    if args[6] == "rm" && args[7] == "1" && args[8] == "mkpart" {
                        // Do nothing like the command failed
                        return nil, nil
                    }
                }
                return nil, nil
            }
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).To(HaveOccurred())
            Expect(err.Error()).To(ContainSubstring("could not find partition device"))
            Expect(err.Error()).To(ContainSubstring("/dev/device1"))
        })
    })

    Describe("Add partitions", Label("add", "partitions"), func() {
        BeforeEach(func() {
            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
1:0s:1000s:0s:ext4::;`
                    return []byte(rtn), nil
                }
                return nil, nil
            }
        })
        AfterEach(func() {
            ghwTest.Clean()
        })
        It("Adds one partition", func() {
            fslabel := "jojo"
            fstype := "ext3"
            plabel := "dio"

            layout := schema.Layout{
                Device: &schema.Device{
                    Label: "FAKE",
                    Path:  device,
                },
                Parts: []schema.Partition{
                    {
                        Size:       defaultSizeForTest,
                        FSLabel:    fslabel,
                        FileSystem: fstype,
                        PLabel:     plabel,
                    },
                },
            }
            stage = schema.Stage{
                Layout: layout,
            }
            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;
1:0s:1000s:0s:ext4::;`
                    return []byte(rtn), nil
                }
                // removing the first partition and creating a new one
                if command == "parted" && len(args) == 11 {
                    // creating partition with our given label and fs type
                    if args[6] == "mkpart" && args[7] == plabel && args[8] == fstype {
                        logger.Info("Creating part")
                        // Create the device
                        _, err := fs.Create("/dev/device2")
                        Expect(err).ToNot(HaveOccurred())
                        return nil, nil
                    }
                }
                return nil, nil
            }
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).ToNot(HaveOccurred())
            // Because this is adding a new partition and according to our fake parted the first partition occupies 1000 sectors
            // We need to add 1000 sectors to this number to calculate the sectors passed to parted
            // As parted will create the new partition from sector 1001 to MBsToSectors+1001
            Sectors := partitioner.MiBToSectors(defaultSizeForTest, 512) - 1 + 1001
            // Checks that commands to create the new partition were called with the proper fs, size and labels
            Expect(runner.IncludesCmds([][]string{
                {"udevadm", "settle"},
                {"parted", "--script", "--machine", "--", "/dev/device", "unit", "s", "mkpart", plabel, fstype, "1001", strconv.Itoa(int(Sectors))},
                {"mkfs.ext3", "-L", fslabel, "/dev/device2"},
            })).ToNot(HaveOccurred())
        })

        It("Adds multiple partitions", func() {
            partitions := []schema.Partition{
                {
                    Size:       100,
                    FSLabel:    "fs-label-part-1",
                    FileSystem: "ext3",
                    PLabel:     "label-part-1",
                },
                {
                    Size:       120,
                    FSLabel:    "fs-label-part-2",
                    FileSystem: "ext4",
                    PLabel:     "label-part-2",
                },
            }

            layout := schema.Layout{
                Device: &schema.Device{
                    Label: "FAKE",
                    Path:  device,
                },
                Parts:  partitions,
                Expand: &schema.Expand{Size: 0}, // Expand to the end of disk
            }
            stage = schema.Stage{
                Layout: layout,
            }

            type partitionData struct {
                StartSector     int
                EndSector       int
                TotalSectors    int
                PartitionNumber int
                Filesystem      string
                PLabel          string
            }
            createdPartitions := []partitionData{}

            runner.SideEffect = func(command string, args ...string) ([]byte, error) {
                if command == "parted" && args[4] == "unit" && args[5] == "s" && args[6] == "print" {
                    rtn := `
BYT;
/dev/device:1000000s:nvme:512:512:gpt:KINGSTON SFYRD4000G:;`
                    for _, p := range createdPartitions {
                        rtn += fmt.Sprintf("\n%d:%ds:%ds:%ds:%s::;", p.PartitionNumber, p.StartSector, p.EndSector, p.TotalSectors, p.Filesystem)
                    }

                    return []byte(rtn), nil
                }

                // removing the first partition and creating a new one
                if command == "parted" && len(args) == 11 {
                    // creating partition with our given label and fs type
                    if args[6] == "mkpart" {
                        endSector, err := strconv.Atoi(args[10])
                        Expect(err).ToNot(HaveOccurred())
                        startSector, err := strconv.Atoi(args[9])
                        Expect(err).ToNot(HaveOccurred())

                        newPart := partitionData{
                            StartSector:     startSector,
                            EndSector:       endSector,
                            TotalSectors:    endSector - startSector,
                            PartitionNumber: len(createdPartitions) + 1,
                            Filesystem:      args[8],
                            PLabel:          args[7],
                        }

                        createdPartitions = append(createdPartitions, newPart)
                        _, err = fs.Create(fmt.Sprintf("/dev/device%d", newPart.PartitionNumber))
                        Expect(err).ToNot(HaveOccurred())
                        return nil, nil
                    }
                }
                // removing the first partition and creating a new one (expand)
                if command == "parted" && len(args) == 13 {
                    if args[6] == "rm" && args[7] == "2" && args[8] == "mkpart" {
                        // Create the device
                        _, err := fs.Create("/dev/device2")
                        Expect(err).ToNot(HaveOccurred())
                        // Normally we would update these to match the truth:
                        //createdPartitions[1].EndSector = 1000000
                        //createdPartitions[1].TotalSectors = createdPartitions[1].EndSector - createdPartitions[1].StartSector
                        // but the test below needs the old value to check if the command
                        // that created the first version of the partition was run (using the old EndSector)

                        return nil, err
                    }
                }
                return nil, nil
            }
            err := layoutPlugin(logger, stage, fs, console)
            Expect(err).ToNot(HaveOccurred())

            Expect(len(createdPartitions)).To(Equal(len(partitions)))
            // Checks that commands to create the new partition were called with the proper fs, size and labels
            partedCmds := [][]string{}
            for _, p := range createdPartitions {
                partedCmds = append(partedCmds, []string{
                    "parted", "--script", "--machine", "--", "/dev/device", "unit", "s", "mkpart", p.PLabel, p.Filesystem, strconv.Itoa(p.StartSector), strconv.Itoa(p.EndSector),
                })
                partedCmds = append(partedCmds, []string{
                    fmt.Sprintf("mkfs.%s", p.Filesystem), "-L", fmt.Sprintf("fs-%s", p.PLabel), fmt.Sprintf("/dev/device%d", p.PartitionNumber),
                })
            }

            Expect(runner.IncludesCmds(partedCmds)).ToNot(HaveOccurred())

            Expect(runner.IncludesCmds([][]string{
                {"parted", "--script", "--machine", "--", "/dev/device", "unit", "s", "rm", "2", "mkpart", "part2", "", strconv.Itoa(createdPartitions[1].StartSector), "100%"},
                {"e2fsck", "-fy", "/dev/device2"},
                {"resize2fs", "/dev/device2"},
            })).ToNot(HaveOccurred())
        })
    })
})
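The fake parted output used throughout these deleted specs encodes the free-space arithmetic spelled out in their long comment: free sectors are the disk's total sectors minus the end sector of the last partition. A toy, self-contained sketch of that calculation (freeSectors is an illustrative helper, not a function from the repo):

package main

import "fmt"

// freeSectors returns how many sectors remain after the last partition,
// which is what the expand logic compares against the requested size.
func freeSectors(totalSectors, lastPartitionEnd uint) uint {
    return totalSectors - lastPartitionEnd
}

func main() {
    // The fake parted output reports a 1,000,000-sector disk whose last
    // partition ends at sector 1,000, leaving 999,000 sectors to grow into.
    fmt.Println(freeSectors(1000000, 1000))
}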
@@ -367,18 +367,19 @@ func (dev *Disk) ExpandLastPartition(size uint) (string, error) {
    }

    part := dev.parts[len(dev.parts)-1]
    var sizeSectors uint
    if size > 0 {
        size = MiBToSectors(size, dev.sectorS)
        sizeSectors = MiBToSectors(size, dev.sectorS)
        part := dev.parts[len(dev.parts)-1]
        if size < part.SizeS {
        if sizeSectors < part.SizeS {
            return "", errors.New("Layout plugin can only expand a partition, not shrink it")
        }
        freeS := dev.computeFreeSpaceWithoutLast()
        if size > freeS {
        if sizeSectors > freeS {
            return "", fmt.Errorf("not enough free space for to expand last partition up to %d sectors", size)
        }
    }
    part.SizeS = size
    part.SizeS = sizeSectors
    pc.DeletePartition(part.Number)
    pc.CreatePartition(&part)
    out, err := pc.WriteChanges()
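The ExpandLastPartition hunk above stops reusing the size parameter for two different units: previously the MiB argument was overwritten with its sector equivalent and part.SizeS was set from that reassigned value; now the sector count lives in its own sizeSectors variable and size keeps meaning MiB throughout. A rough sketch of the conversion involved — the body below is an assumption inferred from the tests (100 MiB with 512-byte sectors comes out as 204800 sectors), not copied from the partitioner package:

package main

import "fmt"

// miBToSectors mirrors what the partitioner's MiB-to-sectors helper is assumed to do:
// turn a size in MiB into a sector count for the given sector size in bytes.
func miBToSectors(sizeMiB, sectorSize uint) uint {
    return sizeMiB * 1024 * 1024 / sectorSize
}

func main() {
    fmt.Println(miBToSectors(100, 512)) // 204800 sectors for a 100 MiB request
}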