-
Notifications
You must be signed in to change notification settings - Fork 174
Add migration for groups with invalid values #3789
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,55 @@ | ||
| package binenc | ||
|
|
||
| import ( | ||
| "bytes" | ||
| "encoding/binary" | ||
| "io" | ||
|
|
||
| "github.com/pkg/errors" | ||
| ) | ||
|
|
||
| // WriteBytesList takes a list of byte slices and writes them in encoded form to a writer. | ||
| func WriteBytesList(w io.Writer, byteSlices ...[]byte) (int, error) { | ||
| var total int | ||
| for _, byteSlice := range byteSlices { | ||
| n, err := WriteUVarInt(w, uint64(len(byteSlice))) | ||
| total += n | ||
| if err != nil { | ||
| return total, err | ||
| } | ||
|
|
||
| n, err = w.Write(byteSlice) | ||
| total += n | ||
| if err != nil { | ||
| return total, err | ||
| } | ||
| } | ||
| return total, nil | ||
| } | ||
|
|
||
| // EncodeBytesList takes a list of byte slices and encodes them into a single byte slice. | ||
| func EncodeBytesList(byteSlices ...[]byte) []byte { | ||
| var buf bytes.Buffer | ||
| _, _ = WriteBytesList(&buf, byteSlices...) | ||
| return buf.Bytes() | ||
| } | ||
|
|
||
| // DecodeBytesList takes a byte buffer encoded via EncodeBytesList or WriteBytesList and decodes it into a list of byte | ||
| // slices. | ||
| func DecodeBytesList(buf []byte) ([][]byte, error) { | ||
|
Contributor
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Who is using it?
Contributor
Author
There was a problem hiding this comment. Choose a reason for hiding this commentThe reason will be displayed to describe this comment to others. Learn more. Same as above. |
||
| var result [][]byte | ||
|
|
||
| for len(buf) > 0 { | ||
| sliceLen, l := binary.Uvarint(buf) | ||
| if l <= 0 { | ||
| return nil, errors.New("invalid varint in buffer") | ||
| } | ||
| buf = buf[l:] | ||
| if sliceLen > uint64(len(buf)) { | ||
| return nil, errors.Errorf("encountered varint %d which is larger than the remaining buffer size (%d)", sliceLen, len(buf)) | ||
| } | ||
| result = append(result, buf[:sliceLen]) | ||
| buf = buf[sliceLen:] | ||
| } | ||
| return result, nil | ||
| } | ||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,41 @@ | ||
| package binenc | ||
|
|
||
| import "encoding/binary" | ||
|
|
||
// UintEncoder allows encoding uint values directly to byte slices.
// It extends binary.ByteOrder with helpers that allocate and fill a
// correctly sized buffer in a single call.
type UintEncoder interface {
	binary.ByteOrder

	EncodeUint16(x uint16) []byte
	EncodeUint32(x uint32) []byte
	EncodeUint64(x uint64) []byte
}

var (
	// BigEndian provides encoding functions for the big endian byte order.
	BigEndian = uintEncoder{ByteOrder: binary.BigEndian}
	// LittleEndian provides encoding functions for the little endian byte order.
	LittleEndian = uintEncoder{ByteOrder: binary.LittleEndian}
)

// uintEncoder implements UintEncoder on top of an embedded binary.ByteOrder,
// which supplies the Uint/PutUint primitives.
type uintEncoder struct {
	binary.ByteOrder
}

// EncodeUint16 returns the 2-byte encoding of x in the encoder's byte order.
func (e uintEncoder) EncodeUint16(x uint16) []byte {
	out := make([]byte, 2)
	e.PutUint16(out, x)
	return out
}

// EncodeUint32 returns the 4-byte encoding of x in the encoder's byte order.
func (e uintEncoder) EncodeUint32(x uint32) []byte {
	out := make([]byte, 4)
	e.PutUint32(out, x)
	return out
}

// EncodeUint64 returns the 8-byte encoding of x in the encoder's byte order.
func (e uintEncoder) EncodeUint64(x uint64) []byte {
	out := make([]byte, 8)
	e.PutUint64(out, x)
	return out
}
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,14 @@ | ||
| package binenc | ||
|
|
||
| import ( | ||
| "encoding/binary" | ||
| "io" | ||
| ) | ||
|
|
||
| // WriteUVarInt writes the given unsigned integer in its varint representation to the specified writer. The return value | ||
| // is the result of calling `Write` with the corresponding byte buffer. | ||
| func WriteUVarInt(w io.Writer, x uint64) (int, error) { | ||
| var buf [binary.MaxVarintLen64]byte | ||
| l := binary.PutUvarint(buf[:], x) | ||
| return w.Write(buf[:l]) | ||
| } |
| Original file line number | Diff line number | Diff line change | ||||||||||||
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
| @@ -0,0 +1,102 @@ | ||||||||||||||
| package m111tom112 | ||||||||||||||
|
|
||||||||||||||
| import ( | ||||||||||||||
| "bytes" | ||||||||||||||
|
|
||||||||||||||
| "github.com/hashicorp/go-multierror" | ||||||||||||||
| "github.com/pkg/errors" | ||||||||||||||
| "github.com/stackrox/rox/generated/storage" | ||||||||||||||
| "github.com/stackrox/rox/migrator/migrations" | ||||||||||||||
| "github.com/stackrox/rox/migrator/types" | ||||||||||||||
| bolt "go.etcd.io/bbolt" | ||||||||||||||
| ) | ||||||||||||||
|
|
||||||||||||||
// groupStoredByCompositeKey is a helper struct which contains the group as well as the composite key.
type groupStoredByCompositeKey struct {
	// grp is the deserialized group entry.
	grp *storage.Group
	// compositeKey is the legacy bucket key the group is stored under.
	compositeKey []byte
}
var (
	// bucketName is the bolt bucket that holds group entries.
	bucketName = []byte("groups2")

	// emptyPropertiesCompositeKey is the composite key produced by serializing
	// nil group properties; entries stored under it have empty properties.
	emptyPropertiesCompositeKey = serializePropsKey(nil)

	// migration removes group entries with invalid values when moving the
	// database from schema version 111 to 112.
	migration = types.Migration{
		StartingSeqNum: 111,
		VersionAfter:   &storage.Version{SeqNum: 112},
		Run: func(databases *types.Databases) error {
			return removeGroupsWithInvalidValues(databases.BoltDB)
		},
	}
)
func init() {
	// Register this migration with the global migration registry at package load time.
	migrations.MustRegisterMigration(migration)
}
| func removeGroupsWithInvalidValues(db *bolt.DB) error { | ||||||||||||||
| groupsWithInvalidValues, err := fetchGroupsToRemove(db) | ||||||||||||||
| if err != nil { | ||||||||||||||
| return errors.Wrap(err, "error fetching groups to remove") | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
| if err := removeGroupsStoredByCompositeKey(db, groupsWithInvalidValues); err != nil { | ||||||||||||||
| return errors.Wrap(err, "error removing groups with invalid values") | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
| return nil | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
// fetchGroupsToRemove scans the groups bucket in a read-only transaction and
// collects every entry that is still stored under a legacy composite key with
// invalid values: entries with an empty serialized value, or entries keyed by
// the empty-properties composite key. It does not modify the database.
func fetchGroupsToRemove(db *bolt.DB) (groupsStoredByCompositeKey []groupStoredByCompositeKey, err error) {
	err = db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(bucketName)
		// Pre-req: Migrating a non-existent bucket should not fail.
		if bucket == nil {
			return nil
		}
		return bucket.ForEach(func(k, v []byte) error {
			// In a prior migration, the groups bucket was migrated from groups being stored by a composite key to groups
			// being stored by a UUID.
			// Within that migration, groups that had an empty role name were skipped during migration.
			// This led to bucket entries where the properties and role name were both empty, thus making it impossible
			// to delete the group now that we require an ID being set.
			// We should check if there are still groups left stored by the composite key due to having an empty role
			// name or empty properties and remove those.
			if len(v) == 0 || bytes.Equal(k, emptyPropertiesCompositeKey) {
				grp, err := deserialize(k, v)
				if err != nil {
					return err
				}

				groupsStoredByCompositeKey = append(groupsStoredByCompositeKey,
					groupStoredByCompositeKey{grp: grp, compositeKey: k})
			}
			return nil
		})
	})
	if err != nil {
		return nil, err
	}
	return groupsStoredByCompositeKey, nil
}
| func removeGroupsStoredByCompositeKey(db *bolt.DB, groupStoredByCompositeKeys []groupStoredByCompositeKey) error { | ||||||||||||||
| return db.Update(func(tx *bolt.Tx) error { | ||||||||||||||
| bucket := tx.Bucket(bucketName) | ||||||||||||||
| // Pre-req: Migrating a non-existent bucket should not fail. | ||||||||||||||
| if bucket == nil { | ||||||||||||||
| return nil | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
| var deleteGroupErrs *multierror.Error | ||||||||||||||
| for _, group := range groupStoredByCompositeKeys { | ||||||||||||||
| // Remove the value stored behind the composite key, since the migrated group is now successfully stored. | ||||||||||||||
| if err := bucket.Delete(group.compositeKey); err != nil { | ||||||||||||||
| deleteGroupErrs = multierror.Append(deleteGroupErrs, err) | ||||||||||||||
| } | ||||||||||||||
| } | ||||||||||||||
|
|
||||||||||||||
| return deleteGroupErrs.ErrorOrNil() | ||||||||||||||
| }) | ||||||||||||||
| } | ||||||||||||||
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,151 @@ | ||
| package m111tom112 | ||
|
|
||
| import ( | ||
| "testing" | ||
|
|
||
| "github.com/gogo/protobuf/proto" | ||
| "github.com/stackrox/rox/generated/storage" | ||
| "github.com/stackrox/rox/migrator/bolthelpers" | ||
| "github.com/stackrox/rox/pkg/testutils" | ||
| "github.com/stretchr/testify/suite" | ||
| bolt "go.etcd.io/bbolt" | ||
| ) | ||
|
|
||
| func TestMigration(t *testing.T) { | ||
| suite.Run(t, new(removeGroupsMigrationSuite)) | ||
| } | ||
|
|
||
// removeGroupsMigrationSuite tests the migration that removes groups with
// invalid values from the groups bucket.
type removeGroupsMigrationSuite struct {
	suite.Suite

	// db is a temporary BoltDB instance, recreated for each test.
	db *bolt.DB
}
| func (suite *removeGroupsMigrationSuite) SetupTest() { | ||
| db, err := bolthelpers.NewTemp(testutils.DBFileName(suite)) | ||
| suite.Require().NoError(err, "failed to make BoltDB") | ||
| suite.db = db | ||
| } | ||
|
|
||
// TearDownTest cleans up the temporary database after each test (via
// testutils.TearDownDB).
func (suite *removeGroupsMigrationSuite) TearDownTest() {
	testutils.TearDownDB(suite.db)
}
// TestMigrate verifies that groups stored under legacy composite keys with
// invalid values (empty serialized value or empty properties) are removed by
// the migration, while the group stored by ID is kept untouched.
func (suite *removeGroupsMigrationSuite) TestMigrate() {
	// Test fixture groups, keyed by role name. Note that:
	// * Group "r1" should not be removed, as it is by ID,
	// * Group "r2" should be removed, as it has an empty role name,
	// * Group "r3" should be removed, as it has nil properties,
	// * Group "r4" should be removed, as it has nil properties and an empty role name.
	expectedGroups := map[string]*storage.Group{
		"r1": {
			Props: &storage.GroupProperties{
				AuthProviderId: "something",
				Id:             "io.stackrox.authz.group.",
			},
			RoleName: "r1",
		},
		"r2": {
			Props: &storage.GroupProperties{
				AuthProviderId: "something",
				Id:             "io.stackrox.authz.group.migrated.",
			},
			RoleName: "",
		},
		"r3": {
			Props:    nil,
			RoleName: "r3",
		},
		"r4": {
			Props:    nil,
			RoleName: "",
		},
	}

	// Each case stores one group; oldValue marks the legacy composite-key
	// storage format (as opposed to being keyed by the properties' ID).
	cases := []struct {
		storedGroup *storage.Group
		newGroup    *storage.Group
		oldValue    bool
	}{
		{
			storedGroup: expectedGroups["r2"],
			newGroup:    expectedGroups["r2"],
			oldValue:    true,
		},
		{
			storedGroup: expectedGroups["r1"],
			newGroup:    expectedGroups["r1"],
		},
		{
			storedGroup: expectedGroups["r3"],
			newGroup:    expectedGroups["r3"],
			oldValue:    true,
		},
		{
			storedGroup: expectedGroups["r4"],
			newGroup:    expectedGroups["r4"],
			oldValue:    true,
		},
	}

	// 1. Migration should succeed if the bucket does not exist.
	suite.NoError(removeGroupsWithInvalidValues(suite.db))

	// 2. Add the old groups to the groups bucket and create it if it does not exist yet.
	err := suite.db.Update(func(tx *bolt.Tx) error {
		bucket, err := tx.CreateBucketIfNotExists(bucketName)
		suite.NoError(err)
		for _, c := range cases {
			var key, value []byte
			if c.oldValue {
				// Legacy format: composite key and value derived from the group itself.
				key, value = serialize(c.storedGroup)
			} else {
				// New format: keyed by the group properties' ID, value is the marshalled proto.
				key = []byte(c.storedGroup.GetProps().GetId())
				value, err = c.storedGroup.Marshal()
				suite.NoError(err)
			}

			suite.NoError(bucket.Put(key, value))
		}
		return nil
	})
	suite.NoError(err)

	// 3. Migrate the groups without ID.
	suite.NoError(removeGroupsWithInvalidValues(suite.db))

	// 4. Verify the expected group can be found. After the migration only "r1"
	// should remain, so every entry the iteration visits must match it.
	err = suite.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(bucketName)
		err := bucket.ForEach(func(k, v []byte) error {
			var group storage.Group
			suite.NoError(proto.Unmarshal(v, &group))
			expectedGroup := expectedGroups["r1"]
			suite.Equal(expectedGroup.GetRoleName(), group.GetRoleName())
			suite.Equal(expectedGroup.GetProps().GetAuthProviderId(), group.GetProps().GetAuthProviderId())
			suite.Contains(group.GetProps().GetId(), expectedGroup.GetProps().GetId())
			return nil
		})
		suite.NoError(err)
		return nil
	})
	suite.NoError(err)

	// 5. Verify all other groups are removed.
	err = suite.db.View(func(tx *bolt.Tx) error {
		bucket := tx.Bucket(bucketName)
		for _, c := range cases {
			if !c.oldValue {
				continue
			}

			// Recompute the legacy key and check there is no value behind it anymore.
			key, _ := serialize(c.storedGroup)

			val := bucket.Get(key)
			suite.Nil(val)
		}

		return nil
	})
	suite.NoError(err)
}
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Who is using it?
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
I followed the same examples for copying stackrox code into the migrations - which is copying the package as is and not modifying it, even though some functions may be unused.