Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
Show all changes
25 commits
Select commit Hold shift + click to select a range
5f432be
Empty commit
vikin91 Dec 15, 2025
12b8ee9
ROX-32316: Add generic rate limiter for VM index reports
vikin91 Dec 15, 2025
0b2ecb6
Re-add rate limiting env vars
vikin91 Jan 6, 2026
ebb4459
Use HasCapability as in the other pipelines
vikin91 Jan 6, 2026
77d0c20
Inject clock to rate limiter to avoid test flakiness
vikin91 Jan 7, 2026
a956823
Fix style: use the correct sync pkg
vikin91 Jan 7, 2026
66b6d84
Fix: Skip sending on nil injector
vikin91 Jan 7, 2026
5d115c8
Make rate limiter more generic
vikin91 Jan 9, 2026
1963fef
Improve metrics coverage
vikin91 Jan 9, 2026
a65b4e0
Improve logs
vikin91 Jan 9, 2026
a506e0c
Add defensive checks for rate limiter
vikin91 Jan 12, 2026
91afe7a
Rephrase log message
vikin91 Jan 12, 2026
0614827
Fix race in getOrCreateLimiter
vikin91 Jan 12, 2026
cb04dc7
Add debug log
vikin91 Jan 12, 2026
a99237e
Change default ROX_VM_INDEX_REPORT_RATE_LIMIT to 1.0
vikin91 Jan 12, 2026
fe6ad97
Add comment on default rate limiter setting
vikin91 Jan 12, 2026
d4f9711
Record metrics in rate-unlimited mode
vikin91 Jan 12, 2026
26bb1d9
Remove metrics: RequestsAccepted, RequestsRejected
vikin91 Jan 12, 2026
7fc3477
Remove rate-limiter registry
vikin91 Jan 12, 2026
ee51992
Add test coverage
vikin91 Jan 13, 2026
70e5ebb
More limiter tests. Modify log statements
vikin91 Jan 13, 2026
4a697fd
Drop sync.Map in favor of map+RWmutex (benchmarked)
vikin91 Jan 13, 2026
672b9fe
Address review: logging & test coverage
vikin91 Jan 13, 2026
8825aab
Add VM/Sensor ACK wiring, drop relay payload cache, and route retries…
vikin91 Jan 9, 2026
0eba237
Post-rebase corrections
vikin91 Jan 14, 2026
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion central/sensor/service/connection/connection_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -256,7 +256,7 @@ func (c *sensorConnection) Scrapes() scrape.Controller {
}

func (c *sensorConnection) InjectMessageIntoQueue(msg *central.MsgFromSensor) {
c.multiplexedPush(sac.WithAllAccess(withConnection(context.Background(), c)), msg, nil)
c.multiplexedPush(sac.WithAllAccess(WithConnection(context.Background(), c)), msg, nil)
}

func (c *sensorConnection) NetworkEntities() networkentities.Controller {
Expand Down
3 changes: 2 additions & 1 deletion central/sensor/service/connection/context.go
Original file line number Diff line number Diff line change
Expand Up @@ -10,6 +10,7 @@ func FromContext(ctx context.Context) SensorConnection {
return conn
}

func withConnection(ctx context.Context, conn SensorConnection) context.Context {
// WithConnection returns a context with the given sensor connection attached.
func WithConnection(ctx context.Context, conn SensorConnection) context.Context {
return context.WithValue(ctx, contextKey{}, conn)
}
2 changes: 1 addition & 1 deletion central/sensor/service/connection/manager_impl.go
Original file line number Diff line number Diff line change
Expand Up @@ -279,7 +279,7 @@ func (m *manager) HandleConnection(ctx context.Context, sensorHello *central.Sen
m.complianceOperatorMgr,
m.initSyncMgr,
)
ctx = withConnection(ctx, conn)
ctx = WithConnection(ctx, conn)

oldConnection, err := m.replaceConnection(ctx, cluster, conn)
if err != nil {
Expand Down
29 changes: 18 additions & 11 deletions central/sensor/service/pipeline/nodeindex/pipeline.go
Original file line number Diff line number Diff line change
Expand Up @@ -130,23 +130,30 @@ func sendComplianceAck(ctx context.Context, node *storage.Node, injector common.
if injector == nil {
return
}
reply := replyCompliance(node.GetClusterId(), node.GetName(), central.NodeInventoryACK_ACK)
if err := injector.InjectMessage(ctx, reply); err != nil {
log.Warnf("Failed sending node-indexing-ACK to Sensor for %s: %v", nodeDatastore.NodeString(node), err)
} else {
log.Debugf("Sent node-indexing-ACK for %s", nodeDatastore.NodeString(node))
// Always send SensorACK (new path).
if err := injector.InjectMessage(ctx, &central.MsgToSensor{
Msg: &central.MsgToSensor_SensorAck{
SensorAck: &central.SensorACK{
Action: central.SensorACK_ACK,
MessageType: central.SensorACK_NODE_INDEX_REPORT,
ResourceId: node.GetName(),
},
},
}); err != nil {
log.Warnf("Failed injecting SensorACK for node index report (node=%s): %v", node.GetName(), err)
}
}

func replyCompliance(clusterID, nodeName string, t central.NodeInventoryACK_Action) *central.MsgToSensor {
return &central.MsgToSensor{
// Always send legacy NodeInventoryACK for backward compatibility.
if err := injector.InjectMessage(ctx, &central.MsgToSensor{
Msg: &central.MsgToSensor_NodeInventoryAck{
NodeInventoryAck: &central.NodeInventoryACK{
ClusterId: clusterID,
NodeName: nodeName,
Action: t,
ClusterId: node.GetClusterId(),
NodeName: node.GetName(),
Action: central.NodeInventoryACK_ACK,
MessageType: central.NodeInventoryACK_NodeIndexer,
},
},
}); err != nil {
log.Warnf("Failed injecting legacy NodeInventoryACK for node index report (node=%s): %v", node.GetName(), err)
}
}
91 changes: 91 additions & 0 deletions central/sensor/service/pipeline/nodeindex/pipeline_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -9,8 +9,10 @@ import (
"github.com/stackrox/rox/generated/internalapi/central"
v4 "github.com/stackrox/rox/generated/internalapi/scanner/v4"
"github.com/stackrox/rox/generated/storage"
"github.com/stackrox/rox/pkg/concurrency"
"github.com/stackrox/rox/pkg/features"
nodesEnricherMocks "github.com/stackrox/rox/pkg/nodes/enricher/mocks"
"github.com/stackrox/rox/pkg/protoassert"
"github.com/stretchr/testify/assert"
"go.uber.org/mock/gomock"
)
Expand Down Expand Up @@ -77,6 +79,58 @@ func TestPipelineEnrichesAndUpserts(t *testing.T) {
}
}

// TestPipelineSendsSensorAndLegacyACKs verifies that one successful run of the
// node-index pipeline injects both the new-style SensorACK and the legacy
// NodeInventoryACK back to Sensor for the processed node.
func TestPipelineSendsSensorAndLegacyACKs(t *testing.T) {
	// Node indexing is gated behind both feature flags.
	t.Setenv(features.NodeIndexEnabled.EnvVar(), "true")
	t.Setenv(features.ScannerV4.EnvVar(), "true")

	ctrl := gomock.NewController(t)
	clusterStore := clusterDatastoreMocks.NewMockDataStore(ctrl)
	nodeDatastore := nodeDatastoreMocks.NewMockDataStore(ctrl)
	riskManager := riskManagerMocks.NewMockManager(ctrl)
	enricher := nodesEnricherMocks.NewMockNodeEnricher(ctrl)

	node := storage.Node{
		Id:        "1",
		Name:      "node-name",
		ClusterId: "cluster-id",
	}
	msg := createMsg(mockIndexReport)

	// The pipeline is expected to fetch the node, enrich it with
	// vulnerabilities, and upsert it with recalculated risk — in that order.
	gomock.InOrder(
		nodeDatastore.EXPECT().GetNode(gomock.Any(), gomock.Eq(node.GetId())).Times(1).Return(&node, true, nil),
		enricher.EXPECT().EnrichNodeWithVulnerabilities(gomock.Any(), nil, gomock.Any()).Times(1).Return(nil),
		riskManager.EXPECT().CalculateRiskAndUpsertNode(gomock.Any()).Times(1).Return(nil),
	)

	// recordingInjector captures every ACK the pipeline sends.
	injector := &recordingInjector{}
	p := &pipelineImpl{
		clusterStore:  clusterStore,
		nodeDatastore: nodeDatastore,
		riskManager:   riskManager,
		enricher:      enricher,
	}

	err := p.Run(t.Context(), node.GetClusterId(), msg, injector)
	assert.NoError(t, err)

	// Exactly one new-style SensorACK must be sent, keyed by node name.
	protoassert.SlicesEqual(t, []*central.SensorACK{
		{
			Action:      central.SensorACK_ACK,
			MessageType: central.SensorACK_NODE_INDEX_REPORT,
			ResourceId:  node.GetName(),
		},
	}, injector.getSentSensorACKs())

	// Exactly one legacy NodeInventoryACK must be sent for backward compatibility.
	protoassert.SlicesEqual(t, []*central.NodeInventoryACK{
		{
			ClusterId:   node.GetClusterId(),
			NodeName:    node.GetName(),
			Action:      central.NodeInventoryACK_ACK,
			MessageType: central.NodeInventoryACK_NodeIndexer,
		},
	}, injector.getSentACKs())
}

func createMsg(ir *v4.IndexReport) *central.MsgFromSensor {
return &central.MsgFromSensor{
Msg: &central.MsgFromSensor_Event{
Expand All @@ -90,6 +144,43 @@ func createMsg(ir *v4.IndexReport) *central.MsgFromSensor {
}
}

// recordingInjector is a test double that records every ACK message injected
// through it, separated into legacy and new-style ACKs.
type recordingInjector struct {
	legacy []*central.NodeInventoryACK // captured legacy NodeInventoryACK messages
	sensor []*central.SensorACK        // captured new-style SensorACK messages
}

// InjectMessage records any NodeInventoryACK or SensorACK carried by msg.
// Clones are stored so later mutations of msg cannot affect the recording.
// It never returns an error.
func (r *recordingInjector) InjectMessage(_ concurrency.Waitable, msg *central.MsgToSensor) error {
	if legacyAck := msg.GetNodeInventoryAck(); legacyAck != nil {
		r.legacy = append(r.legacy, legacyAck.CloneVT())
	}
	if sensorAck := msg.GetSensorAck(); sensorAck != nil {
		r.sensor = append(r.sensor, sensorAck.CloneVT())
	}
	return nil
}

// InjectMessageIntoQueue is a no-op; this test double only records MsgToSensor ACKs.
func (r *recordingInjector) InjectMessageIntoQueue(_ *central.MsgFromSensor) {}

// getSentACKs returns all recorded legacy NodeInventoryACKs, skipping nil entries.
func (r *recordingInjector) getSentACKs() []*central.NodeInventoryACK {
	acks := make([]*central.NodeInventoryACK, 0, len(r.legacy))
	for i := range r.legacy {
		if r.legacy[i] == nil {
			continue
		}
		acks = append(acks, r.legacy[i])
	}
	return acks
}

// getSentSensorACKs returns all recorded new-style SensorACKs, skipping nil entries.
func (r *recordingInjector) getSentSensorACKs() []*central.SensorACK {
	acks := make([]*central.SensorACK, 0, len(r.sensor))
	for i := range r.sensor {
		if r.sensor[i] == nil {
			continue
		}
		acks = append(acks, r.sensor[i])
	}
	return acks
}

var (
mockIndexReport = &v4.IndexReport{
HashId: "",
Expand Down
38 changes: 30 additions & 8 deletions central/sensor/service/pipeline/nodeinventory/pipeline.go
Original file line number Diff line number Diff line change
Expand Up @@ -142,16 +142,29 @@ func sendComplianceAck(ctx context.Context, node *storage.Node, ninv *storage.No
if injector == nil {
return
}
reply := replyCompliance(node.GetClusterId(), ninv.GetNodeName(), central.NodeInventoryACK_ACK)
if err := injector.InjectMessage(ctx, reply); err != nil {
log.Warnf("Failed sending node-inventory-ACK to Sensor for %s: %v", nodeDatastore.NodeString(node), err)
} else {
log.Debugf("Sent node-inventory-ACK for %s", nodeDatastore.NodeString(node))
}
replyCompliance(ctx, node.GetClusterId(), ninv.GetNodeName(), central.NodeInventoryACK_ACK, injector)
}

func replyCompliance(clusterID, nodeName string, t central.NodeInventoryACK_Action) *central.MsgToSensor {
return &central.MsgToSensor{
func replyCompliance(ctx context.Context, clusterID, nodeName string, t central.NodeInventoryACK_Action, injector common.MessageInjector) {
if injector == nil {
return
}

// Always send SensorACK (new path).
if err := injector.InjectMessage(ctx, &central.MsgToSensor{
Msg: &central.MsgToSensor_SensorAck{
SensorAck: &central.SensorACK{
Action: convertLegacyActionToSensor(t),
MessageType: central.SensorACK_NODE_INVENTORY,
ResourceId: nodeName,
},
},
}); err != nil {
log.Warnf("Failed injecting SensorACK for node inventory (clusterID=%s, nodeName=%s): %v", clusterID, nodeName, err)
}

// Always send legacy NodeInventoryACK for backward compatibility.
if err := injector.InjectMessage(ctx, &central.MsgToSensor{
Msg: &central.MsgToSensor_NodeInventoryAck{
NodeInventoryAck: &central.NodeInventoryACK{
ClusterId: clusterID,
Expand All @@ -160,7 +173,16 @@ func replyCompliance(clusterID, nodeName string, t central.NodeInventoryACK_Acti
MessageType: central.NodeInventoryACK_NodeInventory,
},
},
}); err != nil {
log.Warnf("Failed injecting legacy NodeInventoryACK for node inventory (clusterID=%s, nodeName=%s): %v", clusterID, nodeName, err)
}
}

// OnFinish is a no-op; this pipeline needs no per-cluster cleanup.
func (p *pipelineImpl) OnFinish(_ string) {}

// convertLegacyActionToSensor maps a legacy NodeInventoryACK action to the
// corresponding SensorACK action: ACK maps to ACK, anything else to NACK.
func convertLegacyActionToSensor(action central.NodeInventoryACK_Action) central.SensorACK_Action {
	switch action {
	case central.NodeInventoryACK_ACK:
		return central.SensorACK_ACK
	default:
		return central.SensorACK_NACK
	}
}
94 changes: 90 additions & 4 deletions central/sensor/service/pipeline/nodeinventory/pipeline_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -82,7 +82,9 @@ func Test_pipelineImpl_Run(t *testing.T) {
m.riskManager.EXPECT().CalculateRiskAndUpsertNode(gomock.Any()).Times(1).Return(nil),
)
},
wantInjectorContain: []*central.NodeInventoryACK{{Action: central.NodeInventoryACK_ACK}},
wantInjectorContain: []*central.NodeInventoryACK{
{Action: central.NodeInventoryACK_ACK},
},
},
{
name: "when event has inventory then enrich and upsert with risk",
Expand Down Expand Up @@ -167,24 +169,92 @@ func Test_pipelineImpl_Run(t *testing.T) {
if len(tt.wantInjectorContain) == 0 {
assert.Len(t, inj.getSentACKs(), 0)
} else {
protoassert.SlicesEqual(t, tt.wantInjectorContain, inj.getSentACKs())
protoassert.SlicesEqual(t, tt.wantInjectorContain, inj.getSentACKs(), "sent ACKs: %v", inj.getSentACKs())
}
}
})
}
}

// Test_pipelineImpl_Run_SendsSensorAndLegacyACKs verifies that a successful
// node-inventory pipeline run injects both the new-style SensorACK and the
// legacy NodeInventoryACK back to Sensor.
func Test_pipelineImpl_Run_SendsSensorAndLegacyACKs(t *testing.T) {
	ctrl := gomock.NewController(t)
	clusterStore := clusterDatastoreMocks.NewMockDataStore(ctrl)
	nodeDatastore := nodeDatastoreMocks.NewMockDataStore(ctrl)
	riskManager := riskManagerMocks.NewMockManager(ctrl)
	enricher := nodesEnricherMocks.NewMockNodeEnricher(ctrl)

	clusterID := "cluster-1"
	nodeName := "node-name"
	node := storage.Node{
		Id:        "node-id",
		ClusterId: clusterID,
	}
	// A CREATE event carrying a NodeInventory for the node above.
	msg := &central.MsgFromSensor{
		Msg: &central.MsgFromSensor_Event{
			Event: &central.SensorEvent{
				Action: central.ResourceAction_CREATE_RESOURCE,
				Resource: &central.SensorEvent_NodeInventory{
					NodeInventory: &storage.NodeInventory{
						NodeId:   node.GetId(),
						NodeName: nodeName,
					},
				},
			},
		},
	}
	injector := &recordingInjector{}

	// Expected pipeline sequence: fetch node, enrich, upsert with recalculated risk.
	gomock.InOrder(
		nodeDatastore.EXPECT().GetNode(gomock.Any(), gomock.Eq(node.GetId())).Times(1).Return(&node, true, nil),
		enricher.EXPECT().EnrichNodeWithVulnerabilities(gomock.Any(), gomock.Any(), nil).Times(1).Return(nil),
		riskManager.EXPECT().CalculateRiskAndUpsertNode(gomock.Any()).Times(1).Return(nil),
	)

	p := &pipelineImpl{
		clusterStore:  clusterStore,
		nodeDatastore: nodeDatastore,
		enricher:      enricher,
		riskManager:   riskManager,
	}

	err := p.Run(context.Background(), clusterID, msg, injector)
	assert.NoError(t, err)

	// Exactly one legacy NodeInventoryACK must be sent for backward compatibility.
	protoassert.SlicesEqual(t, []*central.NodeInventoryACK{
		{
			ClusterId:   clusterID,
			NodeName:    nodeName,
			Action:      central.NodeInventoryACK_ACK,
			MessageType: central.NodeInventoryACK_NodeInventory,
		},
	}, injector.getSentACKs(), "legacy ACKs")

	// Exactly one new-style SensorACK must be sent, keyed by node name.
	protoassert.SlicesEqual(t, []*central.SensorACK{
		{
			Action:      central.SensorACK_ACK,
			MessageType: central.SensorACK_NODE_INVENTORY,
			ResourceId:  nodeName,
		},
	}, injector.getSentSensorACKs(), "sensor ACKs")
}

// Compile-time check that recordingInjector satisfies common.MessageInjector.
var _ common.MessageInjector = (*recordingInjector)(nil)

// recordingInjector is a test double that records every ACK message injected
// through it, separated into legacy and new-style ACKs.
type recordingInjector struct {
	lock     sync.Mutex                  // guards messages and sensor
	messages []*central.NodeInventoryACK // captured legacy NodeInventoryACK messages
	sensor   []*central.SensorACK        // captured new-style SensorACK messages
}

// InjectMessage records any NodeInventoryACK or SensorACK carried by msg,
// storing clones under the lock so concurrent callers are safe. It never
// returns an error.
func (r *recordingInjector) InjectMessage(_ concurrency.Waitable, msg *central.MsgToSensor) error {
	r.lock.Lock()
	defer r.lock.Unlock()
	if legacyAck := msg.GetNodeInventoryAck(); legacyAck != nil {
		r.messages = append(r.messages, legacyAck.CloneVT())
	}
	if sensorAck := msg.GetSensorAck(); sensorAck != nil {
		r.sensor = append(r.sensor, sensorAck.CloneVT())
	}
	return nil
}

Expand All @@ -194,6 +264,22 @@ func (r *recordingInjector) getSentACKs() []*central.NodeInventoryACK {
r.lock.Lock()
defer r.lock.Unlock()
copied := make([]*central.NodeInventoryACK, 0, len(r.messages))
copied = append(copied, r.messages...)
for _, m := range r.messages {
if m != nil {
copied = append(copied, m)
}
}
return copied
}

// getSentSensorACKs returns a snapshot of all non-nil recorded SensorACKs,
// taken under the lock.
func (r *recordingInjector) getSentSensorACKs() []*central.SensorACK {
	r.lock.Lock()
	defer r.lock.Unlock()
	snapshot := make([]*central.SensorACK, 0, len(r.sensor))
	for i := range r.sensor {
		if ack := r.sensor[i]; ack != nil {
			snapshot = append(snapshot, ack)
		}
	}
	return snapshot
}
3 changes: 2 additions & 1 deletion compliance/cmd/compliance/main.go
Original file line number Diff line number Diff line change
Expand Up @@ -38,6 +38,7 @@ func main() {
defer cancel()
umhNodeInv := handler.NewUnconfirmedMessageHandler(ctx, "node-inventory", env.NodeScanningAckDeadlineBase.DurationSetting())
umhNodeIndex := handler.NewUnconfirmedMessageHandler(ctx, "node-index", env.NodeScanningAckDeadlineBase.DurationSetting())
c := compliance.NewComplianceApp(np, scanner, cachedNodeIndexer, umhNodeInv, umhNodeIndex)
umhVMIndex := handler.NewUnconfirmedMessageHandler(ctx, "vm-index", env.NodeScanningAckDeadlineBase.DurationSetting())
c := compliance.NewComplianceApp(np, scanner, cachedNodeIndexer, umhNodeInv, umhNodeIndex, umhVMIndex)
c.Start()
}
Loading
Loading