@@ -165,7 +165,7 @@ func (m *onNodeGroupDeleteMock) Delete(id string) error {
 
 func setUpScaleDownActuator(ctx *context.AutoscalingContext, autoscalingOptions config.AutoscalingOptions) {
 	deleteOptions := options.NewNodeDeleteOptions(autoscalingOptions)
-	ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), latencytracker.NewNodeLatencyTracker(), deleteOptions, rules.Default(deleteOptions), processorstest.NewTestProcessors(ctx).NodeGroupConfigProcessor)
+	ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), nil, deleteOptions, rules.Default(deleteOptions), processorstest.NewTestProcessors(ctx).NodeGroupConfigProcessor)
 }
 
 type nodeGroup struct {
@@ -211,7 +211,6 @@ type commonMocks struct {
 	podDisruptionBudgetLister *podDisruptionBudgetListerMock
 	daemonSetLister           *daemonSetListerMock
 	nodeDeletionTracker       *deletiontracker.NodeDeletionTracker
-	nodeLatencyTracker        *latencytracker.NodeLatencyTracker
 
 	resourceClaimLister *fakeAllObjectsLister[*resourceapi.ResourceClaim]
 	resourceSliceLister *fakeAllObjectsLister[*resourceapi.ResourceSlice]
@@ -322,12 +321,8 @@ func setupAutoscaler(config *autoscalerSetupConfig) (*StaticAutoscaler, error) {
 	if nodeDeletionTracker == nil {
 		nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0*time.Second)
 	}
-	nodeLatencyTracker := config.mocks.nodeLatencyTracker
-	if nodeLatencyTracker == nil {
-		nodeLatencyTracker = latencytracker.NewNodeLatencyTracker()
-	}
-	ctx.ScaleDownActuator = actuation.NewActuator(&ctx, clusterState, nodeDeletionTracker, nodeLatencyTracker, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
-	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, nodeLatencyTracker)
+	ctx.ScaleDownActuator = actuation.NewActuator(&ctx, clusterState, nodeDeletionTracker, nil, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
+	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, nil)
 
 	processorCallbacks.scaleDownPlanner = sdPlanner
@@ -383,6 +378,21 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 	ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
 	assert.NotNil(t, ng1)
 	assert.NotNil(t, provider)
+	// NodeLatencyTracker mock
+	nltMock := &latencytrackerMock{}
+	nltMock.On("ObserveDeletion",
+		"n2",
+		mock.MatchedBy(func(t time.Time) bool { return !t.IsZero() }),
+	).Return()
+	nltMock.On("UpdateStateWithUnneededList",
+		mock.MatchedBy(func(nodes []*apiv1.Node) bool { return true }),
+		mock.MatchedBy(func(m map[string]bool) bool { return true }),
+		mock.Anything,
+	).Return()
+	nltMock.On("UpdateThreshold",
+		"n2",
+		time.Minute,
+	).Return()
 
 	// Create context with mocked lister registry.
 	options := config.AutoscalingOptions{
@@ -415,7 +425,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 	}
 	processors := processorstest.NewTestProcessors(&context)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults), processors.AsyncNodeGroupStateChecker)
-	sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState, nil, nil)
+	sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState, nil, nltMock)
 	suOrchestrator := orchestrator.New()
 	suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})
@@ -2472,7 +2482,7 @@ func TestStaticAutoscalerUpcomingScaleDownCandidates(t *testing.T) {
 	csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
 
 	// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
-	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), latencytracker.NewNodeLatencyTracker(), options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor)
+	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), nil, options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor)
 	ctx.ScaleDownActuator = actuator
 
 	// Fake planner that keeps track of the scale-down candidates passed to UpdateClusterState.
@@ -3133,7 +3143,7 @@ func waitForDeleteToFinish(t *testing.T, deleteFinished <-chan bool) {
 	}
 }
 
-func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_processors.AutoscalingProcessors, cs *clusterstate.ClusterStateRegistry, nodeDeletionTracker *deletiontracker.NodeDeletionTracker, nodeLatencyTracker *latencytracker.NodeLatencyTracker) (scaledown.Planner, scaledown.Actuator) {
+func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_processors.AutoscalingProcessors, cs *clusterstate.ClusterStateRegistry, nodeDeletionTracker *deletiontracker.NodeDeletionTracker, nodeDeletionLatencyTracker latencytracker.LatencyTracker) (scaledown.Planner, scaledown.Actuator) {
 	ctx.MaxScaleDownParallelism = 10
 	ctx.MaxDrainParallelism = 1
 	ctx.NodeDeletionBatcherInterval = 0 * time.Second
@@ -3148,11 +3158,8 @@ func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_proce
 	if nodeDeletionTracker == nil {
 		nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0*time.Second)
 	}
-	if nodeLatencyTracker == nil {
-		nodeLatencyTracker = latencytracker.NewNodeLatencyTracker()
-	}
-	planner := planner.New(ctx, p, deleteOptions, nil, nodeLatencyTracker)
-	actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, nodeLatencyTracker, deleteOptions, nil, p.NodeGroupConfigProcessor)
+	planner := planner.New(ctx, p, deleteOptions, nil, nodeDeletionLatencyTracker)
+	actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, nodeDeletionLatencyTracker, deleteOptions, nil, p.NodeGroupConfigProcessor)
 	return planner, actuator
 }
 
@@ -3268,13 +3275,13 @@ func buildStaticAutoscaler(t *testing.T, provider cloudprovider.CloudProvider, a
 	processors.ScaleDownNodeProcessor = cp
 
 	csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{OkTotalUnreadyCount: 1}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
-	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), latencytracker.NewNodeLatencyTracker(), options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor)
+	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), nil, options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor)
 	ctx.ScaleDownActuator = actuator
 
 	deleteOptions := options.NewNodeDeleteOptions(ctx.AutoscalingOptions)
 	drainabilityRules := rules.Default(deleteOptions)
 
-	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, latencytracker.NewNodeLatencyTracker())
+	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, nil)
 
 	autoscaler := &StaticAutoscaler{
 		AutoscalingContext: &ctx,
@@ -3324,3 +3331,25 @@ func assertNodesSoftTaintsStatus(t *testing.T, fakeClient *fake.Clientset, nodes
 		assert.Equal(t, tainted, taints.HasDeletionCandidateTaint(newNode))
 	}
 }
+
+// latencytrackerMock implements LatencyTracker for mocking
+type latencytrackerMock struct {
+	mock.Mock
+}
+
+func (m *latencytrackerMock) ObserveDeletion(nodeName string, timestamp time.Time) {
+	m.Called(nodeName, timestamp)
+}
+
+func (m *latencytrackerMock) UpdateStateWithUnneededList(list []*apiv1.Node, currentlyInDeletion map[string]bool, timestamp time.Time) {
+	m.Called(list, currentlyInDeletion, timestamp)
+}
+
+func (m *latencytrackerMock) UpdateThreshold(nodeName string, threshold time.Duration) {
+	m.Called(nodeName, threshold)
+}
+
+func (m *latencytrackerMock) GetTrackedNodes() []string {
+	args := m.Called()
+	return args.Get(0).([]string)
+}
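For context, here is a minimal sketch of how a testify mock like the latencytrackerMock above is typically driven and verified. This is illustrative only and not part of the change: the package name, the standalone test, and the explicit AssertExpectations call are assumptions; in the diff itself the expectations registered in TestStaticAutoscalerRunOnce are exercised through the LatencyTracker value passed to the planner and actuator.

// Sketch only. Assumes the testify-based mock pattern used in the diff;
// the names latencyTrackerSketch and TestLatencyTrackerMockSketch are
// hypothetical and do not exist in cluster-autoscaler.
package sketch

import (
	"testing"
	"time"

	"github.com/stretchr/testify/mock"
)

type latencyTrackerSketch struct {
	mock.Mock
}

func (m *latencyTrackerSketch) ObserveDeletion(nodeName string, timestamp time.Time) {
	m.Called(nodeName, timestamp)
}

func TestLatencyTrackerMockSketch(t *testing.T) {
	nlt := &latencyTrackerSketch{}
	// Expect exactly this call: node "n2" with a non-zero deletion timestamp.
	nlt.On("ObserveDeletion", "n2",
		mock.MatchedBy(func(ts time.Time) bool { return !ts.IsZero() }),
	).Return()

	// The code under test would invoke this through its latency-tracker interface.
	nlt.ObserveDeletion("n2", time.Now())

	// Fails the test if any registered expectation was not met.
	nlt.AssertExpectations(t)
}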