@@ -166,7 +166,7 @@ func (m *onNodeGroupDeleteMock) Delete(id string) error {

 func setUpScaleDownActuator(ctx *context.AutoscalingContext, autoscalingOptions config.AutoscalingOptions) {
 	deleteOptions := options.NewNodeDeleteOptions(autoscalingOptions)
-	ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), latencytracker.NewNodeLatencyTracker(), deleteOptions, rules.Default(deleteOptions), processorstest.NewTestProcessors(ctx).NodeGroupConfigProcessor)
+	ctx.ScaleDownActuator = actuation.NewActuator(ctx, nil, deletiontracker.NewNodeDeletionTracker(0*time.Second), nil, deleteOptions, rules.Default(deleteOptions), processorstest.NewTestProcessors(ctx).NodeGroupConfigProcessor)
 }

 type nodeGroup struct {
@@ -212,7 +212,6 @@ type commonMocks struct {
 	podDisruptionBudgetLister *podDisruptionBudgetListerMock
 	daemonSetLister           *daemonSetListerMock
 	nodeDeletionTracker       *deletiontracker.NodeDeletionTracker
-	nodeLatencyTracker        *latencytracker.NodeLatencyTracker

 	resourceClaimLister *fakeAllObjectsLister[*resourceapi.ResourceClaim]
 	resourceSliceLister *fakeAllObjectsLister[*resourceapi.ResourceSlice]
@@ -323,12 +322,8 @@ func setupAutoscaler(config *autoscalerSetupConfig) (*StaticAutoscaler, error) {
 	if nodeDeletionTracker == nil {
 		nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0*time.Second)
 	}
-	nodeLatencyTracker := config.mocks.nodeLatencyTracker
-	if nodeLatencyTracker == nil {
-		nodeLatencyTracker = latencytracker.NewNodeLatencyTracker()
-	}
-	ctx.ScaleDownActuator = actuation.NewActuator(&ctx, clusterState, nodeDeletionTracker, nodeLatencyTracker, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
-	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, nodeLatencyTracker)
+	ctx.ScaleDownActuator = actuation.NewActuator(&ctx, clusterState, nodeDeletionTracker, nil, deleteOptions, drainabilityRules, processors.NodeGroupConfigProcessor)
+	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, nil)

 	processorCallbacks.scaleDownPlanner = sdPlanner

@@ -384,6 +379,21 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 	ng1 := reflect.ValueOf(provider.GetNodeGroup("ng1")).Interface().(*testprovider.TestNodeGroup)
 	assert.NotNil(t, ng1)
 	assert.NotNil(t, provider)
+	// NodeLatencyTracker mock
+	nltMock := &latencytrackerMock{}
+	nltMock.On("ObserveDeletion",
+		"n2",
+		mock.MatchedBy(func(t time.Time) bool { return !t.IsZero() }),
+	).Return()
+	nltMock.On("UpdateStateWithUnneededList",
+		mock.MatchedBy(func(nodes []*apiv1.Node) bool { return true }),
+		mock.MatchedBy(func(m map[string]bool) bool { return true }),
+		mock.Anything,
+	).Return()
+	nltMock.On("UpdateThreshold",
+		"n2",
+		time.Minute,
+	).Return()

 	// Create context with mocked lister registry.
 	options := config.AutoscalingOptions{
@@ -416,7 +426,7 @@ func TestStaticAutoscalerRunOnce(t *testing.T) {
 	}
 	processors := processorstest.NewTestProcessors(&context)
 	clusterState := clusterstate.NewClusterStateRegistry(provider, clusterStateConfig, context.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(options.NodeGroupDefaults), processors.AsyncNodeGroupStateChecker)
-	sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState, nil, nil)
+	sdPlanner, sdActuator := newScaleDownPlannerAndActuator(&context, processors, clusterState, nil, nltMock)
 	suOrchestrator := orchestrator.New()
 	suOrchestrator.Initialize(&context, processors, clusterState, newEstimatorBuilder(), taints.TaintConfig{})

@@ -2473,7 +2483,7 @@ func TestStaticAutoscalerUpcomingScaleDownCandidates(t *testing.T) {
 	csr := clusterstate.NewClusterStateRegistry(provider, csrConfig, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)

 	// Setting the Actuator is necessary for testing any scale-down logic, it shouldn't have anything to do in this test.
-	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), latencytracker.NewNodeLatencyTracker(), options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor)
+	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), nil, options.NodeDeleteOptions{}, nil, processorstest.NewTestProcessors(&ctx).NodeGroupConfigProcessor)
 	ctx.ScaleDownActuator = actuator

 	// Fake planner that keeps track of the scale-down candidates passed to UpdateClusterState.
@@ -3134,7 +3144,7 @@ func waitForDeleteToFinish(t *testing.T, deleteFinished <-chan bool) {
 	}
 }

-func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_processors.AutoscalingProcessors, cs *clusterstate.ClusterStateRegistry, nodeDeletionTracker *deletiontracker.NodeDeletionTracker, nodeLatencyTracker *latencytracker.NodeLatencyTracker) (scaledown.Planner, scaledown.Actuator) {
+func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_processors.AutoscalingProcessors, cs *clusterstate.ClusterStateRegistry, nodeDeletionTracker *deletiontracker.NodeDeletionTracker, nodeDeletionLatencyTracker latencytracker.LatencyTracker) (scaledown.Planner, scaledown.Actuator) {
 	ctx.MaxScaleDownParallelism = 10
 	ctx.MaxDrainParallelism = 1
 	ctx.NodeDeletionBatcherInterval = 0 * time.Second
@@ -3149,11 +3159,8 @@ func newScaleDownPlannerAndActuator(ctx *context.AutoscalingContext, p *ca_proce
 	if nodeDeletionTracker == nil {
 		nodeDeletionTracker = deletiontracker.NewNodeDeletionTracker(0*time.Second)
 	}
-	if nodeLatencyTracker == nil {
-		nodeLatencyTracker = latencytracker.NewNodeLatencyTracker()
-	}
-	planner := planner.New(ctx, p, deleteOptions, nil, nodeLatencyTracker)
-	actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, nodeLatencyTracker, deleteOptions, nil, p.NodeGroupConfigProcessor)
+	planner := planner.New(ctx, p, deleteOptions, nil, nodeDeletionLatencyTracker)
+	actuator := actuation.NewActuator(ctx, cs, nodeDeletionTracker, nodeDeletionLatencyTracker, deleteOptions, nil, p.NodeGroupConfigProcessor)
 	return planner, actuator
 }

@@ -3269,13 +3276,13 @@ func buildStaticAutoscaler(t *testing.T, provider cloudprovider.CloudProvider, a
 	processors.ScaleDownNodeProcessor = cp

 	csr := clusterstate.NewClusterStateRegistry(provider, clusterstate.ClusterStateRegistryConfig{OkTotalUnreadyCount: 1}, ctx.LogRecorder, NewBackoff(), nodegroupconfig.NewDefaultNodeGroupConfigProcessor(config.NodeGroupAutoscalingOptions{MaxNodeProvisionTime: 15 * time.Minute}), processors.AsyncNodeGroupStateChecker)
-	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), latencytracker.NewNodeLatencyTracker(), options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor)
+	actuator := actuation.NewActuator(&ctx, csr, deletiontracker.NewNodeDeletionTracker(0*time.Second), nil, options.NodeDeleteOptions{}, nil, processors.NodeGroupConfigProcessor)
 	ctx.ScaleDownActuator = actuator

 	deleteOptions := options.NewNodeDeleteOptions(ctx.AutoscalingOptions)
 	drainabilityRules := rules.Default(deleteOptions)

-	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, latencytracker.NewNodeLatencyTracker())
+	sdPlanner := planner.New(&ctx, processors, deleteOptions, drainabilityRules, nil)

 	autoscaler := &StaticAutoscaler{
 		AutoscalingContext: &ctx,
@@ -3325,3 +3332,25 @@ func assertNodesSoftTaintsStatus(t *testing.T, fakeClient *fake.Clientset, nodes
 		assert.Equal(t, tainted, taints.HasDeletionCandidateTaint(newNode))
 	}
 }
+
+// latencytrackerMock implements LatencyTracker for mocking
+type latencytrackerMock struct {
+	mock.Mock
+}
+
+func (m *latencytrackerMock) ObserveDeletion(nodeName string, timestamp time.Time) {
+	m.Called(nodeName, timestamp)
+}
+
+func (m *latencytrackerMock) UpdateStateWithUnneededList(list []*apiv1.Node, currentlyInDeletion map[string]bool, timestamp time.Time) {
+	m.Called(list, currentlyInDeletion, timestamp)
+}
+
+func (m *latencytrackerMock) UpdateThreshold(nodeName string, threshold time.Duration) {
+	m.Called(nodeName, threshold)
+}
+
+func (m *latencytrackerMock) GetTrackedNodes() []string {
+	args := m.Called()
+	return args.Get(0).([]string)
+}
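
Not shown in this diff: how a test would typically verify the expectations registered on latencytrackerMock at the end of a RunOnce pass. A minimal sketch, assuming testify's standard mock API and the nltMock variable set up earlier:

	// Hypothetical verification step, not part of the change above.
	// AssertExpectations fails the test if any call registered with On(...)
	// was never observed; AssertCalled additionally pins the node name.
	nltMock.AssertExpectations(t)
	nltMock.AssertCalled(t, "ObserveDeletion", "n2", mock.Anything)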