@@ -151,8 +151,7 @@ void testInboundRecord(EmbeddedKafkaBroker embeddedKafka) {
         adapter.setRecordMessageConverter(new MessagingMessageConverter() {

             @Override
-            public Message<?> toMessage(ConsumerRecord<?, ?> record, Acknowledgment acknowledgment,
-                    Consumer<?, ?> consumer, Type type) {
+            public Message<?> toMessage(ConsumerRecord<?, ?> record, Object acknowledgment, Object consumer, Type type) {

                 Message<?> message = super.toMessage(record, acknowledgment, consumer, type);
                 return MessageBuilder.fromMessage(message).setHeader("testHeader", "testValue").build();
             }
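For reference, here is the same header-decorating converter as a standalone class with the imports it needs. This is a sketch only; it assumes the relaxed `toMessage` signature shown in the diff above, where the acknowledgment and consumer arrive as plain `Object` parameters.

```java
import java.lang.reflect.Type;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.support.converter.MessagingMessageConverter;
import org.springframework.messaging.Message;
import org.springframework.messaging.support.MessageBuilder;

// Decorates every converted record with an extra header; the acknowledgment
// and consumer arrive untyped (Object), matching the signature in the diff.
class HeaderDecoratingConverter extends MessagingMessageConverter {

    @Override
    public Message<?> toMessage(ConsumerRecord<?, ?> record, Object acknowledgment, Object consumer, Type type) {
        Message<?> message = super.toMessage(record, acknowledgment, consumer, type);
        return MessageBuilder.fromMessage(message).setHeader("testHeader", "testValue").build();
    }

}
```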
@@ -189,27 +188,26 @@ public Message<?> toMessage(ConsumerRecord<?, ?> record, Acknowledgment acknowledgment,
         assertThat(received.getPayload()).isInstanceOf(KafkaNull.class);

         headers = received.getHeaders();
-        assertThat(headers.get(KafkaHeaders.RECEIVED_KEY)).isEqualTo(1);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_TOPIC)).isEqualTo(topic1);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_PARTITION)).isEqualTo(0);
-        assertThat(headers.get(KafkaHeaders.OFFSET)).isEqualTo(1L);
-        assertThat((Long) headers.get(KafkaHeaders.RECEIVED_TIMESTAMP)).isGreaterThan(0L);
-        assertThat(headers.get(KafkaHeaders.TIMESTAMP_TYPE)).isEqualTo("CREATE_TIME");
-
-        assertThat(headers.get("testHeader")).isEqualTo("testValue");
+        assertThat(headers)
+                .containsEntry(KafkaHeaders.RECEIVED_KEY, 1)
+                .containsEntry(KafkaHeaders.RECEIVED_TOPIC, topic1)
+                .containsEntry(KafkaHeaders.RECEIVED_PARTITION, 0)
+                .containsEntry(KafkaHeaders.OFFSET, 1L)
+                .containsEntry(KafkaHeaders.TIMESTAMP_TYPE, "CREATE_TIME")
+                .containsEntry("testHeader", "testValue");

         adapter.setMessageConverter(new RecordMessageConverter() {

             @Override
-            public Message<?> toMessage(ConsumerRecord<?, ?> record, Acknowledgment acknowledgment,
-                    Consumer<?, ?> consumer, Type type) {
+            public Message<?> toMessage(ConsumerRecord<?, ?> record, Object acknowledgment, Object con, Type type) {
                 throw new RuntimeException("testError");
             }

             @Override
             public ProducerRecord<?, ?> fromMessage(Message<?> message, String defaultTopic) {
                 return null;
             }
+
         });
         PollableChannel errors = new QueueChannel();
         adapter.setErrorChannel(errors);
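The fluent form above works because AssertJ's `assertThat(Map)` returns a `MapAssert` whose `containsEntry` returns the same assertion object, so expectations chain into one statement. A minimal, self-contained illustration with hypothetical keys and values (not the test's real headers):

```java
import static org.assertj.core.api.Assertions.assertThat;

import java.util.Map;

class ChainedMapAssertions {

    void severalEntriesInOneStatement() {
        Map<String, Object> headers = Map.of("topic", "topic1", "partition", 0, "offset", 1L);
        // Each containsEntry returns the same MapAssert, so several expected
        // entries are verified in a single fluent statement.
        assertThat(headers)
                .containsEntry("topic", "topic1")
                .containsEntry("partition", 0)
                .containsEntry("offset", 1L);
    }

}
```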
@@ -272,10 +270,12 @@ protected boolean doSend(Message<?> message, long timeout) {
         assertThat(originalMessage).isNotNull();
         assertThat(originalMessage.getHeaders().get(IntegrationMessageHeaderAccessor.SOURCE_DATA)).isNull();
         headers = originalMessage.getHeaders();
-        assertThat(headers.get(KafkaHeaders.RECEIVED_KEY)).isEqualTo(1);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_TOPIC)).isEqualTo(topic4);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_PARTITION)).isEqualTo(0);
-        assertThat(headers.get(KafkaHeaders.OFFSET)).isEqualTo(0L);
+        assertThat(headers)
+                .containsEntry(KafkaHeaders.RECEIVED_KEY, 1)
+                .containsEntry(KafkaHeaders.RECEIVED_TOPIC, topic4)
+                .containsEntry(KafkaHeaders.RECEIVED_PARTITION, 0)
+                .containsEntry(KafkaHeaders.OFFSET, 0L);
+
         assertThat(StaticMessageHeaderAccessor.getDeliveryAttempt(originalMessage).get()).isEqualTo(3);

         assertThat(receivedMessageHistory.get()).isNotNull();
@@ -383,10 +383,11 @@ protected boolean doSend(Message<?> message, long timeout) {
         assertThat(originalMessage.getHeaders().get(IntegrationMessageHeaderAccessor.SOURCE_DATA))
                 .isSameAs(headers.get(KafkaHeaders.RAW_DATA));
         headers = originalMessage.getHeaders();
-        assertThat(headers.get(KafkaHeaders.RECEIVED_KEY)).isEqualTo(1);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_TOPIC)).isEqualTo(topic5);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_PARTITION)).isEqualTo(0);
-        assertThat(headers.get(KafkaHeaders.OFFSET)).isEqualTo(0L);
+        assertThat(headers)
+                .containsEntry(KafkaHeaders.RECEIVED_KEY, 1)
+                .containsEntry(KafkaHeaders.RECEIVED_TOPIC, topic5)
+                .containsEntry(KafkaHeaders.RECEIVED_PARTITION, 0)
+                .containsEntry(KafkaHeaders.OFFSET, 0L);
         assertThat(StaticMessageHeaderAccessor.getDeliveryAttempt(originalMessage).get()).isEqualTo(1);

         adapter.stop();
@@ -397,7 +398,8 @@ protected boolean doSend(Message<?> message, long timeout) {
     void testInboundBatch(EmbeddedKafkaBroker embeddedKafka) throws Exception {
         Map<String, Object> props = KafkaTestUtils.consumerProps(embeddedKafka, "test2", true);
         props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
-        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 12);
+        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 24);
+        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 2000);

         DefaultKafkaConsumerFactory<Integer, String> cf = new DefaultKafkaConsumerFactory<>(props);
         ContainerProperties containerProps = new ContainerProperties(topic2);
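The two fetch settings work together: `fetch.min.bytes` tells the broker to hold a fetch until at least that many bytes are buffered, while `fetch.max.wait.ms` caps how long it may hold it, so a smaller batch is still returned once the timeout expires. A sketch of a plain consumer configured the same way (broker address and group id are placeholders):

```java
import java.util.HashMap;
import java.util.Map;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.IntegerDeserializer;
import org.apache.kafka.common.serialization.StringDeserializer;

class BatchingConsumerSketch {

    KafkaConsumer<Integer, String> create() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092"); // placeholder broker
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "test2"); // placeholder group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, IntegerDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        // The broker holds each fetch until at least 24 bytes are available,
        // but never longer than 2 seconds, so small batches still arrive.
        props.put(ConsumerConfig.FETCH_MIN_BYTES_CONFIG, 24);
        props.put(ConsumerConfig.FETCH_MAX_WAIT_MS_CONFIG, 2000);
        return new KafkaConsumer<>(props);
    }

}
```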
@@ -513,14 +515,15 @@ void testInboundJson(EmbeddedKafkaBroker embeddedKafka) {
         assertThat(received).isNotNull();

         MessageHeaders headers = received.getHeaders();
-        assertThat(headers.get(KafkaHeaders.RECEIVED_KEY)).isEqualTo(1);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_TOPIC)).isEqualTo(topic3);
-        assertThat(headers.get(KafkaHeaders.RECEIVED_PARTITION)).isEqualTo(0);
-        assertThat(headers.get(KafkaHeaders.OFFSET)).isEqualTo(0L);
+        assertThat(headers)
+                .containsEntry(KafkaHeaders.RECEIVED_KEY, 1)
+                .containsEntry(KafkaHeaders.RECEIVED_TOPIC, topic3)
+                .containsEntry(KafkaHeaders.RECEIVED_PARTITION, 0)
+                .containsEntry(KafkaHeaders.OFFSET, 0L)
+                .containsEntry(KafkaHeaders.RECEIVED_TIMESTAMP, 1487694048607L)
+                .containsEntry(KafkaHeaders.TIMESTAMP_TYPE, "CREATE_TIME")
+                .containsEntry("foo", "bar");

-        assertThat(headers.get(KafkaHeaders.RECEIVED_TIMESTAMP)).isEqualTo(1487694048607L);
-        assertThat(headers.get(KafkaHeaders.TIMESTAMP_TYPE)).isEqualTo("CREATE_TIME");
-        assertThat(headers.get("foo")).isEqualTo("bar");
         assertThat(received.getPayload()).isInstanceOf(Map.class);

         adapter.stop();
@@ -579,8 +582,8 @@ void testInboundJsonWithPayload(EmbeddedKafkaBroker embeddedKafka) {
     @SuppressWarnings({"unchecked", "rawtypes"})
     @Test
    void testPauseResume() throws Exception {
-        ConsumerFactory<Integer, String> cf = mock(ConsumerFactory.class);
-        Consumer<Integer, String> consumer = mock(Consumer.class);
+        ConsumerFactory<Integer, String> cf = mock();
+        Consumer<Integer, String> consumer = mock();
         given(cf.createConsumer(eq("testPauseResumeGroup"), eq("clientId"), isNull(), any())).willReturn(consumer);
         final Map<TopicPartition, List<ConsumerRecord<Integer, String>>> records = new HashMap<>();
         records.put(new TopicPartition("foo", 0), Arrays.asList(