Kafka Synchronous Communication / Two-way communication using ReplyingKafkaTemplate causing lag in the Response (Reply) Topic - spring-kafka

We have several microservices in our product, and in some business use cases one microservice (TryServiceOne) has to delegate a request to another microservice (TryServiceThree) while the end user is waiting for the API response. We therefore used ReplyingKafkaTemplate so that we can respond to the caller immediately. Everything seems to be working, but we are seeing lag on the REPLY topic, which is bombarding our alert system with alerts. Behind the scenes the messages are read by the RequestReplyFuture and processed successfully, yet the lag reported by the Kafka broker keeps increasing. Please suggest how to avoid the lag.
IMPORTANT
We are using a cluster deployment of microservices with more than one node. Hence we are using a custom partitioner to assign the response/reply topic to one partition all the time.
TryServiceOne
KafkaConfiguration.class
@Bean
public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(org.apache.kafka.clients.producer.ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
    props.put(org.apache.kafka.clients.producer.ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(org.apache.kafka.clients.producer.ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    return props;
}

@Bean
public Map<String, Object> consumerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
    return props;
}

@Bean
public ProducerFactory<String, RequestModel> requestProducerFactory() {
    return new DefaultKafkaProducerFactory<>(producerConfigs());
}

@Bean
public KafkaTemplate<String, RequestModel> kafkaTemplate() {
    return new KafkaTemplate<>(requestProducerFactory());
}

@Bean
public ReplyingKafkaTemplate<String, RequestModel, ResponseModel> replyKafkaTemplate(ProducerFactory<String, RequestModel> pf,
        KafkaMessageListenerContainer<String, ResponseModel> container) {
    return new ReplyingKafkaTemplate<>(pf, container);
}

@Bean
public KafkaMessageListenerContainer<String, ResponseModel> replyContainer(ConsumerFactory<String, ResponseModel> cf) {
    TopicPartitionOffset topicPartitionOffset = new TopicPartitionOffset("RESPONSE_TOPIC", 0);
    ContainerProperties containerProperties = new ContainerProperties(topicPartitionOffset);
    containerProperties.setAckMode(ContainerProperties.AckMode.MANUAL);
    return new KafkaMessageListenerContainer<>(cf, containerProperties);
}
My SendAndReceive service component looks like this:
RequestModel requestModel = new RequestModel();
// ... populate requestModel ...
// create producer record
ProducerRecord<String, RequestModel> record = new ProducerRecord<>("REQUEST_TOPIC", requestModel);
// set reply topic in header
record.headers().add(new RecordHeader(KafkaHeaders.REPLY_TOPIC, "RESPONSE_TOPIC".getBytes(StandardCharsets.UTF_8)));
kafkaTemplate.setDefaultReplyTimeout(Duration.ofSeconds(30));
LOGGER.info("Sending message ... {}", record);
RequestReplyFuture<String, RequestModel, ResponseModel> sendAndReceive = kafkaTemplate.sendAndReceive(record);
// confirm the producer produced successfully
SendResult<String, RequestModel> sendResult = sendAndReceive.getSendFuture().get();
// get the consumer record (the reply)
ConsumerRecord<String, ResponseModel> consumerRecord = sendAndReceive.get();
return consumerRecord.value();
TryServiceThree Microservice
Kafka Configuration
@Bean
public Map<String, Object> consumerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId);
    props.put(JsonDeserializer.TYPE_MAPPINGS, RequestModel.class);
    return props;
}

@Bean
public Map<String, Object> producerConfigs() {
    Map<String, Object> props = new HashMap<>();
    props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaBootstrapServers);
    props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
    props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
    props.put(ProducerConfig.PARTITIONER_CLASS_CONFIG, CustomPartitioner.class);
    props.put(ProducerConfig.ACKS_CONFIG, "all");
    return props;
}

@Bean
public ConsumerFactory<String, RequestModel> requestConsumerFactory() {
    JsonDeserializer<RequestModel> deserializer = new JsonDeserializer<>(RequestModel.class);
    deserializer.setRemoveTypeHeaders(false);
    deserializer.addTrustedPackages("*");
    deserializer.setUseTypeMapperForKey(true);
    return new DefaultKafkaConsumerFactory<>(consumerConfigs(), new StringDeserializer(), deserializer);
}

@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, RequestModel>> requestListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, RequestModel> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(requestConsumerFactory());
    // factory.setMessageConverter(new JsonMessageConverter());
    factory.setReplyTemplate(replyTemplate());
    return factory;
}

@Bean
public ProducerFactory<String, ResponseModel> replyProducerFactory() {
    return new DefaultKafkaProducerFactory<>(producerConfigs());
}

@Bean
public KafkaTemplate<String, ResponseModel> replyTemplate() {
    return new KafkaTemplate<>(replyProducerFactory());
}
CustomPartitioner on TryServiceThree
public class CustomPartitioner implements Partitioner {

    @Override
    public int partition(String s, Object o, byte[] bytes, Object o1, byte[] bytes1, Cluster cluster) {
        return 0;
    }

    @Override
    public void close() {
    }

    @Override
    public void configure(Map<String, ?> map) {
    }
}

Use
containerProperties.setAckMode(ContainerProperties.AckMode.BATCH);
in the reply container.
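
With that one-line change, the reply container looks like this (a minimal sketch of the same replyContainer bean from the question; everything else is unchanged). BATCH makes the container commit the offsets of the replies it polls, so the broker-reported lag on RESPONSE_TOPIC stops growing:

@Bean
public KafkaMessageListenerContainer<String, ResponseModel> replyContainer(ConsumerFactory<String, ResponseModel> cf) {
    TopicPartitionOffset topicPartitionOffset = new TopicPartitionOffset("RESPONSE_TOPIC", 0);
    ContainerProperties containerProperties = new ContainerProperties(topicPartitionOffset);
    // With MANUAL ack nothing ever acknowledges the consumed replies, so the
    // consumer group's lag keeps increasing even though the replies are
    // processed; BATCH commits the offsets automatically after each poll.
    containerProperties.setAckMode(ContainerProperties.AckMode.BATCH);
    return new KafkaMessageListenerContainer<>(cf, containerProperties);
}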

Related

AWS MSK configuration issue with Spring

We recently migrated from a self-managed Kafka instance to a fully-managed AWS MSK cluster. We have only IAM role-based authentication enabled for connecting to the MSK cluster from local systems.
When I telnet to the public URL of the cluster I get a successful response, but when I try to start my Java application it fails with various errors. Below is my KafkaConfiguration.
Error:
Invalid login module control flag 'com.amazonaws.auth.AWSStaticCredentialsProvider' in JAAS config
@Configuration
public class KafkaConfiguration {

    @Value("${aws.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${aws.kafka.accessKey}")
    private String accessKey;

    @Value("${aws.kafka.secret}")
    private String secret;

    @Bean
    public KafkaAdmin kafkaAdmin() {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secret);
        Map<String, Object> configs = new HashMap<>();
        configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configs.put(AdminClientConfig.SECURITY_PROTOCOL_CONFIG, "SASL_SSL");
        configs.put(SaslConfigs.SASL_MECHANISM, "AWS_MSK_IAM");
        configs.put(SaslConfigs.SASL_JAAS_CONFIG, "com.amazonaws.auth.AWSCredentialsProvider com.amazonaws.auth.AWSStaticCredentialsProvider(" + awsCredentials + ")");
        return new KafkaAdmin(configs);
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secret);
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put("security.protocol", "SASL_SSL");
        configProps.put(SaslConfigs.SASL_MECHANISM, "AWS_MSK_IAM");
        configProps.put(SaslConfigs.SASL_JAAS_CONFIG, "com.amazonaws.auth.AWSCredentialsProvider com.amazonaws.auth.AWSStaticCredentialsProvider(" + awsCredentials + ")");
        return new DefaultKafkaProducerFactory<>(configProps);
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }
}
Consumer Configuration:
@EnableKafka
@Configuration
public class KafkaConsumerConfig {

    @Value("${aws.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${aws.kafka.accessKey}")
    private String accessKey;

    @Value("${aws.kafka.secret}")
    private String secret;

    public ConsumerFactory<String, String> consumerFactory() {
        AWSCredentials awsCredentials = new BasicAWSCredentials(accessKey, secret);
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put("security.protocol", "SASL_SSL");
        configProps.put(SaslConfigs.SASL_MECHANISM, "AWS_MSK_IAM");
        configProps.put(SaslConfigs.SASL_JAAS_CONFIG, "com.amazonaws.auth.AWSCredentialsProvider com.amazonaws.auth.AWSStaticCredentialsProvider(" + awsCredentials + ")");
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, "iTopLight");
        return new DefaultKafkaConsumerFactory<>(configProps);
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> rawKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }
}
There is more than one option to connect to MSK with IAM auth.
First, you need to add this library to your project:
<dependency>
    <groupId>software.amazon.msk</groupId>
    <artifactId>aws-msk-iam-auth</artifactId>
    <version>1.0.0</version>
</dependency>
Then you need to provide an AWS credentials provider. As a first option you can use environment variables or system properties.
The system property solution looks like this:
@EnableKafka
@Configuration
public class KafkaConsumerConfig {

    @Value("${aws.kafka.bootstrap-servers}")
    private String bootstrapServers;

    @Value("${aws.kafka.accessKey}")
    private String accessKey;

    @Value("${aws.kafka.secret}")
    private String secret;

    public ConsumerFactory<String, String> consumerFactory() {
        System.setProperty("aws.accessKeyId", accessKey);
        System.setProperty("aws.secretKey", secret);
        Map<String, Object> configProps = new HashMap<>();
        configProps.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        configProps.put("security.protocol", "SASL_SSL");
        configProps.put(SaslConfigs.SASL_MECHANISM, "AWS_MSK_IAM");
        // note the trailing semicolon, which JAAS config entries require
        configProps.put(SaslConfigs.SASL_JAAS_CONFIG, "software.amazon.msk.auth.iam.IAMLoginModule required;");
        configProps.put(SaslConfigs.SASL_CLIENT_CALLBACK_HANDLER_CLASS, "software.amazon.msk.auth.iam.IAMClientCallbackHandler");
        configProps.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        configProps.put(ConsumerConfig.GROUP_ID_CONFIG, "iTopLight");
        return new DefaultKafkaConsumerFactory<>(configProps);
    }

    @Bean
    public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> rawKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory());
        return factory;
    }
}
You can check the aws-msk-iam-auth project for other providers.
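
For completeness, a sketch of another option the library supports: pointing the IAMLoginModule at a named credentials profile instead of setting system properties (awsProfileName is documented in the aws-msk-iam-auth README; the profile name "dev" here is a placeholder):

// Instead of the System.setProperty(...) calls above, let the login module
// read a profile from ~/.aws/credentials ("dev" is an assumed profile name).
configProps.put(SaslConfigs.SASL_JAAS_CONFIG,
        "software.amazon.msk.auth.iam.IAMLoginModule required awsProfileName=\"dev\";");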

How can i capture record key and value when there is a DeserializationException while consuming a message from kafka topic?

I'm using Spring Boot 2.1.7.RELEASE and spring-kafka 2.2.8.RELEASE. I'm using the @KafkaListener annotation to create a consumer with all default consumer settings, and I'm using the below configuration as specified in the spring-kafka documentation.
// other props
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer2.class);
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, ErrorHandlingDeserializer2.class);
props.put(ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS, StringDeserializer.class);
props.put(ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS, AvroDeserializer.class.getName());
return new DefaultKafkaConsumerFactory<>(props);
Now, I've implemented a custom error handler by extending SeekToCurrentErrorHandler as per the thread below, but the record value comes through as null and the record key is not in a readable format. Please suggest how I can get the record key and value.
How to capture the exception and message key when using ErrorHandlingDeserializer2 to handle exceptions during deserialization
Here is my custom SeekToCurrentErrorHandler code
@Component
public class MySeekToCurrentErrorHandler extends SeekToCurrentErrorHandler {

    private final MyDeadLetterRecoverer deadLetterRecoverer;

    @Autowired
    public MySeekToCurrentErrorHandler(MyDeadLetterRecoverer deadLetterRecoverer) {
        super(-1);
        this.deadLetterRecoverer = deadLetterRecoverer;
    }

    @Override
    public void handle(Exception thrownException, List<ConsumerRecord<?, ?>> data, Consumer<?, ?> consumer, MessageListenerContainer container) {
        if (thrownException instanceof DeserializationException) {
            // Improve to support multiple records
            DeserializationException deserializationException = (DeserializationException) thrownException;
            deadLetterRecoverer.accept(data.get(0), deserializationException);
            ConsumerRecord<?, ?> consumerRecord = data.get(0);
            System.out.println(consumerRecord.key());
            System.out.println(consumerRecord.value());
        } else {
            // Calling the super method to let the SeekToCurrentErrorHandler do what it is actually designed for
            super.handle(thrownException, data, consumer, container);
        }
    }
}
If the key fails deserialization, the original byte[] can be obtained by calling getData() on the exception.
Similarly, if the value fails deserialization, use getData() to get the original data.
The DeadLetterPublishingRecoverer does this (since 2.3).
You can tell which of the key or value failed by calling isKey() on the exception.
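
Applied inside the handler above, a minimal sketch (thrownException is the parameter of handle(); assumes java.nio.charset.StandardCharsets is imported):

DeserializationException ex = (DeserializationException) thrownException;
// getData() returns the raw bytes that could not be deserialized
byte[] failedData = ex.getData();
// isKey() reports whether the key or the value failed
String failedPart = ex.isKey() ? "key" : "value";
System.out.println("Failed " + failedPart + ": " + new String(failedData, StandardCharsets.UTF_8));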
EDIT
I was wrong, the key and value are available if the value or key failed.
This is written with Boot 2.3.4:
@SpringBootApplication
public class Application {

    public static void main(String[] args) {
        SpringApplication.run(Application.class, args);
    }

    @Bean
    SeekToCurrentErrorHandler errorHandler(ProducerFactory<String, String> pf) {
        Map<String, Object> configs = new HashMap<>(pf.getConfigurationProperties());
        configs.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        configs.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, ByteArraySerializer.class);
        ProducerFactory<byte[], byte[]> bytesPF = new DefaultKafkaProducerFactory<>(configs);
        KafkaOperations<byte[], byte[]> template = new KafkaTemplate<>(bytesPF);
        return new SeekToCurrentErrorHandler(new DeadLetterPublishingRecoverer(template),
                new FixedBackOff(1000, 5));
    }

    @KafkaListener(id = "so64597061", topics = "so64597061",
            properties = {
                    ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG
                            + ":org.springframework.kafka.support.serializer.ErrorHandlingDeserializer",
                    ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG
                            + ":org.springframework.kafka.support.serializer.ErrorHandlingDeserializer",
                    ErrorHandlingDeserializer.KEY_DESERIALIZER_CLASS
                            + ":com.example.demo.Application$FailSometimesDeserializer",
                    ErrorHandlingDeserializer.VALUE_DESERIALIZER_CLASS
                            + ":com.example.demo.Application$FailSometimesDeserializer"
            })
    public void listen(String val, @Header(name = KafkaHeaders.RECEIVED_MESSAGE_KEY) String key) {
        System.out.println(key + ":" + val);
    }

    @KafkaListener(id = "so64597061.dlt", topics = "so64597061.DLT",
            properties = {
                    ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG
                            + ":org.apache.kafka.common.serialization.ByteArrayDeserializer",
                    ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG
                            + ":org.apache.kafka.common.serialization.ByteArrayDeserializer"
            })
    public void dltListen(byte[] val, @Header(name = KafkaHeaders.RECEIVED_MESSAGE_KEY, required = false) byte[] key) {
        String keyStr = key != null ? new String(key) : null;
        String valStr = val != null ? new String(val) : null;
        System.out.println("DLT:" + keyStr + ":" + valStr);
    }

    @Bean
    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
        return args -> {
            template.send("so64597061", "foo", "bar");
            template.send("so64597061", "fail", "keyFailed");
            template.send("so64597061", "valueFailed", "fail");
        };
    }

    @Bean
    public NewTopic topic() {
        return TopicBuilder.name("so64597061").partitions(1).replicas(1).build();
    }

    @Bean
    public NewTopic dlt() {
        return TopicBuilder.name("so64597061.DLT").partitions(1).replicas(1).build();
    }

    public static class FailSometimesDeserializer implements Deserializer<byte[]> {

        @Override
        public void configure(Map<String, ?> configs, boolean isKey) {
        }

        @Override
        public byte[] deserialize(String topic, byte[] data) {
            return data;
        }

        @Override
        public void close() {
        }

        @Override
        public byte[] deserialize(String topic, Headers headers, byte[] data) {
            String string = new String(data);
            if ("fail".equals(string)) {
                throw new RuntimeException("fail");
            }
            return data;
        }
    }
}
application.properties:
spring.kafka.consumer.auto-offset-reset=earliest
Console output:
foo:bar
DLT:fail:keyFailed
DLT:valueFailed:fail

Kafka consumer can't connect to broker other than localhost:9092 using Spring Boot 2.2.0.M4

I'm using Spring Boot 2.2.0.M4 and Kafka 2.2.0, trying to build an application based on the sample at https://www.baeldung.com/spring-kafka. When I enable the listener for my topic, I get the following error on the consumer:
[AdminClient clientId=adminclient-2] Connection to node -1 (localhost/127.0.0.1:9092) could not be established. Broker may not be available.
The following is defined in my application properties.
kafka.bootstrapAddress=172.22.22.55:9092
Here's the @KafkaListener annotated method.
@KafkaListener(topics = "add_app", groupId = "foo")
public void listen(String message) {
    System.out.println("Received Message in group foo: " + message);
}
Below is the consumer configuration class that references the kafka.bootstrapAddress value. The value is logged correctly.
@Configuration
@Slf4j
public class KafkaConsumerConfig {

    @Value(value = "${kafka.bootstrapAddress}")
    private String bootstrapAddress;

    public ConsumerFactory<String, String> consumerFactory(String groupId) {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        log.info("Created {} using address {}.", this.getClass(), bootstrapAddress);
        return new DefaultKafkaConsumerFactory<>(props);
    }

    @Bean
    public ConcurrentKafkaListenerContainerFactory<String, String> fooKafkaListenerContainerFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(consumerFactory("foo"));
        return factory;
    }
}
The solution to this is fairly simple. I just needed to add the following to the application.properties file:
spring.kafka.bootstrap-servers=172.22.22.55:9092
After looking at KafkaProperties.java, I found this line:
private List<String> bootstrapServers = new ArrayList<>(Collections.singletonList("localhost:9092"));
and this method actually builds them:
private Map<String, Object> buildCommonProperties() {
    Map<String, Object> properties = new HashMap();
    if (this.bootstrapServers != null) {
        properties.put("bootstrap.servers", this.bootstrapServers);
    }
    if (this.clientId != null) {
        properties.put("client.id", this.clientId);
    }
    properties.putAll(this.ssl.buildProperties());
    if (!CollectionUtils.isEmpty(this.properties)) {
        properties.putAll(this.properties);
    }
    return properties;
}
Since localhost:9092 is already predefined in that class, the broker defined in KafkaConsumerConfig is not used: the listener was never told to use fooKafkaListenerContainerFactory, so it fell back to Boot's auto-configured default container factory and its default bootstrap servers.
Update
Adding the containerFactory attribute to the listener annotation also fixes it and removes the need for the change to application.properties.
@KafkaListener(topics = "add_app", groupId = "foo", containerFactory = "fooKafkaListenerContainerFactory")
public void listen(String message) {
    System.out.println("Received Message in group foo: " + message);
}
In order to use your custom property kafka.bootstrapAddress, you need to create a KafkaAdmin @Bean. It has its own configuration class, AdminClientConfig, which is by default configured to connect to 127.0.0.1:9092. To override the configuration you have to use something like this:
@Value(value = "${kafka.bootstrapAddress}")
private String bootstrapAddress;

@Bean
public KafkaAdmin kafkaAdmin() {
    Map<String, Object> configs = new HashMap<>();
    configs.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapAddress);
    return new KafkaAdmin(configs);
}

KafkaConsumer to read topic from beginning

I want to start consuming from the beginning of the topic. I have set the AUTO_OFFSET_RESET_CONFIG property to earliest, but it somehow still does not read from the beginning.
Any thoughts on what I am missing? I am creating a new consumer group every time.
@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConsumerFactory(consumerFactory());
    factory.setBatchListener(true);
    return factory;
}

@Bean
public ConsumerFactory<String, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory<>(getConsumerConfigs(false));
}

private Map<String, Object> getConsumerConfigs(boolean isEmbedded) {
    Map<String, Object> props = new HashMap<>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, isEmbedded ? embeddedBootstrapServers : bootstrapServers);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, consumerGroupId + "temp");
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
    props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
    props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, sessionTimeoutMillis);
    props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, heartbeatIntervalMillis);
    return props;
}
That property only applies to new consumer groups that have never committed an offset. Use a ConsumerSeekAware message listener, and you can call seekToBeginning for each assigned topic/partition, as in the sketch below.
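A minimal sketch, assuming spring-kafka 2.3+ (where the remaining ConsumerSeekAware methods have default implementations); the class name and topic are placeholders:

@Component
public class FromBeginningListener implements ConsumerSeekAware {

    @Override
    public void onPartitionsAssigned(Map<TopicPartition, Long> assignments, ConsumerSeekCallback callback) {
        // Rewind every newly assigned partition, regardless of any committed offset
        assignments.keySet().forEach(tp -> callback.seekToBeginning(tp.topic(), tp.partition()));
    }

    @KafkaListener(topics = "some-topic")
    public void listen(String message) {
        System.out.println(message);
    }
}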

Multiple consumers using spring kafka

I am looking to set up multiple listeners on a Kafka topic inside my application. Below is my setup. The message is supposed to be consumed by both groups, but it is consumed by only one listener. What am I missing here?
@Bean
public Map<String, Object> consumerConfigs() {
    Map<String, Object> props = new HashMap<String, Object>();
    props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
    props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
    props.put(ConsumerConfig.GROUP_ID_CONFIG, groupName);
    return props;
}

@Bean
public ConsumerFactory<String, String> consumerFactory() {
    return new DefaultKafkaConsumerFactory(consumerConfigs());
}

@Bean
public ConcurrentKafkaListenerContainerFactory<String, String> kafkaListenerContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConcurrency(100);
    factory.setConsumerFactory(consumerFactory());
    return factory;
}

@Bean("notificationFactory")
public ConcurrentKafkaListenerContainerFactory<String, String> notificationFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConcurrency(100);
    factory.setConsumerFactory(consumerFactory());
    return factory;
}

@Bean("insertContainerFactory")
public ConcurrentKafkaListenerContainerFactory<String, String> insertContainerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory = new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConcurrency(100);
    factory.setConsumerFactory(consumerFactory());
    return factory;
}

@KafkaListener(id = "insert_listener", topics = "${kafka.topic.readlocation}", group = "insert_listener", containerFactory = "insertContainerFactory")
public void receiveForInsert(String message) {
    locationProcessor.insertLocationData(message);
}

@KafkaListener(id = "notification_listener", topics = "${kafka.topic.readlocation}", group = "notification_listener", containerFactory = "notificationFactory")
public void receiveForNotification(String message) {
    locationProcessor.processNotificationMessage(message);
}
Edit: Below is the code that worked
@KafkaListener(id = "insert_listener", topics = "${kafka.topic.readlocation}", groupId = "insert_listener")
public void receiveForInsert(String message) {
    locationProcessor.insertLocationData(message);
}
You need a different group.id for each; the group property is not the group.id (see the javadocs). In the upcoming 1.3 release there is a new groupId property, and the id is also used as the group if present.
For earlier versions you need a different consumer factory for each.
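For those older versions, a minimal sketch of per-listener factories built on the question's own consumerConfigs() (bean names are placeholders; each copy of the config overrides group.id):

@Bean
public ConsumerFactory<String, String> insertConsumerFactory() {
    Map<String, Object> props = new HashMap<>(consumerConfigs());
    // Each factory gets its own group.id, so each listener joins a separate group
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "insert_listener");
    return new DefaultKafkaConsumerFactory<>(props);
}

@Bean
public ConsumerFactory<String, String> notificationConsumerFactory() {
    Map<String, Object> props = new HashMap<>(consumerConfigs());
    props.put(ConsumerConfig.GROUP_ID_CONFIG, "notification_listener");
    return new DefaultKafkaConsumerFactory<>(props);
}

Wire each one into its corresponding container factory (insertContainerFactory and notificationFactory above) via factory.setConsumerFactory(...).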
