Duplicate consumer metrics when concurrency is set to more than 1 - spring-kafka

I am using spring-kafka 2.2.8, setting concurrency to n for my consumer, and trying to pause each consumer instance/thread after that thread/instance has consumed a pre-defined number of records (10k in this case). Then I try to capture the consumer metrics to understand how fast my consumer can consume and process the records.
For example, I produced 50k messages onto a topic, set concurrency to 5, and tried to pause the consumer threads after each consumed 10k records. Now, when I try to capture the consumer metrics, I get duplicate values and am not sure which is correct. So, can someone help me understand how I can get the correct metrics for my consumer?
Here is my consumer code:
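// NOTE: this counter is a ThreadLocal, so each of the 5 consumer threads keeps its own independent count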
public static ThreadLocal<Integer> consumedMessages = new ThreadLocal<Integer>() {
@Override
protected Integer initialValue() {
return Integer.valueOf(0);
}
};
public static List<ConsumerMetrics> mtpConsumerMetricsList = new ArrayList<>();
int messageCount = 10000;
#KafkaListener(id = "myConsumer", topics = "myTopic", concurrency = 5)
public void listen(String in) {
try {
consumedMessages.set(consumedMessages.get().intValue() + 1);
System.out.println(" ConsumerConsumedMessages " + consumedMessages.get());
if (consumedMessages.get() == messageCount) {
System.out.println("ATTENTION! ATTENTION! ATTENTION! Consumer Finished processing " + messageCount + " messages");
ConcurrentMessageListenerContainer concurrentMsgLstnrCntnr = (ConcurrentMessageListenerContainer) this.registry.getListenerContainer("myConsumer");
if (concurrentMsgLstnrCntnr != null) {
concurrentMsgLstnrCntnr.pause();
System.out.println("Pausing of mtpConsumer is done at " + LocalDateTime.now());
List<KafkaMessageListenerContainer> list = concurrentMsgLstnrCntnr.getContainers();
System.out.println("Containers list size "+list.size());
List<ConsumerMetrics> consumerMetricsList = new ArrayList<>();
list.forEach(childContainer -> {
ConsumerMetrics consumerMetrics = new ConsumerMetrics();
logger.logDebug(String.format("Child Listener Id is %s and assigned partitions are %s ",
childContainer.getListenerId(),
childContainer.getAssignedPartitions()),
this.getClass());
populateAndPrintContainerMetrics(childContainer.metrics(), consumerMetrics);
if (!consumerMetrics.getRecordsConsumedRatePerSec()
.equalsIgnoreCase("0.0") && !consumerMetrics.getTotalRecordsConsumed()
.equalsIgnoreCase("0.0")) {
consumerMetricsList.add(consumerMetrics);
}
});
mtpConsumerMetricsList.addAll(consumerMetricsList);
} else {
logger.logErrMsg("ERROR! ERROR! ERROR! concurrentMsgLstnrCntnr for Id myConsumer is null",
this.getClass());
}
}
} catch (Exception ex) {
ex.printStackTrace();
logger.logException(ex,
this.getClass());
}
}
private void populateAndPrintContainerMetrics(Map<String, Map<MetricName, ? extends Metric>> metrics, ConsumerMetrics consumerMetrics) {
metrics.entrySet()
.forEach(entry -> {
String topLevelMetricKey = entry.getKey();
consumerMetrics.setConsumerId(topLevelMetricKey);
System.out.println("metrics map entry key is " + topLevelMetricKey);
entry.getValue()
.entrySet()
.forEach(innerMapEntry -> {
String metricKey = innerMapEntry.getKey()
.name();
String metricValue = String.valueOf(((Metric) innerMapEntry.getValue()).metricValue());
System.out.println(" metricKey is " + metricKey + " and metricValue is " + metricValue);
switch (metricKey) {
case "records-consumed-rate":
//The average number of records consumed per second
consumerMetrics.setRecordsConsumedRatePerSec(metricValue);
break;
case "records-consumed-total":
//The total number of records consumed
consumerMetrics.setTotalRecordsConsumed(metricValue);
break;
case "request-size-avg":
//The average size of requests sent
consumerMetrics.setRequestSizeAvg(metricValue);
break;
case "request-rate":
//The number of requests sent per second
consumerMetrics.setRequestRate(metricValue);
break;
case "request-total":
//The total number of requests sent
consumerMetrics.setRequestTotal(metricValue);
break;
case "fetch-rate":
//The number of fetch requests per second
consumerMetrics.setFetchRate(metricValue);
break;
case "fetch-total":
//The total number of fetch requests
consumerMetrics.setFetchTotal(metricValue);
break;
case "fetch-latency-max":
//The max time taken for any fetch request
consumerMetrics.setFetchLatencyMax(metricValue);
break;
case "records-per-request-avg":
//The average number of records in each request for a topic
consumerMetrics.setRecordsPerRequestAvg(metricValue);
break;
case "assigned-partitions":
//The number of partitions currently assigned to this consumer
consumerMetrics.setAssignedPartitions(metricValue);
break;
case "records-lag-max":
//The max lag of the partition
consumerMetrics.setRecordsLagMax(metricValue);
break;
}
});
});
}
Here is the result of the consumer metrics; you can clearly see that more than one result comes back for the same thread:
consumerId is producer-perf-test-topic32-ClientId-0 and recordsConsumedRatePerSec is 68.71762342742362 and totalRecordsConsumed is 8715.0
consumerId is producer-perf-test-topic32-ClientId-1 and recordsConsumedRatePerSec is 154.52282723584165 and totalRecordsConsumed is 9064.0
consumerId is producer-perf-test-topic32-ClientId-2 and recordsConsumedRatePerSec is 150.11590645667144 and totalRecordsConsumed is 8807.0
consumerId is producer-perf-test-topic32-ClientId-3 and recordsConsumedRatePerSec is 164.1384476565027 and totalRecordsConsumed is 9641.0
consumerId is producer-perf-test-topic32-ClientId-4 and recordsConsumedRatePerSec is 166.65247526438583 and totalRecordsConsumed is 9786.0
consumerId is producer-perf-test-topic32-ClientId-0 and recordsConsumedRatePerSec is 68.71762342742362 and totalRecordsConsumed is 8715.0
consumerId is producer-perf-test-topic32-ClientId-1 and recordsConsumedRatePerSec is 154.5070230465021 and totalRecordsConsumed is 9064.0
consumerId is producer-perf-test-topic32-ClientId-2 and recordsConsumedRatePerSec is 150.10055561236663 and totalRecordsConsumed is 8807.0
consumerId is producer-perf-test-topic32-ClientId-3 and recordsConsumedRatePerSec is 164.16919252120016 and totalRecordsConsumed is 9641.0
consumerId is producer-perf-test-topic32-ClientId-4 and recordsConsumedRatePerSec is 166.63828627865 and totalRecordsConsumed is 9786.0
consumerId is producer-perf-test-topic32-ClientId-0 and recordsConsumedRatePerSec is 68.52444038373686 and totalRecordsConsumed is 8715.0
consumerId is producer-perf-test-topic32-ClientId-1 and recordsConsumedRatePerSec is 170.05067510118016 and totalRecordsConsumed is 10000.0
consumerId is producer-perf-test-topic32-ClientId-2 and recordsConsumedRatePerSec is 170.0420003740924 and totalRecordsConsumed is 10000.0
consumerId is producer-perf-test-topic32-ClientId-3 and recordsConsumedRatePerSec is 163.85390642261086 and totalRecordsConsumed is 9641.0
consumerId is producer-perf-test-topic32-ClientId-4 and recordsConsumedRatePerSec is 166.39178412935914 and totalRecordsConsumed is 9786.0

It's not clear what you are asking - you are running that code on all 5 consumer threads, so you will get 5x5 = 25 lines (each one rendered 5 times).
Here, I get only 5 lines, regardless of which technique I use to get the metrics:
@SpringBootApplication
public class So62452699Application {
public static void main(String[] args) {
SpringApplication.run(So62452699Application.class, args);
}
#KafkaListener(id = "so62452699", topics = "so62452699", concurrency = "5")
public void listen(String in) {
System.out.println(in);
}
@Bean
public NewTopic topic() {
return TopicBuilder.name("so62452699").partitions(1).replicas(1).build();
}
@Bean
public ApplicationRunner runner(KafkaListenerEndpointRegistry registry) {
return args -> {
// this one's more concise
registry.getListenerContainer("so62452699").metrics().forEach((clientId, metrics) -> {
System.out.println(clientId + ": " + metrics);
});
// or
((ConcurrentMessageListenerContainer<?, ?>) registry.getListenerContainer("so62452699"))
.getContainers()
.forEach(container -> System.out.println(container.metrics()));
};
}
}
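If you want one number for the whole listener, you can aggregate the per-client values yourself. Here is a minimal sketch (e.g., placed in the runner above); note that the Kafka consumer reports records-consumed-total in the consumer-fetch-manager-metrics group both once per client and once per topic, so the topic-tagged entries are filtered out here to avoid double counting:
double total = registry.getListenerContainer("so62452699").metrics().values().stream()
.flatMap(clientMetrics -> clientMetrics.entrySet().stream())
// keep the client-level metric only; skip the per-topic duplicates
.filter(e -> "records-consumed-total".equals(e.getKey().name())
&& !e.getKey().tags().containsKey("topic"))
.mapToDouble(e -> ((Number) e.getValue().metricValue()).doubleValue())
.sum();
System.out.println("Total records consumed across all child containers: " + total);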

Related

Setting SSL Parameters on Apache http5 Client

I am upgrading from Apache HttpComponents 4 to version 5 in order to get HTTP/2 and HTTP/1.1 support. I need to specify the ciphers my client offers. I assume that H2/1.1 ALPN is the default behavior for the AsyncHttpClient.
Here is my current code for the HttpComponents 4 client:
// TLS
SSLConnectionSocketFactory sslConnectionFactory = new SSLConnectionSocketFactory(
SSLContexts.createDefault(),
new String[] { "TLSv1.2" },
new String[] {"TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256",
"TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_256_CBC_SHA"},
SSLConnectionSocketFactory.getDefaultHostnameVerifier());
// Proxy
HttpHost proxyhost = new HttpHost(proxyAddress, proxyPort);
HttpRoutePlanner routePlanner = new DefaultProxyRoutePlanner(proxyhost);
CredentialsProvider credentialsProvider = new BasicCredentialsProvider();
credentialsProvider.setCredentials(
new AuthScope(proxyAddress, proxyPort),
new UsernamePasswordCredentials(proxyUsername, proxyPassword)
);
httpClient = HttpClients.custom()
.setRoutePlanner(routePlanner)
.setSSLSocketFactory(sslConnectionFactory)
.setDefaultCredentialsProvider(credentialsProvider)
.setRedirectStrategy(new LaxRedirectStrategy())
.setDefaultCookieStore(cookieStore)
.build();
Everything seems to be roughly the same for creating the async client except specifying the SSL factory, so setting the TLS parameters appears to take a different route. I've spent about an hour looking for examples and documentation with no luck. Some examples show a class called TLSConfig, but I can't find any documentation on it.
Any help is greatly appreciated.
You need to build a custom TlsStrategy, pretty much the same way as shown in the "Custom SSL context" example on the project website [1].
TLSConfig will be available as of the 5.2 release, which is going to go BETA soon.
final TlsStrategy tlsStrategy = ClientTlsStrategyBuilder.create()
.setTlsVersions(TLS.V_1_2)
.setCiphers("TLS_AES_128_GCM_SHA256", "TLS_AES_256_GCM_SHA384",
"TLS_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256", "TLS_ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"TLS_ECDHE_RSA_WITH_AES_256_GCM_SHA384", "TLS_ECDHE_ECDSA_WITH_CHACHA20_POLY1305_SHA256",
"TLS_ECDHE_RSA_WITH_CHACHA20_POLY1305_SHA256", "TLS_ECDHE_RSA_WITH_AES_128_CBC_SHA",
"TLS_ECDHE_RSA_WITH_AES_256_CBC_SHA", "TLS_RSA_WITH_AES_128_GCM_SHA256",
"TLS_RSA_WITH_AES_256_GCM_SHA384", "TLS_RSA_WITH_AES_128_CBC_SHA",
"TLS_RSA_WITH_AES_256_CBC_SHA")
.build();
final PoolingAsyncClientConnectionManager cm = PoolingAsyncClientConnectionManagerBuilder.create()
.setTlsStrategy(tlsStrategy)
.build();
try (final CloseableHttpAsyncClient client = HttpAsyncClients.custom()
.setConnectionManager(cm)
.build()) {
client.start();
final HttpHost target = new HttpHost("https", "httpbin.org");
final HttpClientContext clientContext = HttpClientContext.create();
final SimpleHttpRequest request = SimpleRequestBuilder.get()
.setHttpHost(target)
.setPath("/")
.build();
System.out.println("Executing request " + request);
final Future<SimpleHttpResponse> future = client.execute(
SimpleRequestProducer.create(request),
SimpleResponseConsumer.create(),
clientContext,
new FutureCallback<SimpleHttpResponse>() {
@Override
public void completed(final SimpleHttpResponse response) {
System.out.println(request + "->" + new StatusLine(response));
final SSLSession sslSession = clientContext.getSSLSession();
if (sslSession != null) {
System.out.println("SSL protocol " + sslSession.getProtocol());
System.out.println("SSL cipher suite " + sslSession.getCipherSuite());
}
System.out.println(response.getBody());
}
@Override
public void failed(final Exception ex) {
System.out.println(request + "->" + ex);
}
@Override
public void cancelled() {
System.out.println(request + " cancelled");
}
});
future.get();
System.out.println("Shutting down");
client.close(CloseMode.GRACEFUL);
}
[1] https://hc.apache.org/httpcomponents-client-5.1.x/examples-async.html

Kafka exactly once messaging test with "consume-transform-produce" Integration test

I am writing a test case to test my application's consume-transform-produce loop with Kafka. So effectively I am consuming from a source topic, processing, and sending the message to a destination topic. I am writing these test cases to prove exactly-once messaging with Kafka, as I will add other failure cases later.
Here is my configuration:
private Map<String, Object> consConfigProps(boolean txnEnabled) {
Map<String, Object> props = new HashMap<>(
KafkaTestUtils.consumerProps(AB_CONSUMER_GROUP_ID, "false", kafkaBroker));
props.put(ConsumerConfig.GROUP_ID_CONFIG, AB_CONSUMER_GROUP_ID);
props.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
props.put(ConsumerConfig.ISOLATION_LEVEL_CONFIG, "read_committed");
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "latest");
props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, JsonDeserializer.class);
return props;
}
private Map<String, Object> prodConfigProps(boolean txnEnabled) {
Map<String, Object> props = new HashMap<>(KafkaTestUtils.producerProps(kafkaBroker));
props.put(JsonDeserializer.TRUSTED_PACKAGES, "*");
props.put(ProducerConfig.CLIENT_ID_CONFIG, "client-" + UUID.randomUUID().toString());
props.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
props.put(ProducerConfig.MAX_IN_FLIGHT_REQUESTS_PER_CONNECTION, "3");
props.put(ProducerConfig.RETRIES_CONFIG, "3");
props.put(ProducerConfig.ACKS_CONFIG, "all");
props.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG,
"prod-txn-" + UUID.randomUUID().toString());
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, JsonSerializer.class);
return props;
}
public KafkaMessageListenerContainer<String, NormalUser> fetchContainer() {
ContainerProperties containerProperties = new ContainerProperties(ABTOPIC, XYTOPIC, PATOPIC);
containerProperties.setGroupId("groupId-10001");
containerProperties.setAckMode(AckMode.MANUAL);
containerProperties.setSyncCommits(true);
containerProperties.setSyncCommitTimeout(Duration.ofMillis(5000));
containerProperties.setTransactionManager(kafkaTransactionManager());
KafkaMessageListenerContainer<String, NormalUser> kafkaMessageListContainer = new KafkaMessageListenerContainer<>(
consumerFactory(), containerProperties);
kafkaMessageListContainer.setupMessageListener(new AcknowledgingMessageListener<String, NormalUser>() {
@Override
public void onMessage(ConsumerRecord<String, NormalUser> record, Acknowledgment acknowledgment) {
log.debug("test-listener received message='{}'", record.toString());
records.add(record);
acknowledgment.acknowledge();
}
});
return kafkaMessageListContainer;
}
@Test
public void testProducerABSuccess() throws InterruptedException, IOException {
NormalUser userObj = new NormalUser(ABTypeGood,
Double.valueOf(Math.random() * 10000).longValue(),
"Blah" + String.valueOf(Math.random() * 10));
sendMessage(XYTOPIC, "AB-id", userObj);
try {
ConsumerRecords<String, NormalUser> records;
parserConsumer.subscribe(Collections.singletonList(XYTOPIC));
Map<TopicPartition, OffsetAndMetadata> currentOffsets = new LinkedHashMap<>();
// Check for messages
parserProducer.beginTransaction();
records = parserConsumer.poll(Duration.ofSeconds(3));
assertThat(1).isEqualTo(records.count()); // --> this assert passes like 50% of the time.
for (ConsumerRecord<String, NormalUser> record : records) {
assertEquals(record.key(), "AB-id");
assertEquals(record.value(), userObj);
currentOffsets.put(new TopicPartition(record.topic(), record.partition()),
new OffsetAndMetadata(record.offset()));
}
parserProducer.send(new ProducerRecord<String, NormalUser>(ABTOPIC, "AB-id", userObj));
parserProducer.sendOffsetsToTransaction(currentOffsets, AB_CONSUMER_GROUP_ID);
parserProducer.commitTransaction();
} catch (ProducerFencedException | OutOfOrderSequenceException | AuthorizationException e) {
parserProducer.close();
} catch (final KafkaException e) {
parserProducer.abortTransaction();
}
ConsumerRecords<String, NormalUser> records;
loadConsumer.subscribe(Collections.singletonList(ABTOPIC));
records = loadConsumer.poll(Duration.ofSeconds(3));
assertThat(1).isEqualTo(records.count()); //--> this assert fails all the time.
for (ConsumerRecord<String, NormalUser> record : records) {
assertEquals(record.key(), "AB-id");
assertEquals(record.value(), userObj);
}
}
My issue is that the above test case "testProducerABSuccess" is not consistent: the asserts sometimes fail and sometimes pass. I have not been able to figure out why they are so inconsistent. What is wrong with the above?
Edit (16-12):
Tested with ConsumerConfig.AUTO_OFFSET_RESET_CONFIG set to earliest: no change. The first assert passes like 70% of the time. The second assert fails all the time (0% pass rate).
Which assertion fails? If it's assertThat(1).isEqualTo(records.count());, it's probably because you are setting auto.offset.reset to latest. It needs to be earliest to avoid a race condition whereby the record is sent before the consumer is assigned the partition(s).
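For reference, a minimal sketch of that change in the consConfigProps map shown above:
props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");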

Requeue the failed record in the kafka topic

I have a use case where the records are to be persisted in a table which has a foreign key to itself.
Example:
zObject
{
uid,
name,
parentuid
}
The parent uid is also present in the same table, and any object with a non-existent parentuid will fail to persist.
At times the records are placed in the topic in such a way that a record's parent is not at the head of the list; instead it arrives after the records that depend on it.
This causes failures in processing the record. I have used the SeekToCurrentErrorHandler, which retries the same failed record with the given backoff, and it keeps failing since the dependency is not met.
Is there any way I can requeue the record at the end of the topic so that the dependency is met? If it still fails, say, 5 times even after requeueing, the record can be pushed to a DLT.
Thanks,
Rajasekhar
There is nothing built in; you can, however, use a custom destination resolver in the DeadLetterPublishingRecoverer to determine which topic to publish to, based on a header in the failed record.
See https://docs.spring.io/spring-kafka/docs/2.6.2/reference/html/#dead-letters
EDIT
@SpringBootApplication
public class So64646996Application {
public static void main(String[] args) {
SpringApplication.run(So64646996Application.class, args);
}
@Bean
public NewTopic topic() {
return TopicBuilder.name("so64646996").partitions(1).replicas(1).build();
}
@Bean
public NewTopic dlt() {
return TopicBuilder.name("so64646996.DLT").partitions(1).replicas(1).build();
}
@Bean
public ErrorHandler eh(KafkaOperations<String, String> template) {
return new SeekToCurrentErrorHandler(new DeadLetterPublishingRecoverer(template,
(rec, ex) -> {
org.apache.kafka.common.header.Header retries = rec.headers().lastHeader("retries");
if (retries == null) {
retries = new RecordHeader("retries", new byte[] { 1 });
rec.headers().add(retries);
}
else {
retries.value()[0]++;
}
return retries.value()[0] > 5
? new TopicPartition("so64646996.DLT", rec.partition())
: new TopicPartition("so64646996", rec.partition());
}), new FixedBackOff(0L, 0L));
}
#KafkaListener(id = "so64646996", topics = "so64646996")
public void listen(String in,
#Header(KafkaHeaders.OFFSET) long offset,
#Header(name = "retries", required = false) byte[] retry) {
System.out.println(in + "#" + offset + ":" + retry[0]);
throw new IllegalStateException();
}
#KafkaListener(id = "so64646996.DLT", topics = "so64646996.DLT")
public void listenDLT(String in,
#Header(KafkaHeaders.OFFSET) long offset,
#Header(name = "retries", required = false) byte[] retry) {
System.out.println("DLT: " + in + "#" + offset + ":" + retry[0]);
}
@Bean
public ApplicationRunner runner(KafkaTemplate<String, String> template) {
return args -> System.out.println(template.send("so64646996", "foo").get(10, TimeUnit.SECONDS)
.getRecordMetadata());
}
}

Scanning for beacons using universal beacon library

I am trying to implement a mobile app (on iPhone) that just scans for beacons and displays a notification for each one. I am a noob with beacons/bluetooth.
I implemented it using the universal beacon library (https://github.com/andijakl/universal-beacon) and I've attached my iOS Bluetooth implementation.
My problem is that I receive about 12 "beacon added" events even though I only have two beacons (I assume it is picking up all my other Bluetooth devices). I also only receive the local name in the advertisement_received event.
My questions are:
How do I distinguish that it is a beacon being added?
How do I get the unique ID and URL from the beacon? (They are Kontakt beacons.)
Thanks for any help.
My beacon service:
public BeaconService()
{
// get the platform-specific provider
var provider = RootWorkItem.Services.Get<IBluetoothPacketProvider>();
if (null != provider)
{
// create a beacon manager, giving it an invoker to marshal collection changes to the UI thread
_manager = new BeaconManager(provider, Device.BeginInvokeOnMainThread);
_manager.Start();
_manager.BeaconAdded += _manager_BeaconAdded;
provider.AdvertisementPacketReceived += Provider_AdvertisementPacketReceived;
}
}
My ios bluetooth implementation:
public class iOSBluetoothPacketProvider : CocoaBluetoothPacketProvider { }
public class CocoaBluetoothPacketProvider : NSObject, IBluetoothPacketProvider
{
public event EventHandler<BLEAdvertisementPacketArgs> AdvertisementPacketReceived;
public event EventHandler<BTError> WatcherStopped;
private readonly CocoaBluetoothCentralDelegate centralDelegate;
private readonly CBCentralManager central;
public CocoaBluetoothPacketProvider()
{
Debug.WriteLine("BluetoothPacketProvider()");
centralDelegate = new CocoaBluetoothCentralDelegate();
central = new CBCentralManager(centralDelegate, null);
}
private void ScanCallback_OnAdvertisementPacketReceived(object sender, BLEAdvertisementPacketArgs e)
{
AdvertisementPacketReceived?.Invoke(this, e);
}
public void Start()
{
Debug.WriteLine("BluetoothPacketProvider:Start()");
centralDelegate.OnAdvertisementPacketReceived += ScanCallback_OnAdvertisementPacketReceived;
// Wait for the PoweredOn state
//if(CBCentralManagerState.PoweredOn == central.State) {
// central.ScanForPeripherals(peripheralUuids: new CBUUID[] { },
// options: new PeripheralScanningOptions { AllowDuplicatesKey = false });
//}
}
public void Stop()
{
Debug.WriteLine("BluetoothPacketProvider:Stop()");
centralDelegate.OnAdvertisementPacketReceived -= ScanCallback_OnAdvertisementPacketReceived;
central.StopScan();
WatcherStopped?.Invoke(sender: this, e: new BTError(BTError.BluetoothError.Success));
}
}
internal class CocoaBluetoothCentralDelegate : CBCentralManagerDelegate
{
public event EventHandler<BLEAdvertisementPacketArgs> OnAdvertisementPacketReceived;
#region CBCentralManagerDelegate
public override void ConnectedPeripheral(CBCentralManager central, CBPeripheral peripheral)
{
Debug.WriteLine($"ConnectedPeripheral(CBCentralManager central, CBPeripheral {peripheral})");
}
public override void DisconnectedPeripheral(CBCentralManager central, CBPeripheral peripheral, NSError error)
{
Debug.WriteLine($"DisconnectedPeripheral(CBCentralManager central, CBPeripheral {peripheral}, NSError {error})");
}
public override void DiscoveredPeripheral(CBCentralManager central, CBPeripheral peripheral, NSDictionary advertisementData, NSNumber RSSI)
{
Debug.WriteLine($"Cocoa peripheral {peripheral}");
Debug.WriteLine($"Cocoa advertisementData {advertisementData}");
Debug.WriteLine($"Cocoa RSSI {RSSI}");
var bLEAdvertisementPacket = new BLEAdvertisementPacket()
{
Advertisement = new BLEAdvertisement()
{
LocalName = peripheral.Name,
ServiceUuids = new List<Guid>(),
DataSections = new List<BLEAdvertisementDataSection>(),
ManufacturerData = new List<BLEManufacturerData>()
},
AdvertisementType = BLEAdvertisementType.ScanResponse,
BluetoothAddress = (ulong)peripheral.Identifier.GetHashCode(),
RawSignalStrengthInDBm = RSSI.Int16Value,
Timestamp = DateTimeOffset.Now
};
//https://developer.apple.com/documentation/corebluetooth/cbadvertisementdataserviceuuidskey
//if (advertisementData.ContainsKey(CBAdvertisement.DataServiceUUIDsKey))
//{
// bLEAdvertisementPacket.Advertisement.ServiceUuids.Add(
// item: new BLEManufacturerData(packetType: BLEPacketType.UUID16List,
// data: (advertisementData[CBAdvertisement.DataServiceUUIDsKey])));
//}
//https://developer.apple.com/documentation/corebluetooth/cbadvertisementdataservicedatakey
//if (advertisementData.ContainsKey(CBAdvertisement.DataServiceDataKey))
//{
// bLEAdvertisementPacket.Advertisement.DataSections.Add(
// item: new BLEManufacturerData(packetType: BLEPacketType.ServiceData,
// data: advertisementData[CBAdvertisement.DataServiceDataKey]));
//}
//https://developer.apple.com/documentation/corebluetooth/cbadvertisementdatamanufacturerdatakey
if (advertisementData.ContainsKey(CBAdvertisement.DataManufacturerDataKey))
{
bLEAdvertisementPacket.Advertisement.ManufacturerData.Add(
item: new BLEManufacturerData(packetType: BLEPacketType.ManufacturerData,
data: (advertisementData[CBAdvertisement.DataManufacturerDataKey]
as NSData).ToArray()));
}
// Missing CBAdvertisement.DataTxPowerLevelKey
var bLEAdvertisementPacketArgs = new BLEAdvertisementPacketArgs(data: bLEAdvertisementPacket);
OnAdvertisementPacketReceived?.Invoke(this, bLEAdvertisementPacketArgs);
}
public override void FailedToConnectPeripheral(CBCentralManager central, CBPeripheral peripheral, NSError error)
{
Debug.WriteLine($"FailedToConnectPeripheral(CBCentralManager central, CBPeripheral {peripheral}, NSError {error})");
}
public override void UpdatedState(CBCentralManager central)
{
switch (central.State)
{
case CBCentralManagerState.Unknown:
Debug.WriteLine("CBCentralManagerState.Unknown");
break;
case CBCentralManagerState.Resetting:
Debug.WriteLine("CBCentralManagerState.Resetting");
break;
case CBCentralManagerState.Unsupported:
Debug.WriteLine("CBCentralManagerState.Unsupported");
break;
case CBCentralManagerState.Unauthorized:
Debug.WriteLine("CBCentralManagerState.Unauthorized");
break;
case CBCentralManagerState.PoweredOff:
Debug.WriteLine("CBCentralManagerState.PoweredOff");
break;
case CBCentralManagerState.PoweredOn:
Debug.WriteLine("CBCentralManagerState.PoweredOn");
central.ScanForPeripherals(peripheralUuids: new CBUUID[] { },
options: new PeripheralScanningOptions { AllowDuplicatesKey = true });
break;
default:
throw new NotImplementedException();
}
}
public override void WillRestoreState(CBCentralManager central, NSDictionary dict)
{
Debug.WriteLine($"WillRestoreState(CBCentralManager central, NSDictionary {dict})");
}
#endregion CBCentralManagerDelegate
}
So in case anyone is looking for this: the universal beacon library does not have an iOS implementation that converts the iOS packets to the universal packets. This needs to be implemented.
How do I distinguish that it is a beacon being added?
I look for the Eddystone packets and, if found, add the beacon to the observable list.
How do I get the unique ID and URL from the beacon? (They are Kontakt beacons.)
You need to loop through the advertisementData sent with the advertisement and create a BLEAdvertisementDataSection, copying the frame data as NSData.
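For anyone implementing that detection: Eddystone frames are broadcast as service data under the 16-bit service UUID 0xFEAA, and the first byte of the frame identifies the frame type (0x00 UID, 0x10 URL, 0x20 TLM). Below is a minimal byte-level sketch, written in Java rather than the Xamarin C# above just to match the rest of this page; the class and method names are illustrative only, and expansion-code handling is omitted:
final class EddystoneSketch {
private static final String[] URL_SCHEMES = { "http://www.", "https://www.", "http://", "https://" };
// A BLE advertisement is an Eddystone frame if its service data is keyed by the 16-bit UUID 0xFEAA.
static boolean isEddystone(int serviceUuid16) {
return serviceUuid16 == 0xFEAA;
}
// Decodes an Eddystone-URL frame (frame type 0x10); returns null for any other frame.
static String decodeEddystoneUrl(byte[] frameData) {
if (frameData == null || frameData.length < 3 || frameData[0] != 0x10) {
return null; // frameData[0] is the frame type; frameData[1] is TX power
}
int scheme = frameData[2] & 0xFF; // URL scheme prefix code
if (scheme >= URL_SCHEMES.length) {
return null;
}
StringBuilder url = new StringBuilder(URL_SCHEMES[scheme]);
for (int i = 3; i < frameData.length; i++) {
url.append((char) (frameData[i] & 0xFF)); // bytes 0x00-0x0D are ".com/"-style expansion codes, not handled here
}
return url.toString();
}
}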

CSV File Not Being Read from Servlet

As the title says, I have a CSV file that will not load from within my web application. I am using NetBeans to build the project.
Whenever I launch the project from NetBeans, it works like it should; however, when I take the WAR file and try to deploy it from within the GlassFish interface, it shows the variables as undefined, which tells me that it is not reading the file. Screenshots below show what is happening and my folder structure.
I have read many posts here and @BalusC has some great information here, but it's not working for me. I believe this is somehow my fault, but I need a bit more specific help here than just reading another post.
I have put the CSV file that I am intending to load into the /src/main/resources folder as noted here by BalusC. The code I am using to load the file is as follows.
As a side note, I have a JSP that I am using to check the location and access to the file. The JSP can access and display the file without any problems when the application is deployed manually.
Edit: I ran a debug and could not find anything wrong, so I ran GlassFish in verbose mode and loaded the page. Once the page was up, it started reading from the file and sending the data, but it still shows "undefined" in all fields.
Here is the output data from running glassfish in verbose mode.
[#|2017-05-05T16:34:37.609+0900|INFO|glassfish 4.1|DukeETFServlet|_ThreadID=33;_ThreadName=http-listener-1(3);_TimeMillis=1493969677609;_LevelValue=800;|
Connection open.|#]
[#|2017-05-05T16:34:38.014+0900|INFO|glassfish 4.1|DukeETFServlet|_ThreadID=109;_ThreadName=__ejb-thread-pool3;_TimeMillis=1493969678014;_LevelValue=800;|
Sent: ABRN / Arbor Realty Trust 7.375% Senior / 25.32 / 25.11 / 25.24 / 12000 / 24.27 / 26.15 / Fri May 05 16:34:38 JST 2017|#]
[#|2017-05-05T16:34:38.016+0900|INFO|glassfish 4.1|DukeETFServlet|_ThreadID=109;_ThreadName=__ejb-thread-pool3;_TimeMillis=1493969678016;_LevelValue=800;|
Connection closed.|#]
[#|2017-05-05T16:34:38.024+0900|INFO|glassfish 4.1|DukeETFServlet|_ThreadID=34;_ThreadName=http-listener-1(4);_TimeMillis=1493969678024;_LevelValue=800;|
Connection open.|#]
[#|2017-05-05T16:34:38.029+0900|INFO|glassfish 4.1|DukeETFServlet|_ThreadID=119;_ThreadName=__ejb-thread-pool4;_TimeMillis=1493969678029;_LevelValue=800;|
Sent: ABT / Abbott Laboratories / 44.01 / 43.60 / 43.65 / 7487400 / 36.76 / 45.84 / Fri May 05 16:34:38 JST 2017|#]
Here is the data for loading the file.
Servlet
@WebServlet(urlPatterns={"/dukeetf"}, asyncSupported=true)
public class DukeETFServlet extends HttpServlet {
private static final Logger logger = Logger.getLogger("DukeETFServlet");
private static final long serialVersionUID = 2114153638027156979L;
private Queue<AsyncContext> requestQueue;
@EJB private PriceVolumeBean pvbean;
@Override
public void init(ServletConfig config) {
/* Queue for requests */
requestQueue = new ConcurrentLinkedQueue<>();
/* Register with the bean that provides price/volume updates */
pvbean.registerServlet(this);
}
/* PriceVolumeBean calls this method every second to send updates */
public void send(String ticker, String name, float highPrice, float lowPrice,
float closingPrice, int volume, float fiftyTwoWeekHigh, float fiftyTwoWeekLow,
String currentTime) {
/* Send update to all connected clients */
for (AsyncContext acontext : requestQueue) {
try {
String msg = String.format("%s / %s / %.2f / %.2f / %.2f / %d /"
+ " %.2f / %.2f / %s",
ticker, name, highPrice, lowPrice, closingPrice, volume,
fiftyTwoWeekHigh, fiftyTwoWeekLow, currentTime);
PrintWriter writer = acontext.getResponse().getWriter();
writer.write(msg);
logger.log(Level.INFO, "Sent: {0}", msg);
/* Close the connection
* The client (JavaScript) makes a new one instantly */
acontext.complete();
} catch (IOException ex) {
logger.log(Level.INFO, ex.toString());
}
}
}
/* Service method */
@Override
public void doGet(HttpServletRequest request, HttpServletResponse response)
throws ServletException, IOException {
response.setContentType("text/html");
/* Put request in async mode. */
final AsyncContext acontext = request.startAsync();
/* Remove from the queue when done */
acontext.addListener(new AsyncListener() {
@Override
public void onComplete(AsyncEvent ae) throws IOException {
requestQueue.remove(acontext);
logger.log(Level.INFO, "Connection Being Closed.");
}
@Override
public void onTimeout(AsyncEvent ae) throws IOException {
requestQueue.remove(acontext);
logger.log(Level.INFO, "Connection Has Timed Out.");
}
@Override
public void onError(AsyncEvent ae) throws IOException {
requestQueue.remove(acontext);
logger.log(Level.INFO, "Connection error.");
}
@Override
public void onStartAsync(AsyncEvent ae) throws IOException { }
});
/* Add to the queue */
requestQueue.add(acontext);
logger.log(Level.INFO, "Connection Being Opened.");
}
}
Class to get information from CSV
//Get Stock Data From CSV File
public static ArrayList<Stock> getListOfStocks() throws IOException {
ArrayList<Stock> stocks = new ArrayList<>();
InputStream is =
StockService.class.getResourceAsStream("/stockdata.csv");
// create an instance of BufferedReader
// using try with resource, Java 7 feature to close resources
try (CSVReader reader = new CSVReader(new InputStreamReader(is))) {
// read the first line from the text file
String[] nextLine;
reader.readNext();
// loop until all lines are read
while ((nextLine = reader.readNext()) != null) {
Stock newStock = new Stock(nextLine[0], nextLine[1],
Float.valueOf(nextLine[2]), Float.valueOf(nextLine[3]),
Float.valueOf(nextLine[4]), Integer.valueOf(nextLine[5]),
Float.valueOf(nextLine[6]), Float.valueOf(nextLine[7]));
stocks.add(newStock);
}
}
return stocks;
}
Bean that retrieves and sends information
/* Updates price and volume information every second */
@Startup
@Singleton
public class PriceVolumeBean {
/* Use the container's timer service */
@Resource TimerService tservice;
private DukeETFServlet servlet;
//Set Variable for Counter
private int i = 0;
//Set date time variable
String currentTime;
//Set Variables for Stock Data
private String ticker;
private String name;
private float highPrice;
private float lowPrice;
private float closingPrice;
private int volume;
private float fiftyTwoWeekHigh;
private float fiftyTwoWeekLow;
private static final Logger logger = Logger.getLogger("PriceVolumeBean");
@PostConstruct
public void init() {
/* Intialize the EJB and create a timer */
logger.log(Level.INFO, "Initializing EJB.");
servlet = null;
tservice.createIntervalTimer(2000, 2000, new TimerConfig());
}
public void registerServlet(DukeETFServlet servlet) {
/* Associate a servlet to send updates to */
this.servlet = servlet;
}
@Timeout
public void timeout() throws IOException {
// Update date
Date date = new Date();
// Read the stock data once per tick (rather than re-reading the CSV for every field)
Stock stock = StockService.getListOfStocks().get(i);
ticker = stock.getTicker();
name = stock.getName();
highPrice = stock.getHighPrice();
lowPrice = stock.getLowPrice();
closingPrice = stock.getClosingPrice();
volume = stock.getVolume();
fiftyTwoWeekHigh = stock.getFiftyTwoWeekHigh();
fiftyTwoWeekLow = stock.getFiftyTwoWeekLow();
currentTime = date.toString();
// Send updated information
if (servlet != null)
servlet.send(ticker, name, highPrice, lowPrice, closingPrice,
volume, fiftyTwoWeekHigh, fiftyTwoWeekLow, currentTime);
// Counter that keeps from going beyond size of arraylist
i++;
if (i == 100) {
i = 0;
}
}
}
