"The arithmetic operation can overflow": is it possible to cause an arithmetic overflow?

I wanted some advice on the following contract function, which is flagged for a possible overflow. Wrapping the loop header in assert or require spits back:
ParserError: Expected ',' but got identifier
assert (uint i = 0; i < _index + 1; i++) {
I am leaning towards the require implementation, given that the SafeMath library is already imported at the beginning of the contract. I have seen many different contracts with the same implementation but am having trouble determining the correct approach. Thanks so much for any help.
The arithmetic operation can overflow.
It is possible to cause an arithmetic overflow. Prevent the overflow by constraining inputs using the require() statement or by using the OpenZeppelin SafeMath library for integer arithmetic operations. Refer to the transaction trace generated for this issue to reproduce the overflow.
I will also attach the reproduction steps for the vulnerability.
[Instructions to reproduce this vulnerability (Test Case 1)]
contract FreezableToken is StandardToken {
// freezing chains
mapping (bytes32 => uint64) internal chains;
// freezing amounts for each chain
mapping (bytes32 => uint) internal freezings;
// total freezing balance per address
mapping (address => uint) internal freezingBalance;
event Freezed(address indexed to, uint64 release, uint amount);
event Released(address indexed owner, uint amount);
/**
* @dev Gets the balance of the specified address, including frozen tokens.
* @param _owner The address to query the balance of.
* @return An uint256 representing the amount owned by the passed address.
*/
function balanceOf(address _owner) public view returns (uint256 balance) {
return super.balanceOf(_owner) + freezingBalance[_owner];
}
/**
* @dev Gets the balance of the specified address without freezing tokens.
* @param _owner The address to query the balance of.
* @return An uint256 representing the amount owned by the passed address.
*/
function actualBalanceOf(address _owner) public view returns (uint256 balance) {
return super.balanceOf(_owner);
}
function freezingBalanceOf(address _owner) public view returns (uint256 balance) {
return freezingBalance[_owner];
}
/**
* @dev Gets freezing count.
* @param _addr Address of the frozen tokens owner.
*/
function freezingCount(address _addr) public view returns (uint count) {
uint64 release = chains[toKey(_addr, 0)];
while (release != 0) {
count++;
release = chains[toKey(_addr, release)];
}
}
/**
* @dev Gets freezing end date and freezing balance for the freezing portion specified by index.
* @param _addr Address of the frozen tokens owner.
* @param _index Freezing portion index. It is ordered by release date descending.
*/
function getFreezing(address _addr, uint _index) public view returns (uint64 _release, uint _balance) {
for (uint i = 0; i < _index + 1; i++) { // <- error flagged here: "_index + 1" can overflow
_release = chains[toKey(_addr, _release)];
if (_release == 0) {
return;
}
}
_balance = freezings[toKey(_addr, _release)];
}
/**
* @dev Freeze your tokens to the specified address.
* Be careful, gas usage is not deterministic,
* and depends on how many freezes the _to address already has.
* @param _to Address to which tokens will be frozen.
* @param _amount Amount of tokens to freeze.
* @param _until Release date, must be in the future.
*/
function freezeTo(address _to, uint _amount, uint64 _until) public {
require(_to != address(0));
require(_amount <= balances[msg.sender]);
balances[msg.sender] = balances[msg.sender].sub(_amount);
bytes32 currentKey = toKey(_to, _until);
freezings[currentKey] = freezings[currentKey].add(_amount);
freezingBalance[_to] = freezingBalance[_to].add(_amount);
freeze(_to, _until);
emit Transfer(msg.sender, _to, _amount);
emit Freezed(_to, _until, _amount);
}
/**
* @dev Release the first available frozen tokens.
*/
function releaseOnce() public {
bytes32 headKey = toKey(msg.sender, 0);
uint64 head = chains[headKey];
require(head != 0);
require(uint64(block.timestamp) > head);
bytes32 currentKey = toKey(msg.sender, head);
uint64 next = chains[currentKey];
uint amount = freezings[currentKey];
balances[msg.sender] = balances[msg.sender].add(amount);
freezingBalance[msg.sender] = freezingBalance[msg.sender].sub(amount);
if (next == 0) {
} else {
chains[headKey] = next;
}
emit Released(msg.sender, amount);
}
/**
* @dev Release all frozen tokens available for release. Gas usage is not deterministic!
* @return how many tokens were released
*/
function releaseAll() public returns (uint tokens) {
uint release;
uint balance;
(release, balance) = getFreezing(msg.sender, 0);
while (release != 0 && block.timestamp > release) {
releaseOnce();
tokens += balance;
(release, balance) = getFreezing(msg.sender, 0);
}
}
function toKey(address _addr, uint _release) internal pure returns (bytes32 result) {
// WISH masc to increase entropy
result = 0x5749534800000000000000000000000000000000000000000000000000000000;
assembly {
result := or(result, mul(_addr, 0x10000000000000000))
result := or(result, and(_release, 0xffffffffffffffff))
}
}
function freeze(address _to, uint64 _until) internal {
require (_until > block.timestamp);
bytes32 key = toKey(_to, _until);
bytes32 parentKey = toKey(_to, uint64(0));
uint64 next = chains[parentKey];
if (next == 0) {
chains[parentKey] = _until;
return;
}
bytes32 nextKey = toKey(_to, next);
uint parent;
while (next != 0 && _until > next) {
parent = next;
parentKey = nextKey;
next = chains[nextKey];
nextKey = toKey(_to, next);
}
if (_until == next) {
return;
}
if (next != 0) {
chains[key] = next;
}
chains[parentKey] = _until;
}
}
[1]: https://i.stack.imgur.com/ayg2D.png

Based on the context, I'm assuming that the error message "The arithmetic operation can overflow." is from a static analysis tool.
Assuming you're using a Solidity version lower than 0.8.0, the for loop header is theoretically vulnerable to integer overflow, but only if _index is 2^256 - 1, the max value of uint. That value would make the _index + 1 expression overflow.
It's not sufficient to just import the SafeMath library. You also need to use its functions instead of the native arithmetic operations to prevent overflow.
contract FreezableToken is StandardToken {
// allows using the library's functions on the `uint` type
using SafeMath for uint;
function getFreezing(address _addr, uint _index) public view returns (uint64 _release, uint _balance) {
// use the library's `add()` function instead of the `+` operation
for (uint i = 0; i < _index.add(1); i = i.add(1)) {
Or upgrade to Solidity 0.8+, which checks for overflow at the language level, so you won't have to use the SafeMath library.

Related

Factorize code with custom functions in Firebase Cloud Function

I want to factorize my code in Cloud Functions in order to improve readability and maintenance. The code below works, but after waiting for all promises to complete with Promise.all(), the function times out.
The things I don't understand are:
It works great and completes without a timeout when toiletJsonObject["fields"]["adresse"] = formatAddress(toiletJsonObject["fields"]["adresse"]) is commented out.
If it works without that line, the timeout should be due to the formatAddress() function. However, this function is not async and just returns a string synchronously. Maybe that's what I'm misunderstanding.
So my questions are:
How do I correct my code to avoid the timeout?
What's the best way to factorize code with custom functions that are only used inside the file and therefore don't need to be exported?
The entire code:
import * as functions from "firebase-functions";
import * as admin from "firebase-admin";
import fetch from "node-fetch";
admin.initializeApp();
const db = admin.firestore();
export const tempoCF = functions.firestore.document("/tempo/{docId}").onCreate(async () => {
console.log("onCreate")
const settings = { method: "Get" }
const metaUrl = "https://opendata.paris.fr/api/datasets/1.0/sanisettesparis/"
const toiletUpdateDateRef = db.collection('toilets').doc("updateDate")
try {
// Get meta data to check last update date
const metaResponse = await fetch(metaUrl, settings)
const metaJson = await metaResponse.json()
const metaUpdateDate = metaJson["metas"]["modified"]
const lastUpdatedDateDoc = await toiletUpdateDateRef.get()
if (!lastUpdatedDateDoc.exists) {
console.log("No existing date document, create one and add last update date : " + metaUpdateDate)
await fetchDataFromURL()
return toiletUpdateDateRef.set({ "lastUpdateDate": metaUpdateDate })
} else {
const lastUpdateDate = lastUpdatedDateDoc.data()["lastUpdateDate"]
// If date from meta data newer that saved date : get data and update
if (new Date(lastUpdateDate) < new Date(metaUpdateDate)) {
console.log("New data available, update database")
await fetchDataFromURL()
return toiletUpdateDateRef.set({ "lastUpdateDate": metaUpdateDate })
}
else {
console.log("No new data available, do nothing")
return null
}
}
}
catch (error) {
console.log(error);
return null;
}
}
);
async function fetchDataFromURL() {
const dataUrl = "https://opendata.paris.fr/api/records/1.0/search/?dataset=sanisettesparis&q=&rows=-1"
const settings = { method: "Get" }
try {
const response = await fetch(dataUrl, settings)
const json = await response.json()
const promises = []
console.log("fetch data and add toilets to collection")
json["records"].forEach(toiletJsonObject => {
delete toiletJsonObject["fields"]["geo_shape"]
toiletJsonObject["fields"]["adresse"] = formatAddress(toiletJsonObject["fields"]["adresse"])
console.log("after updating adresse field: " + toiletJsonObject["fields"].toString())
const p = db.collection("toilets").doc(toiletJsonObject["recordid"]).set(toiletJsonObject["fields"])
promises.push(p)
})
console.log("finished creating promises. Wait for all to complete")
return Promise.all(promises);
}
catch (error) {
console.log(error);
return null;
}
}
const linkWords = ["de", "des", "du", "le"]
const linkLetters = ["l", "d"]
const firstWordsAddress = ["face", "opposé", "au"]
const alwaysLowerCaseWords = ["ville", "rue"]
function formatAddress(address) {
let processedAddress = ""
if (address != null) {
//if (address.length <= 1) processedAddress = address.toUpperCase();
// Split string into list of words
var wordsList = address.split(' ')
.filter((word) => {
// If there is a word in front of the street number, don't use it
if (firstWordsAddress.includes(word.toLowerCase())) return false
// Else use it
return true
})
var capitalizedList = wordsList.map((word) => {
const lowerCaseWord = word.toLowerCase() //TOSTRING ?
// If current word is a link word, don't capitalize
if (linkWords.includes(lowerCaseWord))
return lowerCaseWord
// If current word is a link letter, add ' char
else if (linkLetters.includes(lowerCaseWord))
return lowerCaseWord + '\''
// If current word should always be in lower case, don't capitalize
else if (alwaysLowerCaseWords.includes(lowerCaseWord))
return word.toLowerCase() //TOSTRING
// Else, capitalize the word
return word[0].toUpperCase() + word.substr(1).toLowerCase()
});
// Always capitalize first word of the address
capitalizedList[0] = capitalizedList[0][0].toUpperCase() + capitalizedList[0].substr(1).toLowerCase()
processedAddress = capitalizedList.join(' ')
processedAddress = processedAddress.replace("\' ", "\'")
processedAddress = processedAddress.trim()
}
return processedAddress
}
Regarding the formatAddress() helper function you defined, there doesn't appear to be an issue with it in its current form. It can happily run through the entire list of 644 addresses ~210 times per second.
Any timeouts are instead likely to be caused by performing so many database writes in quick succession. When running fetchDataFromURL(), you "spam" the Firestore server with a request for each toilet object you are uploading.
The best-practice approach would be to compile a Batched Write and then commit the result once you've finished processing the data.
As stated in that documentation:
A batched write can contain up to 500 operations. Each operation in the batch counts separately towards your Cloud Firestore usage. Within a write operation, field transforms like serverTimestamp, arrayUnion, and increment each count as an additional operation.
Note: The current list of field transforms includes serverTimestamp, arrayUnion, arrayRemove, and increment. Reference: FieldValue
Creating/deleting/writing a document to Firestore is considered "one operation". Because a field transform requires reading the document, then writing data to that document, it is counted as "two operations".
Because a single batched write is limited to 500 operations, you should split your data up into smaller batched writes so that each batch is less than this 500 operations limit. The easiest way to achieve this would be to use this MultiBatch class (included below) that I've updated from one of my old answers.
If the data you are writing to a Cloud Firestore document is just basic data, use one of multibatch.create(), multibatch.delete(), multibatch.set(), or multibatch.update(). Each time one of these is called, the internal operations counter is increased by 1.
If the data you are writing to Cloud Firestore contains any FieldValue
transforms, use one of multibatch.transformCreate(), multibatch.transformDelete(), multibatch.transformSet(), or multibatch.transformUpdate(). Each time one of these is called, the internal operations counter is increased by 2.
Once the internal counter exceeds 500, it automatically starts a new batched write and adds it to its internal list.
When you've queued up all your data ready to send off to Firestore, call multibatch.commit().
console.log("Fetching data from third-party server...")
const response = await fetch(dataUrl, settings)
const json = await response.json()
console.log("Data obtained. Parsing as Firestore documents...")
const batch = new MultiBatch(db)
json["records"].forEach(toiletJsonObject => {
delete toiletJsonObject["fields"]["geo_shape"]
toiletJsonObject["fields"]["adresse"] = formatAddress(toiletJsonObject["fields"]["adresse"])
console.log("after updating adresse field: " + toiletJsonObject["fields"].toString())
batch.set(db.collection("toilets").doc(toiletJsonObject["recordid"]), toiletJsonObject["fields"])
})
console.log("Finished parsing. Committing data to Firestore...")
const results = await batch.commit() // see notes about MultiBatch#commit()
console.log("Finished data upload!")
return results;
import { firestore } from "firebase-admin";
/**
* Helper class to compile an expanding `firestore.WriteBatch`.
*
* Using an internal operations counter, this class will automatically start a
* new `firestore.WriteBatch` instance when it detects it has hit the operations
* limit of 500. Once prepared, you can commit the batches together.
*
* Note: `FieldValue` transform operations such as `serverTimestamp`,
* `arrayUnion`, `arrayRemove`, `increment` are counted as two operations. If
* your written data makes use of one of these, you should use the appropriate
* `transformCreate`, `transformSet` or `transformUpdate` method so that the
* internal counter is correctly increased by 2 (the normal versions only
* increase the counter by 1).
*
* If not sure, just use `delete`, `transformCreate`, `transformSet`, or
* `transformUpdate` functions for every operation as this will make sure you
* don't exceed the limit.
*
* @author Samuel Jones [MIT License] (@samthecodingman)
* @see https://stackoverflow.com/a/66692467/3068190
* @see https://firebase.google.com/docs/firestore/manage-data/transactions
* @see https://firebase.google.com/docs/reference/js/firebase.firestore.FieldValue
*/
export class MultiBatch {
constructor(dbRef) {
this.dbRef = dbRef;
this.committed = false;
this.currentBatch = this.dbRef.batch();
this.currentBatchOpCount = 0;
this.batches = [this.currentBatch];
}
_getCurrentBatch(count) {
if (this.committed) throw new Error("MultiBatch already committed.");
if (this.currentBatchOpCount + count > 500) {
// operation limit exceeded, start a new batch
this.currentBatch = this.dbRef.batch();
this.currentBatchOpCount = 0;
this.batches.push(this.currentBatch);
}
this.currentBatchOpCount += count;
return this.currentBatch;
}
/** Creates the document, fails if it exists. */
create(ref, data) {
this._getCurrentBatch(1).create(ref, data);
return this;
}
/**
* Creates the document, fails if it exists.
*
* Used for commands that contain serverTimestamp, arrayUnion, etc
*/
transformCreate(ref, data) {
this._getCurrentBatch(2).create(ref, data);
return this;
}
/** Writes the document, creating/overwriting/etc as applicable. */
set(ref, data, options = undefined) {
this._getCurrentBatch(1).set(ref, data, options);
return this;
}
/**
* Writes the document, creating/overwriting/etc as applicable.
*
* Used for commands that contain serverTimestamp, arrayUnion, etc
*/
transformSet(ref, data, options = undefined) {
this._getCurrentBatch(2).set(ref, data, options);
return this;
}
/** Merges data into the document, failing if the document doesn't exist. */
update(ref, data, ...fieldsOrPrecondition) {
this._getCurrentBatch(1).update(ref, data, ...fieldsOrPrecondition);
return this;
}
/**
* Merges data into the document, failing if the document doesn't exist.
*
* Used for commands that contain serverTimestamp, arrayUnion, etc
*/
transformUpdate(ref, data, ...fieldsOrPrecondition) {
this._getCurrentBatch(2).update(ref, data, ...fieldsOrPrecondition);
return this;
}
/** Deletes the document. */
delete(ref) {
this._getCurrentBatch(1).delete(ref);
return this;
}
/**
*
* Commits all of the batches to Firestore.
*
* Note: Unlike normal batch operations, this may cause one or more atomic
* writes. One batch may succeed where others fail. By default, if any batch
* fails, it will fail the whole promise. This can be suppressed by passing in
* a truthy value as the first argument and checking the results returned by
* this method.
*
* @param {boolean} [suppressErrors=false] Whether to suppress errors on a
* per-batch basis.
* @return {firestore.WriteResult[][]} an array containing, for each batch, the
* array of `WriteResult` objects (or an error-batch pair if
* `suppressErrors=true`).
*/
commit(suppressErrors = false) {
this.committed = true;
const mapCallback = suppressErrors
? (batch) => batch.commit().catch((error) => ({ error, batch }))
: (batch) => batch.commit();
return Promise.all(this.batches.map(mapCallback));
}
}

Breadth first traversal of arbitrary graph with minimal memory

I have an enormous directed graph I need to traverse in search for the shortest path to a specific node from a given starting point. The graph in question does not exist explicitly; the child nodes are determined algorithmically from the parent nodes.
(To give an illustration: imagine a graph of chess positions. Each node is a chess position and its children are all the legal moves from that position.)
So I have a queue for open nodes, and every time I process the next node in the queue I enqueue all of its children. But since the graph can have cycles I also need to maintain a hashset of all visited nodes so I can check if I have visited one before.
This works okay, but since this graph is so large, I run into memory problems. All of the nodes in the queue are also stored in the hashset, which in my case tends to be around 50% of the total number of visited nodes in practice.
Is there some magical way to get rid of this redundancy while keeping the speed of the hashset? (Obviously, I could get rid of the redundancy by NOT hashing and just doing a linear search, but that is out of the question.)
I solved it by writing a class that stores the keys in a list and stores the indices of the keys in a hashtable. The next node "in the queue" is always the next node in the list, until you find what you're looking for or you've traversed the entire graph.
class IndexMap<T>
{
private List<T> values;
private LinkedList<int>[] buckets;
public int Count { get; private set; } = 0;
public IndexMap(int capacity)
{
values = new List<T>(capacity);
buckets = new LinkedList<int>[NextPowerOfTwo(capacity)];
for (int i = 0; i < buckets.Length; ++i)
buckets[i] = new LinkedList<int>();
}
public void Add(T item) //assumes item is not yet in map
{
if (Count == buckets.Length)
ReHash();
int bucketIndex = item.GetHashCode() & (buckets.Length - 1);
buckets[bucketIndex].AddFirst(Count++);
values.Add(item);
}
public bool Contains(T item)
{
int bucketIndex = item.GetHashCode() & (buckets.Length - 1);
foreach(int i in buckets[bucketIndex])
{
if (values[i].Equals(item))
return true;
}
return false;
}
public T this[int index]
{
get => values[index];
}
private void ReHash()
{
LinkedList<int>[] newBuckets = new LinkedList<int>[2 * buckets.Length];
for (int i = 0; i < newBuckets.Length; ++i)
newBuckets[i] = new LinkedList<int>();
for (int i = 0; i < buckets.Length; ++i)
{
foreach (int index in buckets[i])
{
int bucketIndex = values[index].GetHashCode() & (newBuckets.Length - 1);
newBuckets[bucketIndex].AddFirst(index);
}
buckets[i] = null;
}
buckets = newBuckets;
}
private int NextPowerOfTwo(int n)
{
if ((n & n-1) == 0)
return n;
int output = 1; // must start at 1, otherwise the shift loop below never terminates
while (n > output)
{
output <<= 1;
}
return output;
}
}
The old method of maintaining both an array of the open nodes and a hashtable of the visited nodes needed n*(1+a)*size(T) space, where a is the ratio of nodes_in_the_queue over total_nodes_found and size(T) is the size of a node.
This method needs n*(size(T) + size(int)). If your nodes are significantly larger than an int, this can save a lot.
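To make the usage concrete, here is a minimal Java sketch of the same idea (the class above is C#); the State type, its isGoal() and children() methods, and the IndexMap method names count/get/add/contains are hypothetical stand-ins for the structure described above.
static State breadthFirstSearch(State start) {
    // The list inside the IndexMap doubles as the FIFO queue: `head` is the front,
    // count() is the back, so no node is ever stored in two places.
    IndexMap<State> discovered = new IndexMap<>(1 << 20);
    discovered.add(start);
    for (int head = 0; head < discovered.count(); head++) {
        State current = discovered.get(head);
        if (current.isGoal()) {
            return current; // BFS order guarantees this is a shortest path
        }
        for (State child : current.children()) {
            if (!discovered.contains(child)) {
                discovered.add(child); // enqueue and mark visited in one step
            }
        }
    }
    return null; // goal not reachable from start
}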

Algorithm used to calculate hashcode for segments in ConcurrentHashMap in Java

What is the algorithm used to calculate the hash code for segments in ConcurrentHashMap in Java?
Firstly, we know that ConcurrentHashMap is divided into a finite number of segments.
Segment is a final class inside ConcurrentHashMap.
The definition of Segment is as below:
/** Inner Segment class plays a significant role **/
protected static final class Segment {
protected int count;
protected synchronized int getCount() {
return this.count;
}
protected synchronized void synch() {}
}
/** Segment Array declaration **/
public final Segment[] segments = new Segment[32]; // by default I am taking 32
Let me explain by taking the put method of the ConcurrentHashMap class.
put(Object key, Object value)
Before placing this entry into any one of those 32 segments, we need to calculate the hash code.
First we calculate the hash of the key:
int hashVal = hash(key);
static int hash(Object x) {
int h = x.hashCode();
return (h << 7) - h + (h >>> 9) + (h >>> 17);
}
After getting the hashVal we can decide the segment as below:
Segment seg = segments[hashVal & 0x1F];
// segments is an array defined above
This is just for understanding; refer to the Oracle docs for the actual implementation.
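For a concrete illustration of the two steps above, here is a small self-contained example (the sample key and the 32-segment count are arbitrary assumptions) showing how a key's hashCode() is spread and then masked down to a segment index:
public class SegmentIndexDemo {
    // Same supplemental hash as above: spreads the bits of the raw hashCode.
    static int hash(Object x) {
        int h = x.hashCode();
        return (h << 7) - h + (h >>> 9) + (h >>> 17);
    }

    public static void main(String[] args) {
        Object key = "someKey"; // arbitrary example key
        int hashVal = hash(key);
        // With 32 segments, masking with 0x1F keeps the low 5 bits, giving an index in [0, 31].
        int segmentIndex = hashVal & 0x1F;
        System.out.println("hashVal = " + hashVal + ", segment index = " + segmentIndex);
    }
}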

Wireshark V2 plugin info column resets after applying filter

I have a basic Wireshark plugin that was originally written for Wireshark V1 and I'm currently trying to port it to V2.
The issue I'm currently having is that when the plugin is run in wireshark-qt-release the plugin starts fine and all the necessary information is displayed but once a filter is set the information contained within the info column gets cleared. The info column stays empty after clearing the filter also.
The packet type variable used to set the string found in the info column is also added to the Header tree of the packet being dissected. This stays set with or without a filter being set.
Built on branch master-2.0, branch is up-to-date.
Built with MSVC 2013 and I get no errors or warnings.
I have also enabled the debug console in Wireshark but get nothing out of it, though that might be because I can't adjust the debug level, which is currently set to 28.
The plugin works fine in the build of wireshark-gtk2 built from the same checkout.
Any help appreciated.
/* packet-trcp.c
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation; either version 2
* of the License, or (at your option) any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
*/
#include "config.h"
#include <stdio.h>
#include <epan/dissectors/packet-tcp.h>
#define PROTO_TAG_TRCP "TRCP"
#define MAGIC_NUMBER 0x111111
#define FRAME_HEADER_LEN 8
#define TRCP_PORT 1111
static int proto_trcp = -1;
static dissector_handle_t data_handle = NULL;
static dissector_handle_t trcp_handle = NULL;
static void dissect_trcp(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree);
static int dissect_trcp_message(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree, void *);
static const value_string packet_type_names[] =
{
{ 0, "Invalid message" },
{ 1, "Connection Request" },
{ 2, "Connection Response" },
{ 3, "Disconnect" },
{ 4, "File Header" },
{ 5, "File Chunk" },
{ 6, "Cancel File Transfer" },
{ 7, "Firmware Imported" },
{ 8, "Alert Message" },
{ 9, "Restore Factory Settings" },
{ 10, "Format Internal Storage" },
{ 11, "Beacon" },
{ 12, "Shutdown" },
{ 0, NULL }
};
static gint hf_trcp_header = -1;
static gint hf_trcp_magic = -1;
static gint hf_trcp_length = -1;
static gint hf_trcp_type = -1;
static gint hf_trcp_data = -1;
static gint ett_trcp = -1;
static gint ett_trcp_header = -1;
static gint ett_trcp_data = -1;
//-----------------------------------------------------------------------------------------------------------------------
void proto_reg_handoff_trcp(void)
{
static gboolean initialized = FALSE;
if (!initialized)
{
data_handle = find_dissector("data");
trcp_handle = create_dissector_handle(dissect_trcp, proto_trcp);
dissector_add_uint("tcp.port", TRCP_PORT, trcp_handle);
initialized = TRUE;
}
}
//-----------------------------------------------------------------------------------------------------------------------
void proto_register_trcp (void)
{
static hf_register_info hf[] =
{
{&hf_trcp_header,
{"Header", "trcp.header", FT_NONE, BASE_NONE, NULL, 0x0, "TRCP Header", HFILL }},
{&hf_trcp_magic,
{"Magic", "trcp.magic", FT_UINT32, BASE_HEX, NULL, 0x0, "Magic Bytes", HFILL }},
{&hf_trcp_length,
{"Package Length", "trcp.len", FT_UINT16, BASE_DEC, NULL, 0x0, "Package Length", HFILL }},
{&hf_trcp_type,
{"Type", "trcp.type", FT_UINT16, BASE_DEC, VALS(packet_type_names), 0x0, "Package Type", HFILL }},
{&hf_trcp_data,
{"Data", "trcp.data", FT_NONE, BASE_NONE, NULL, 0x0, "Data", HFILL }}
};
static gint *ett[] =
{
&ett_trcp,
&ett_trcp_header,
&ett_trcp_data
};
proto_trcp = proto_register_protocol ("TRCP Protocol", "TRCP", "trcp");
proto_register_field_array (proto_trcp, hf, array_length (hf));
proto_register_subtree_array (ett, array_length (ett));
register_dissector("trcp", dissect_trcp, proto_trcp);
}
//-----------------------------------------------------------------------------------------------------------------------
static guint get_trcp_message_len(packet_info * pinfo, tvbuff_t * tvb, int offset)
{
guint plen;
plen = tvb_get_ntohs(tvb, offset + 6);
// Add the header length to the data length to get the total packet length
plen += FRAME_HEADER_LEN;
return plen;
}
//-----------------------------------------------------------------------------------------------------------------------
static void dissect_trcp(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree)
{
// According to - 9.4.2. How to reassemble split TCP Packets
tcp_dissect_pdus(tvb, pinfo, tree, // Hand over from above
TRUE, // Reassemble packet or not
FRAME_HEADER_LEN, // Smallest amount of data required to determine message length (8 bytes)
get_trcp_message_len, // Function pointer to a method that returns message length
dissect_trcp_message, // Function pointer to real message dissector
NULL);
}
//-----------------------------------------------------------------------------------------------------------------------
static int dissect_trcp_message(tvbuff_t * tvb, packet_info * pinfo, proto_tree * tree, void * data)
{
proto_item * trcp_item = NULL;
proto_item * trcp_sub_item_header = NULL;
proto_item * trcp_sub_item_data = NULL;
proto_tree * trcp_tree = NULL;
proto_tree * trcp_header_tree = NULL;
proto_tree * trcp_data_tree = NULL;
guint32 magic = 0;
guint32 offset = 0;
guint32 length_tvb = 0;
guint16 type = 0;
guint16 length = 0;
col_set_str(pinfo->cinfo, COL_PROTOCOL, PROTO_TAG_TRCP);
col_clear(pinfo->cinfo, COL_INFO);
if (tree)
{
trcp_item = proto_tree_add_item(tree, proto_trcp, tvb, 0, -1, FALSE);
trcp_tree = proto_item_add_subtree(trcp_item, ett_trcp);
trcp_sub_item_header = proto_tree_add_item(trcp_tree, hf_trcp_header, tvb, offset, -1, FALSE);
trcp_header_tree = proto_item_add_subtree(trcp_sub_item_header, ett_trcp_header);
/*
4 bytes for magic number
2 bytes for packet type
2 bytes for data length
*/
// Add Magic to header tree
proto_tree_add_item(trcp_header_tree, hf_trcp_magic, tvb, offset, 4, FALSE);
offset += 4;
// Get the type byte
type = tvb_get_ntohs(tvb, offset);
// Add Type to header tree
proto_tree_add_uint(trcp_header_tree, hf_trcp_type, tvb, offset, 2, type);
offset += 2;
col_append_fstr(pinfo->cinfo, COL_INFO, "%s", val_to_str(type, packet_type_names, "Unknown Type:0x%02x"));
col_set_fence(pinfo->cinfo, COL_INFO);
// Add Length to header tree
length = tvb_get_ntohs(tvb, offset);
proto_tree_add_uint(trcp_header_tree, hf_trcp_length, tvb, offset, 2, length);
offset += 2;
if (length)
{
// Data
trcp_sub_item_data = proto_tree_add_item(trcp_tree, hf_trcp_data, tvb, offset, -1, FALSE);
trcp_data_tree = proto_item_add_subtree(trcp_sub_item_data, ett_trcp_data);
}
}
return tvb_captured_length(tvb);
}
You are setting the column only if the tree argument is non-NULL.
DO NOT DO THAT.
Always set it, regardless of whether tree is null or not. There is no guarantee that, if your dissector is called in order to provide column information, tree will be non-null; we have never provided such a guarantee, and we will never provide such a guarantee.

Size-limited queue that holds last N elements in Java

A very simple and quick question on Java libraries: is there a ready-made class that implements a Queue with a fixed maximum size, i.e. one that always allows addition of elements but silently removes head elements to make room for newly added elements?
Of course, it's trivial to implement it manually:
import java.util.LinkedList;
public class LimitedQueue<E> extends LinkedList<E> {
private int limit;
public LimitedQueue(int limit) {
this.limit = limit;
}
@Override
public boolean add(E o) {
super.add(o);
while (size() > limit) { super.remove(); }
return true;
}
}
As far as I can see, there's no standard implementation in the Java standard library, but maybe there's one in Apache Commons or something like that?
Apache Commons Collections 4 has a CircularFifoQueue<>, which is what you are looking for. Quoting the javadoc:
CircularFifoQueue is a first-in first-out queue with a fixed size that replaces its oldest element if full.
import java.util.Queue;
import org.apache.commons.collections4.queue.CircularFifoQueue;
Queue<Integer> fifo = new CircularFifoQueue<Integer>(2);
fifo.add(1);
fifo.add(2);
fifo.add(3);
System.out.println(fifo);
// Observe the result:
// [2, 3]
If you are using an older version of Apache Commons Collections (3.x), you can use the CircularFifoBuffer, which is basically the same thing without generics.
Update: updated answer following release of commons collections version 4 that supports generics.
Guava now has an EvictingQueue, a non-blocking queue which automatically evicts elements from the head of the queue when attempting to add new elements onto the queue and it is full.
import java.util.Queue;
import com.google.common.collect.EvictingQueue;
Queue<Integer> fifo = EvictingQueue.create(2);
fifo.add(1);
fifo.add(2);
fifo.add(3);
System.out.println(fifo);
// Observe the result:
// [2, 3]
I like @FractalizeR's solution, but I would in addition keep and return the value from super.add(o)!
public class LimitedQueue<E> extends LinkedList<E> {
private int limit;
public LimitedQueue(int limit) {
this.limit = limit;
}
@Override
public boolean add(E o) {
boolean added = super.add(o);
while (added && size() > limit) {
super.remove();
}
return added;
}
}
Use composition, not extends (yes, I mean extends, as in a reference to the extends keyword in Java, and yes, this is inheritance). Composition is superior because it completely shields your implementation, allowing you to change the implementation without impacting the users of your class.
I recommend trying something like this (I'm typing directly into this window, so buyer beware of syntax errors):
public class LimitedSizeQueue<ElementType> implements Queue<ElementType>
{
private int maxSize;
private LinkedList storageArea;
public LimitedSizeQueue(final int maxSize)
{
this.maxSize = maxSize;
storageArea = new LinkedList();
}
public boolean offer(ElementType element)
{
if (storageArea.size() < maxSize)
{
storageArea.addFirst(element);
}
else
{
storageArea.removeLast();
storageArea.addFirst(element);
}
return true;
}
... the rest of this class
A better option (based on the answer by Asaf) might be to wrap the Apache Collections CircularFifoBuffer with a generic class. For example:
public class LimitedSizeQueue<ElementType> implements Queue<ElementType>
{
private int maxSize;
private CircularFifoBuffer storageArea;
public LimitedSizeQueue(final int maxSize)
{
if (maxSize > 0)
{
this.maxSize = maxSize;
storageArea = new CircularFifoBuffer(maxSize);
}
else
{
throw new IllegalArgumentException("blah blah blah");
}
}
... implement the Queue interface using the CircularFifoBuffer class
}
The only thing I know of that has limited space is the BlockingQueue interface (implemented e.g. by the ArrayBlockingQueue class), but it does not remove the first element when full; instead it blocks the put operation until space is freed (by another thread removing an element).
To my knowledge, your trivial implementation is the easiest way to get such behaviour.
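For contrast, here is a quick, self-contained sketch of the ArrayBlockingQueue behaviour just described: when the queue is full, offer() rejects the element and put() blocks, but nothing is evicted.
import java.util.concurrent.ArrayBlockingQueue;

public class BlockingQueueDemo {
    public static void main(String[] args) {
        ArrayBlockingQueue<Integer> queue = new ArrayBlockingQueue<>(2);
        System.out.println(queue.offer(1)); // true
        System.out.println(queue.offer(2)); // true
        System.out.println(queue.offer(3)); // false: the queue is full, nothing is evicted
        System.out.println(queue);          // [1, 2]
        // queue.put(3) would block here until another thread removes an element.
    }
}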
You can use a MinMaxPriorityQueue from Google Guava, from the javadoc:
A min-max priority queue can be configured with a maximum size. If so, each time the size of the queue exceeds that value, the queue automatically removes its greatest element according to its comparator (which might be the element that was just added). This is different from conventional bounded queues, which either block or reject new elements when full.
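A short sketch of that configuration (note that eviction follows the comparator order, not insertion order, so this is not a strict FIFO):
import com.google.common.collect.MinMaxPriorityQueue;

public class BoundedPriorityQueueDemo {
    public static void main(String[] args) {
        // Keeps at most 2 elements; when the bound is exceeded, the greatest
        // element according to natural ordering is dropped.
        MinMaxPriorityQueue<Integer> queue = MinMaxPriorityQueue
                .maximumSize(2)
                .create();
        queue.add(3);
        queue.add(1);
        queue.add(2);
        System.out.println(queue.peekFirst()); // 1 (the smallest element)
        System.out.println(queue.peekLast());  // 2 (3 was evicted as the greatest)
    }
}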
An LRUMap is another possibility, also from Apache Commons.
http://commons.apache.org/collections/apidocs/org/apache/commons/collections/map/LRUMap.html
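A minimal usage sketch, shown with the Commons Collections 4 package name (the 3.x class at the link above behaves the same way); note this is a map with least-recently-used eviction rather than a FIFO queue:
import org.apache.commons.collections4.map.LRUMap;

public class LruMapDemo {
    public static void main(String[] args) {
        // Holds at most 2 entries; the least recently used entry is evicted on overflow.
        LRUMap<String, Integer> map = new LRUMap<>(2);
        map.put("a", 1);
        map.put("b", 2);
        map.get("a");    // touching "a" makes "b" the least recently used entry
        map.put("c", 3); // exceeds the size, so "b" is evicted
        System.out.println(map.containsKey("b")); // false
        System.out.println(map.containsKey("a")); // true
    }
}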
OK, I'll share this option. It's a pretty performant one: it uses an array internally and reuses entries. It's thread-safe, and you can retrieve the contents as a List.
static class FixedSizeCircularReference<T> {
T[] entries
FixedSizeCircularReference(int size) {
this.entries = new Object[size] as T[]
this.size = size
}
int cur = 0
int size
synchronized void add(T entry) {
entries[cur++] = entry
if (cur >= size) {
cur = 0
}
}
List<T> asList() {
int c = cur
int s = size
T[] e = entries.collect() as T[]
List<T> list = new ArrayList<>()
int oldest = (c == s - 1) ? 0 : c
for (int i = 0; i < e.length; i++) {
def entry = e[oldest + i < s ? oldest + i : oldest + i - s]
if (entry) list.add(entry)
}
return list
}
}
public class ArrayLimitedQueue<E> extends ArrayDeque<E> {
private int limit;
public ArrayLimitedQueue(int limit) {
super(limit + 1);
this.limit = limit;
}
@Override
public boolean add(E o) {
boolean added = super.add(o);
while (added && size() > limit) {
super.remove();
}
return added;
}
@Override
public void addLast(E e) {
super.addLast(e);
while (size() > limit) {
super.removeLast();
}
}
@Override
public boolean offerLast(E e) {
boolean added = super.offerLast(e);
while (added && size() > limit) {
super.pollLast();
}
return added;
}
}
