Always getting same encrypted string with CryptoAPI - encryption

Is there a way to get a non-deterministic output from the CryptoAPI? In other words, a different string output when encrypting a string.
For example, using CALG_AES_256 when deriving a crypt key with password of 'password' and string to encrypt of 'a', it always returns "SnÆwÞ¢L\x1e?6FÏLþw"
I'm somewhat of a n00b in using CryptoAPI, so any assistance is appreciated.
Edited:
Here is the cryptography code from Microsoft's example code decrypte and encrypt This is the same code, just shortened/compacted. This code was compiled in VS 2017 as a Win32 Console app. pszSource and pszDest are two files in the C:\temp folder. source.txt has the letter we're trying to encrypt in it.
The problem I'm having is that this crypt/decrypt code from the CryptoAPI does not allow certain strings to be encrypted and then decrypted (i.e. n, t, L, p, aa, ab, ac, ad, ae, etc). If someone can tell me why, that would be very helpful.
#include <windows.h>
#include <tchar.h>
#include <wincrypt.h>
#define KEYLENGTH 0x00800000
#define ENCRYPT_BLOCK_SIZE 8
bool MyDecryptFile(LPTSTR szSource,LPTSTR szDestination,LPTSTR szPassword);
bool MyEncryptFile(LPTSTR szSource,LPTSTR szDestination,LPTSTR szPassword);
int _tmain(int argc, _TCHAR* argv[])
{
LPTSTR pszSource = L"c:\\temp\\source.txt";
LPTSTR pszDestination = L"c:\\temp\\dest.txt";
LPTSTR pszPassword = L"t";
if (MyEncryptFile(pszSource, pszDestination, pszPassword))
{
_tprintf(TEXT("Encryption of the file %s was successful. \n"),pszSource);
_tprintf(TEXT("The encrypted data is in file %s.\n"),pszDestination);
}
if (MyDecryptFile(pszSource, pszDestination, pszPassword))
{
_tprintf(TEXT("Encryption of the file %s was successful. \n"),pszSource);
_tprintf(TEXT("The encrypted data is in file %s.\n"),pszDestination);
}
return 0;
}
bool MyEncryptFile(LPTSTR pszSourceFile,LPTSTR pszDestinationFile,LPTSTR pszPassword)
{
bool fReturn = false;
HANDLE hSourceFile = INVALID_HANDLE_VALUE, hDestinationFile = INVALID_HANDLE_VALUE;
HCRYPTPROV hCryptProv = NULL;
HCRYPTKEY hKey = NULL, hXchgKey = NULL;
HCRYPTHASH hHash = NULL;
PBYTE pbBuffer = NULL;
DWORD dwBlockLen, dwBufferLen, dwCount;
hSourceFile = CreateFile(pszSourceFile,FILE_READ_DATA,FILE_SHARE_READ,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
if (INVALID_HANDLE_VALUE == hSourceFile)
goto Exit_MyEncryptFile;
hDestinationFile = CreateFile(pszDestinationFile,FILE_WRITE_DATA,FILE_SHARE_READ,NULL,OPEN_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL);
if (INVALID_HANDLE_VALUE == hDestinationFile)
goto Exit_MyEncryptFile;
CryptAcquireContext(&hCryptProv,NULL,MS_ENH_RSA_AES_PROV,PROV_RSA_AES,0);
CryptCreateHash(hCryptProv,CALG_SHA_256,0,0,&hHash);
CryptHashData(hHash,(BYTE *)pszPassword,lstrlen(pszPassword),0);
CryptDeriveKey(hCryptProv,CALG_AES_256,hHash,CRYPT_EXPORTABLE,&hKey);
dwBlockLen = 1000 - 1000 % ENCRYPT_BLOCK_SIZE;
if (ENCRYPT_BLOCK_SIZE > 1)
dwBufferLen = dwBlockLen + ENCRYPT_BLOCK_SIZE;
else
dwBufferLen = dwBlockLen;
pbBuffer = (BYTE *)malloc(dwBufferLen);
bool fEOF = FALSE;
do
{
if (ReadFile(hSourceFile,pbBuffer,dwBlockLen,&dwCount,NULL))
{
if (dwCount < dwBlockLen)
fEOF = TRUE;
if (CryptEncrypt(hKey,NULL,fEOF,0,pbBuffer,&dwCount,dwBufferLen))
WriteFile(hDestinationFile,pbBuffer,dwCount,&dwCount,NULL);
}
}
while (!fEOF);
fReturn = true;
Exit_MyEncryptFile:
if (hSourceFile) CloseHandle(hSourceFile);
if (hDestinationFile) CloseHandle(hDestinationFile);
if (pbBuffer) free(pbBuffer);
if (hHash) {CryptDestroyHash(hHash);hHash = NULL;}
if (hKey) CryptDestroyKey(hKey);
if (hCryptProv) CryptReleaseContext(hCryptProv, 0);
return fReturn;
}
bool MyDecryptFile(LPTSTR pszSourceFile,LPTSTR pszDestinationFile,LPTSTR pszPassword)
{
bool fReturn = false;
HANDLE hSourceFile = INVALID_HANDLE_VALUE, hDestinationFile = INVALID_HANDLE_VALUE;
HCRYPTKEY hKey = NULL;
HCRYPTHASH hHash = NULL;
HCRYPTPROV hCryptProv = NULL;
PBYTE pbBuffer = NULL;
DWORD dwCount, dwBlockLen, dwBufferLen;
hSourceFile = CreateFile(pszDestinationFile,FILE_READ_DATA,FILE_SHARE_READ,NULL,OPEN_ALWAYS,FILE_ATTRIBUTE_NORMAL,NULL);
if (INVALID_HANDLE_VALUE == hSourceFile)
goto Exit_MyDecryptFile;
hDestinationFile = CreateFile(pszSourceFile,FILE_WRITE_DATA,FILE_SHARE_READ,NULL,OPEN_EXISTING,FILE_ATTRIBUTE_NORMAL,NULL);
if (INVALID_HANDLE_VALUE == hDestinationFile)
goto Exit_MyDecryptFile;
CryptAcquireContext(&hCryptProv,NULL,MS_ENH_RSA_AES_PROV,PROV_RSA_AES,0);
CryptCreateHash(hCryptProv,CALG_SHA_256,0,0,&hHash);
CryptHashData(hHash,(BYTE *)pszPassword,lstrlen(pszPassword),0);
CryptDeriveKey(hCryptProv,CALG_AES_256,hHash,CRYPT_EXPORTABLE,&hKey);
dwBlockLen = 1000 - 1000 % ENCRYPT_BLOCK_SIZE;
dwBufferLen = dwBlockLen;
pbBuffer = (PBYTE)malloc(dwBufferLen);
bool fEOF = false;
do
{
if (!ReadFile(hSourceFile,pbBuffer,dwBlockLen,&dwCount,NULL))
goto Exit_MyDecryptFile;
if (dwCount <= dwBlockLen)
fEOF = TRUE;
LONG rv = CryptDecrypt(hKey,0,fEOF,0,pbBuffer,&dwCount);
if (rv==0)
{
DWORD dwErr = GetLastError(); // <--- fails if password and string are n, t, L, p, aa, ab, ac, ad , ae
goto Exit_MyDecryptFile;
}
if (!WriteFile(hDestinationFile,pbBuffer,dwCount,&dwCount,NULL))
goto Exit_MyDecryptFile;
}
while (!fEOF);
fReturn = true;
Exit_MyDecryptFile:
if (pbBuffer) free(pbBuffer);
if (hSourceFile) CloseHandle(hSourceFile);
if (hDestinationFile) CloseHandle(hDestinationFile);
if (hHash) {CryptDestroyHash(hHash);hHash = NULL;}
if (hKey) CryptDestroyKey(hKey);
if (hCryptProv) CryptReleaseContext(hCryptProv, 0);
return fReturn;
}
What about using this to get the KP_IV option?
BOOL bRV;
bRV = CryptAcquireContextW(&hCryptProv, NULL, MS_ENH_RSA_AES_PROV, PROV_RSA_AES, 0);
bRV = CryptGenKey(hCryptProv, CALG_AES_256,0,&hKey);
DWORD dwMode = CRYPT_MODE_CBC;
bRV = CryptSetKeyParam(hKey,KP_MODE,(BYTE*)&dwMode,0);
BYTE pbData[16];
memcpy(pbData,"n",sizeof("n")); // <--- Hard coded password
bRV = CryptSetKeyParam(hKey,KP_IV,pbData,0);
enter code here

If you want to obtain different cypertext when encrypting the same plaintext with the same key, you have to use the CBC mode of operation:
https://en.wikipedia.org/wiki/Block_cipher_mode_of_operation
In order to correctly encrypt with CBC, you need to generate a different random Initialization Vector (IV) every time.
In order to decrypt, you need to know the IV used during encryption.
So, the IV must be associated (in clear) to the cyphertext.
In reference to your example, when calling the CryptDeriveKey function, the CBC is the default mode but it uses an IV set to zero and this invalidates the utility of the CBC operating mode:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa379916(v=vs.85).aspx
In order to set the random IV you need to call the CryptSetKeyParam function, which accept the KP_IV param:
https://msdn.microsoft.com/en-us/library/windows/desktop/aa380272(v=vs.85).aspx
Bye
Giovanni

Related

How to execute multi parameter ODBC command by C++

I'm working on an ODBC program that needs to store two types of long data: nvarchar (max) and nvarbinary (max). It works well when I do it with one parameter, but it gives an error for two parameters. secound loop for SQL_NEED_DATA gives odbc error
ERROR ==> [ODBC SQL Server Driver]Invalid character value for cast specification
.My code is as follows:
bool ExecuteQuery(wstring const& command_text,vector<void*> params)
{
SQLWCHAR* blob = nullptr;// (SQLWCHAR*)params[0];
SQLSMALLINT NumParams;
SQLSMALLINT i, sql_type, decimals, nullable, paramNo;
SQLULEN bytes;
RETCODE retcode;
SQLLEN cbTextSize, valueMax = 0;
vector<SQLLEN> lbytes;
PTR pParamID;
SQLLEN len;
vector<SDWORD> cbBatch;
SQLPOINTER pToken = NULL;
retcode = SQLPrepare(hstmt, (SQLWCHAR*)command_text.c_str(), SQL_NTS);
//Get number of parameters
SQLNumParams(hstmt, &NumParams);
for (i = 0; i < NumParams; i++)
{
size_t length = wcslen((wchar_t*)params[i]);
//This ia important to save all data to sql table
size_t size = length * sizeof(wchar_t);
len = SQL_LEN_DATA_AT_EXEC((SQLLEN)size);
retcode = SQLDescribeParam(hstmt, i + 1, &sql_type, &bytes, &decimals, &nullable);
//SQL_C_WCHAR for hex binary data in wchar_t and SQL_WLONGVARCHAR succeeded
short c_type = map_datatype_SQL_to_C(sql_type);
retcode = SQLBindParameter(hstmt, i + 1, INPUT, c_type, sql_type, bytes, decimals, (PTR)(1+i), 0, &len);
}
//Execute the insert query.
retcode = SQLExecute(hstmt);
// Check for EOD marker.
retcode = SQLParamData(hstmt, &pToken);
//Loop, sending the data in segments of 124
int buffer_size = sizeof(wchar_t);
int chunk = 124;
for (i = 0; i < NumParams; i++)
{
size_t length = wcslen((wchar_t*)params[i]);
blob = (wchar_t*)params[i];
while (retcode == SQL_NEED_DATA)
{
while (length > chunk)
{
retcode = SQLPutData(hstmt, blob, static_cast<SQLLEN>(chunk * buffer_size));
extract_error("", hstmt, SQL_HANDLE_STMT);
blob += chunk;
length -= chunk;
}
retcode = SQLPutData(hstmt, blob, length * buffer_size);
extract_error("", hstmt, SQL_HANDLE_STMT);
}
retcode = SQLParamData(hstmt, &pToken);
}
return SQL_SUCCEEDED(retcode);
}
short map_datatype_SQL_to_C(short sql_type)
{
switch (sql_type)
{
case SQL_TINYINT:return SQL_C_TINYINT;
case SQL_LONGVARBINARY://-4
case SQL_VARBINARY://-3
case SQL_WLONGVARCHAR://-10
case SQL_WVARCHAR: //-9
case SQL_WCHAR: //-8
return SQL_C_WCHAR;// = SQL_C_TCHAR -8
case SQL_BIT:return SQL_C_BIT;
case SQL_BINARY://-2
return SQL_C_BINARY;
case SQL_DOUBLE://8 - FLOAT, DOUBLE
return SQL_C_DOUBLE;
case SQL_SMALLINT://5
return SQL_C_SHORT;
case SQL_FLOAT://6
case SQL_REAL: //7
return SQL_C_FLOAT;//7
case SQL_INTEGER://4
return SQL_C_LONG;
case SQL_CHAR://1
case SQL_VARCHAR://12
case SQL_DECIMAL://3
case SQL_NUMERIC://2
return SQL_C_CHAR;// CHAR, VARCHAR, DECIMAL, NUMERIC
case SQL_TYPE_TIMESTAMP:return SQL_C_TIMESTAMP;
case SQL_TIME:return SQL_C_TIME;
case SQL_DATE:return SQL_C_DATE;
case SQL_GUID:return SQL_C_GUID;
default:return SQL_UNKNOWN_TYPE;
}
return 0;
}

What is missing from the AES Validation Standard Pseudocode for the Monte Carlo Tests?

I'm trying to use the prescribed validation procedure for AES-128 in CBC mode, as defined in the NIST AESAVS standard. One of the more important parts of the test suite is the Monte Carlo test, which provides an algorithm for generating many 10000 pseudorandom tests cases such that it is unlikely that a hardcoded circuit could fake AES. The algorithm pseudocode therein appears to be taking some liberties with variable scope and definition, so I am hoping someone could help me fill in the missing information to interpret this correctly.
The verbatim algorithm for the 128-bit key case is as follows:
Key[0] = Key
IV[0] = IV
PT[0] = PT
For i = 0 to 99
Output Key[i]
Output IV[i]
Output PT[0]
For j = 0 to 999
If ( j=0 )
CT[j] = AES(Key[i], IV[i], PT[j])
PT[j+1] = IV[i]
Else
CT[j] = AES(Key[i], PT[j])
PT[j+1] = CT[j-1]
Output CT[j]
Key[i+1] = Key[i] xor CT[j]
IV[i+1] = CT[j]
PT[0] = CT[j-1]
For the above pseudocode, starting with these initial values:
Key = 9dc2c84a37850c11699818605f47958c
IV = 256953b2feab2a04ae0180d8335bbed6
PT = 2e586692e647f5028ec6fa47a55a2aab
The first three iterations of the outer loop should output:
KEY = 9dc2c84a37850c11699818605f47958c
IV = 256953b2feab2a04ae0180d8335bbed6
PLAINTEXT = 2e586692e647f5028ec6fa47a55a2aab
CIPHERTEXT = 1b1ebd1fc45ec43037fd4844241a437f
KEY = 86dc7555f3dbc8215e6550247b5dd6f3
IV = 1b1ebd1fc45ec43037fd4844241a437f
PLAINTEXT = c1b77ed52521525f0a4ba341bdaf51d9
CIPHERTEXT = bf43583a665fa45fdee831243a16ea8f
KEY = 399f2d6f95846c7e808d6100414b3c7c
IV = bf43583a665fa45fdee831243a16ea8f
PLAINTEXT = 7cbeea19157ec7bbf6289e2dff5e8ee4
CIPHERTEXT = 5464e1900f81e06f67139456da25fc09
It looks like we are using j outside of the inner loop, which I believe is the source of the confusion. I had originally assumed that this meant whatever the final value of the ciphertext CT was (CT[999]), which would lead me to believe that the plaintext for the next outer loop PT[0] is initialized to CT[998]. However, this interpretation doesn't match the expected outputs given.
I also thought that maybe brackets are not indicating an array of values here, but rather they represent the time steps relative to now. However, this also makes referencing j outside of the loop confusing. If the loop has expired, then is i or j the current time?
Am I missing some crucial step here? Is there a typo (there is no errata in the document)?
Could anyone with some experience on the matter comment on the appropriate interpretation?
Some months ago I tried to get the AES CBC MonteCarlo running on Java. I encountered the same problems but in the end I could find a complete and running solution that meets the official NIST vector results.
Before I start - your inital test vector seems to be an own vector but not the one provided by NIST - here is the link to the official NIST-website with all AES testvectors:
NIST-Website: https://csrc.nist.gov/Projects/cryptographic-algorithm-validation-program/Block-Ciphers Montecarlo testvectors: https://csrc.nist.gov/CSRC/media/Projects/Cryptographic-Algorithm-Validation-Program/documents/aes/aesmct.zip
My test will start with these data:
[ENCRYPT]
COUNT = 0
KEY = 8809e7dd3a959ee5d8dbb13f501f2274
IV = e5c0bb535d7d54572ad06d170a0e58ae
PLAINTEXT = 1fd4ee65603e6130cfc2a82ab3d56c24
CIPHERTEXT = b127a5b4c4692d87483db0c3b0d11e64
and the function uses a "double" byte array for the inner and outer loop. I do not present the complete sourcode here on SO but the complete code is available in my GitHub repository https://github.com/java-crypto/Known_Answer_Tests with many other tests and test vector files. The encryption/decryption has to be done with NoPadding - don't use AES in default mode as in most
cases it would run with PKCS#5/#7 padding.
If you like you can run the code online (reduced to AES CBC 128 MonteCarlo) here: https://repl.it/#javacrypto/AesCbcMonteCarloTest#Main.java
The program will run the complete encryption and decryption test and does an additional cross-check (means the encryption result is checked
by a decryption and vice versa).
As it is some months ago that I took care of this I'm just offering my solution in Java code - hopefully it helps you in
your understanding of the NIST test procedure.
public static byte[] aes_cbc_mct_encrypt(byte[] PLAINTEXT, byte[] KEYinit, byte[] IVinit) throws Exception {
int i = 0; // outer loop
int j = 0; // inner loop
byte[][] KEY = new byte[101][128];
byte[][] IV = new byte[1001][128];
byte[][] PT = new byte[1001][128]; // plaintext
byte[][] CT = new byte[1001][128]; // ciphertext
byte[] CTsave = new byte[256]; // nimmt den letzten ct fuer nutzung als neuen iv auf
// init
int KEYLENGTH = KEYinit.length * 8;
KEY[0] = KEYinit;
IV[0] = IVinit;
PT[0] = PLAINTEXT;
for (i = 0; i < 100; i++) {
for (j = 0; j < 1000; j++) {
if (j == 0) {
CT[j] = aes_cbc_encrypt(PT[j], KEY[i], IV[i]);
CTsave = CT[j]; // sicherung fuer naechsten iv
PT[j + 1] = IV[i];
} else {
IV[i] = CTsave;
CT[j] = aes_cbc_encrypt(PT[j], KEY[i], IV[i]);
CTsave = CT[j];
PT[j + 1] = CT[j - 1];
}
}
j = j - 1; // correction of loop counter
if (KEYLENGTH == 128) {
KEY[i + 1] = xor(KEY[i], CT[j]);
}
if (KEYLENGTH == 192) {
KEY[i + 1] = xor192(KEY[i], CT[j - 1], CT[j]);
}
if (KEYLENGTH == 256) {
KEY[i + 1] = xor256(KEY[i], CT[j - 1], CT[j]);
}
IV[i + 1] = CT[j];
PT[0] = CT[j - 1];
ctCalculated[i] = CT[j].clone();
}
return CT[j];
}
public static byte[] xor(byte[] a, byte[] b) {
// nutzung in der mctCbcEncrypt und mctCbcDecrypt methode
byte[] result = new byte[Math.min(a.length, b.length)];
for (int i = 0; i < result.length; i++) {
result[i] = (byte) (((int) a[i]) ^ ((int) b[i]));
}
return result;
}
public static byte[] aes_cbc_encrypt(byte[] plaintextByte, byte[] keyByte, byte[] initvectorByte) throws NoSuchAlgorithmException, NoSuchPaddingException, InvalidKeyException, InvalidAlgorithmParameterException, BadPaddingException, IllegalBlockSizeException {
byte[] ciphertextByte = null;
SecretKeySpec keySpec = new SecretKeySpec(keyByte, "AES");
IvParameterSpec ivKeySpec = new IvParameterSpec(initvectorByte);
Cipher aesCipherEnc = Cipher.getInstance("AES/CBC/NOPADDING");
aesCipherEnc.init(Cipher.ENCRYPT_MODE, keySpec, ivKeySpec);
ciphertextByte = aesCipherEnc.doFinal(plaintextByte);
return ciphertextByte;
}

cryptoJS AES encrypt returning a wrong decryption [duplicate]

How to convert from Hex string to ASCII string in JavaScript?
Ex:
32343630 it will be 2460
function hex2a(hexx) {
var hex = hexx.toString();//force conversion
var str = '';
for (var i = 0; i < hex.length; i += 2)
str += String.fromCharCode(parseInt(hex.substr(i, 2), 16));
return str;
}
hex2a('32343630'); // returns '2460'
Another way to do it (if you use Node.js):
var input = '32343630';
const output = Buffer.from(input, 'hex');
log(input + " -> " + output); // Result: 32343630 -> 2460
For completeness sake the reverse function:
function a2hex(str) {
var arr = [];
for (var i = 0, l = str.length; i < l; i ++) {
var hex = Number(str.charCodeAt(i)).toString(16);
arr.push(hex);
}
return arr.join('');
}
a2hex('2460'); //returns 32343630
You can use this..
var asciiVal = "32343630".match(/.{1,2}/g).map(function(v){
return String.fromCharCode(parseInt(v, 16));
}).join('');
document.write(asciiVal);
** for Hexa to String**
let input = '32343630';
Note : let output = new Buffer(input, 'hex'); // this is deprecated
let buf = Buffer.from(input, "hex");
let data = buf.toString("utf8");
I found a useful function present in web3 library.
var hexString = "0x1231ac"
string strValue = web3.toAscii(hexString)
Update: Newer version of web3 has this function in utils
The functions now resides in utils:
var hexString = "0x1231ac"
string strValue = web3.utils.hexToAscii(hexString)
I've found that the above solution will not work if you have to deal with control characters like 02 (STX) or 03 (ETX), anything under 10 will be read as a single digit and throw off everything after. I ran into this problem trying to parse through serial communications. So, I first took the hex string received and put it in a buffer object then converted the hex string into an array of the strings like so:
buf = Buffer.from(data, 'hex');
l = Buffer.byteLength(buf,'hex');
for (i=0; i<l; i++){
char = buf.toString('hex', i, i+1);
msgArray.push(char);
}
Then .join it
message = msgArray.join('');
then I created a hexToAscii function just like in #Delan Azabani's answer above...
function hexToAscii(str){
hexString = str;
strOut = '';
for (x = 0; x < hexString.length; x += 2) {
strOut += String.fromCharCode(parseInt(hexString.substr(x, 2), 16));
}
return strOut;
}
then called the hexToAscii function on 'message'
message = hexToAscii(message);
This approach also allowed me to iterate through the array and slice into the different parts of the transmission using the control characters so I could then deal with only the part of the data I wanted.
Hope this helps someone else!
console.log(
"68656c6c6f20776f726c6421".match(/.{1,2}/g).reduce((acc,char)=>acc+String.fromCharCode(parseInt(char, 16)),"")
)
An optimized version of the implementation of the reverse function proposed by #michieljoris (according to the comments of #Beterraba and #Mala):
function a2hex(str) {
var hex = '';
for (var i = 0, l = str.length; i < l; i++) {
var hexx = Number(str.charCodeAt(i)).toString(16);
hex += (hexx.length > 1 && hexx || '0' + hexx);
}
return hex;
}
alert(a2hex('2460')); // display 32343630
I use this one, it seems more clear to me as I also receive data with spaces like '30 31 38 30 38 30' and the output is 018080
hexToString(hex: string): string {
return hex.split(' ').map(s => string.fromCharCode(parseInt(s,16))).join('');
}

Hashing, can't insert to hash table

struct googlePlayApp{
string name;
string category;
double rating;
int reviews;
googlePlayApp *next;
};
void appInsert(googlePlayApp &newApp, int &cmps) {
int slot = hash1(newApp.name, HASH_SIZE);
int cmps1 = 1;
googlePlayApp *tmp = appHash[slot];
if (tmp == 0)
appHash[slot] = &newApp;
else
{
while(tmp->next != 0)
{
tmp = tmp->next;
cmps1++;
}
tmp->next = &newApp;
}
cmps += cmps1;
}
while (getline(inFile, inputLine)) {
googlePlayApp newApp;
readSingleApp(inputLine, newApp);
appInsert(newApp, cmps);
linesRead++;
}
My program stops on the 65th iteration of the while loop....
64th for the appInsert call...
Why can't I get this to work?
This is a program where it reads a data file and stores it in a hash table and collision dealt with open addressing....
updated question
bool appFind(const string &name, googlePlayApp &foundApp, int &cmps) {
// Implement this function
int slot = hash1(name);
int cmps1 = 1;
googlePlayApp *tmp = appHash[slot];
while(tmp && tmp->name != name)
{
cmps1++;
tmp = tmp->next;
}
cmps += cmps1;
if(tmp)
{
foundApp.name = appHash[slot]->name;
foundApp.category = appHash[slot]->category;
foundApp.rating = appHash[slot]->rating;
foundApp.reviews = appHash[slot]->reviews;
}
else return false;
}
this is my serach function and I'm trying to search if an app exists based on the data I stored from my code above. I'm trying to search it by the hash addresses, but it's not working...

android hexToByteArray signed to unsigned

I've got the following function to make a conversion from a Hex String to a Byte array. Then, I calculate the Checksum:
private String CalcChecksum (String message) {
/**Get string's bytes*/
//byte[] bytes = DatatypeConverter.parseHexBinary(message.replaceAll("\\s","")).getBytes();
message = message.replaceAll("\\s","");
byte[] bytes = hexToByteArray(message);
byte b_checksum = 0;
for (int byte_index = 0; byte_index < bytes.length; byte_index++) {
b_checksum += bytes[byte_index];
}
int d_checksum = b_checksum; //Convert byte to int(2 byte)
int c2_checksum = 256 - d_checksum; //Hacer complemento a 2
String hexString = Integer.toHexString(c2_checksum); //Convertir el entero (decimal) a hexadecimal
return hexString;
}
public static byte[] hexToByteArray(String s) {
int len = s.length();
byte[] data = new byte[len / 2];
for (int i = 0; i < len; i += 2) {
data[i / 2] = (byte) ((Character.digit(s.charAt(i), 16) << 4) + Character.digit(s.charAt(i+1), 16));
}
return data;
}
Making some test, for example for the hex value "e0", the hexToByteArray is getting the value "-32". So the final returning value in the CalcChecksum is "17a".
What I need is to get unsigned values in the hexToByteArray function. This is because i need to send the Checksum in a hexString to a MCU where the Checksum is calculated with unsigned values, so isntead of get the "-32" value, it gets "224" and the final hex value is "7a" instead of "17a".
i think that doing some kind of conversion like when the byte result is a negative value, do something like 255 + "negative value" + 1. This will convert "-32" into "224".
The problem is that i'm trying to do it, but i'm having some errors making the conversions between bytes, int, etc...
So, how could i do?
For the moment, I think that this can be the solution.
Just including in the CalcChecksum function the next code after int d_checksum = b_checksum;:
if (d_checksum < 0) {
d_checksum = 255 + d_checksum + 1;
}

Resources