Kraken API: EAPI:Invalid nonce in C# / ASP.NET

I am trying to add an order in Kraken through the API call https://api.kraken.com/0/private/AddOrder, but I get an EAPI:Invalid nonce error when inserting a new order. Right now I insert only one order at a time on a button click, but there may be situations where multiple orders are inserted. I have tried many different ways of generating a nonce, but I still get the same error. Does anybody know what's wrong?
private JsonObject QueryPrivate(string a_sMethod, string props = null)
{
    // Generate a 64-bit nonce using a timestamp at tick resolution.
    Int64 nonce = DateTime.Now.Ticks;
    props = "nonce=" + nonce + props;
    string path = string.Format("/{0}/private/{1}", _version, a_sMethod);
    string address = _url + path;
    HttpWebRequest webRequest = (HttpWebRequest)WebRequest.Create(address);
    webRequest.ContentType = "application/x-www-form-urlencoded";
    webRequest.Method = "POST";
    webRequest.Headers.Add("API-Key", _key);
    byte[] base64DecodedSecret = Convert.FromBase64String(_secret);
    var np = nonce + Convert.ToChar(0) + props;
    var pathBytes = Encoding.UTF8.GetBytes(path);
    var hash256Bytes = sha256_hash(np);
    var z = new byte[pathBytes.Count() + hash256Bytes.Count()];
    pathBytes.CopyTo(z, 0);
    hash256Bytes.CopyTo(z, pathBytes.Count());
    var signature = getHash(base64DecodedSecret, z);
    webRequest.Headers.Add("API-Sign", Convert.ToBase64String(signature));
    if (props != null)
    {
        using (var writer = new StreamWriter(webRequest.GetRequestStream()))
        {
            writer.Write(props);
        }
    }
    // Make the request
    try
    {
        // Wait for RateGate
        _rateGate.WaitToProceed();
        using (WebResponse webResponse = webRequest.GetResponse())
        {
            using (Stream str = webResponse.GetResponseStream())
            {
                using (StreamReader sr = new StreamReader(str))
                {
                    string data = sr.ReadToEnd();
                    dynamic d = JObject.Parse(data);
                    return (JsonObject)JsonConvert.Import(data);
                }
            }
        }
    }
    catch (WebException wex)
    {
        using (HttpWebResponse response = (HttpWebResponse)wex.Response)
        {
            using (Stream str = response.GetResponseStream())
            {
                using (StreamReader sr = new StreamReader(str))
                {
                    string data = sr.ReadToEnd();
                    if (response.StatusCode != HttpStatusCode.InternalServerError)
                    {
                        throw;
                    }
                    return (JsonObject)JsonConvert.Import(data);
                }
            }
        }
    }
}

Some exchange platforms allow a float-typed nonce, but in your case you are using Kraken, and according to the Kraken API documentation:
nonce = always increasing unsigned 64 bit integer
so Kraken requires an integer nonce.
Why invalid nonce?
In my opinion, the issue occurs when you send more than one request with the same nonce. If you derive the nonce from a timestamp in seconds and convert it to an integer, you are effectively limited to one request per second, because the nonce only changes once per second.
To be able to send more than one request per second, one tip is to multiply the timestamp by 1000 before converting it to an integer and using it as the nonce:
nonce = integer(1000 * timestamp)
In that case you can send more than one request per second (because each nonce will be different), but remember that exchange platforms have safeguards in place to protect against abuse/DoS attacks.
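Building on that idea, here is a minimal sketch (not part of the original code) of a thread-safe, strictly increasing nonce generator; the class and member names are illustrative:
using System;
using System.Threading;

public static class NonceFactory
{
    private static long _lastNonce;

    // Returns a strictly increasing 64-bit nonce based on Unix time in
    // microseconds (ToUnixTimeMilliseconds needs .NET 4.6+). The
    // CompareExchange loop bumps the value if two threads collide.
    public static long Next()
    {
        long candidate = DateTimeOffset.UtcNow.ToUnixTimeMilliseconds() * 1000;
        while (true)
        {
            long last = Interlocked.Read(ref _lastNonce);
            long next = Math.Max(candidate, last + 1);
            if (Interlocked.CompareExchange(ref _lastNonce, next, last) == last)
                return next;
        }
    }
}
Something like NonceFactory.Next() could then replace the DateTime.Now.Ticks line in QueryPrivate, so concurrent order submissions never share a nonce.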

Related

Receive/Accept file in WebDAV from HttpWebRequest POST or PUT in ASP.NET

Suppose I have a sample upload method like this in POSTFile.aspx.
This method POSTs (uploads) a file to an HTTP WebDAV URL.
public static void HttpUploadFile(string url, string file, string paramName, string contentType, NameValueCollection nvc)
{
    log.Debug(string.Format("Uploading {0} to {1}", file, url));
    string boundary = "---------------------------" + DateTime.Now.Ticks.ToString("x");
    byte[] boundarybytes = System.Text.Encoding.ASCII.GetBytes("\r\n--" + boundary + "\r\n");
    HttpWebRequest wr = (HttpWebRequest)WebRequest.Create(url);
    wr.ContentType = "multipart/form-data; boundary=" + boundary;
    wr.Method = "POST";
    wr.KeepAlive = true;
    wr.Credentials = System.Net.CredentialCache.DefaultCredentials;
    Stream rs = wr.GetRequestStream();
    string formdataTemplate = "Content-Disposition: form-data; name=\"{0}\"\r\n\r\n{1}";
    foreach (string key in nvc.Keys)
    {
        rs.Write(boundarybytes, 0, boundarybytes.Length);
        string formitem = string.Format(formdataTemplate, key, nvc[key]);
        byte[] formitembytes = System.Text.Encoding.UTF8.GetBytes(formitem);
        rs.Write(formitembytes, 0, formitembytes.Length);
    }
    rs.Write(boundarybytes, 0, boundarybytes.Length);
    string headerTemplate = "Content-Disposition: form-data; name=\"{0}\"; filename=\"{1}\"\r\nContent-Type: {2}\r\n\r\n";
    string header = string.Format(headerTemplate, paramName, file, contentType);
    byte[] headerbytes = System.Text.Encoding.UTF8.GetBytes(header);
    rs.Write(headerbytes, 0, headerbytes.Length);
    FileStream fileStream = new FileStream(file, FileMode.Open, FileAccess.Read);
    byte[] buffer = new byte[4096];
    int bytesRead = 0;
    while ((bytesRead = fileStream.Read(buffer, 0, buffer.Length)) != 0)
    {
        rs.Write(buffer, 0, bytesRead);
    }
    fileStream.Close();
    byte[] trailer = System.Text.Encoding.ASCII.GetBytes("\r\n--" + boundary + "--\r\n");
    rs.Write(trailer, 0, trailer.Length);
    rs.Close();
    WebResponse wresp = null;
    try
    {
        wresp = wr.GetResponse();
        Stream stream2 = wresp.GetResponseStream();
        StreamReader reader2 = new StreamReader(stream2);
        log.Debug(string.Format("File uploaded, server response is: {0}", reader2.ReadToEnd()));
    }
    catch (Exception ex)
    {
        log.Error("Error uploading file", ex);
        if (wresp != null)
        {
            wresp.Close();
            wresp = null;
        }
    }
    finally
    {
        wr = null;
    }
}
It is called from here:
NameValueCollection nvc = new NameValueCollection();
nvc.Add("id", "TTR");
nvc.Add("btn-submit-photo", "Upload");
HttpUploadFile("http://your.server.com/upload",
    @"C:\test\test.jpg", "file", "image/jpeg", nvc);
Question 1: Shouldn't the URL be like "http://your.server.com/upload.aspx" instead of "http://your.server.com/upload"?
If I use a URL like "http://your.server.com/upload" I get a 405 Method Not Allowed error,
so it seems it should point to an actual page.
Question 2: How should I receive the POST and save the file in upload.aspx?
Can the file be uploaded directly to the remote server without any receiving page?
This question was about "File transfer to a WebDAV HTTP URL using POST or PUT".
Above is a sample POST method; similarly there can be a PUT method, which is slightly different from POST.
Question 1: Shouldn't the URL be like "http://your.server.com/upload.aspx" instead of "http://your.server.com/upload"?
For a novice like me, the main confusion is the URL. It depends entirely on how the WebDAV server wants to receive the POST or PUT request.
I think for the POST method there should be a receiving page that accepts the file and the other parameters from the POSTFile page and saves the file to disk.
I don't know about plain .NET code, but Web API has a built-in feature which can parse data like "multipart/form-data; boundary=---------------------------8d60ff73d4553cc".
The code below is just sample code:
[HttpPost]
public async Task<FileUploadDetails> Post()
{
    // File path
    var fileuploadPath = HttpContext.Current.Server.MapPath("~/UploadedFiles");
    var multiFormDataStreamProvider = new MultiFileUploadProvider(fileuploadPath);
    // Read the MIME multipart content asynchronously
    await Request.Content.ReadAsMultipartAsync(multiFormDataStreamProvider);
    string uploadingFileName = multiFormDataStreamProvider
        .FileData.Select(x => x.LocalFileName).FirstOrDefault();
    // Files
    foreach (MultipartFileData file in multiFormDataStreamProvider.FileData)
    {
        Debug.WriteLine(file.Headers.ContentDisposition.FileName);
        Debug.WriteLine("File path: " + file.LocalFileName);
    }
    // Form data
    foreach (var key in multiFormDataStreamProvider.FormData.AllKeys)
    {
        foreach (var val in multiFormDataStreamProvider.FormData.GetValues(key))
        {
            Debug.WriteLine(string.Format("{0}: {1}", key, val));
        }
    }
    // Create response
    return new FileUploadDetails
    {
        FilePath = uploadingFileName,
        FileName = Path.GetFileName(uploadingFileName),
        FileLength = new FileInfo(uploadingFileName).Length,
        FileCreatedTime = DateTime.Now.ToLongDateString()
    };
}
So in this case the URL in the POSTFile.aspx page should point to the API method, e.g.
"http://your.server.com/api/fileUpload"
where fileUpload is the API controller name.
If you are using the HTTP PUT method then either:
i) you want to receive it and handle it programmatically: write a PUT method similar to the POST method in the API class; or
ii) you want to save the file directly to a folder using PUT, so the URL in this case can be
"http://your.server.com/Imagefolder".
Yes, this can be done with some extra IIS configuration: create a virtual directory for the target folder, among a few other things.
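A minimal sketch of option ii), uploading via HTTP PUT with HttpWebRequest, might look like this (the URL is a placeholder, and the target folder/virtual directory must be configured to allow PUT):
using System;
using System.IO;
using System.Net;

class WebDavPutSample
{
    static void UploadViaPut(string url, string filePath)
    {
        HttpWebRequest request = (HttpWebRequest)WebRequest.Create(url);
        request.Method = "PUT";
        request.Credentials = CredentialCache.DefaultCredentials;
        request.ContentType = "application/octet-stream";
        using (FileStream file = File.OpenRead(filePath))
        using (Stream body = request.GetRequestStream())
        {
            // Stream the file without loading it all into memory.
            file.CopyTo(body);
        }
        using (WebResponse response = request.GetResponse())
        {
            // A WebDAV server typically answers 201 Created or 204 No Content.
        }
    }
}
Usage would be something like UploadViaPut("http://your.server.com/Imagefolder/test.jpg", @"C:\test\test.jpg").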

Get image from URL and upload to Amazon S3

I'd like to load an image directly from a URL, but without saving it on the server; I want to upload it directly from memory to the Amazon S3 server.
This is my code:
Dim wc As New WebClient
Dim fileStream As IO.Stream = wc.OpenRead("http://www.domain.com/image.jpg")
Dim request As New PutObjectRequest()
request.BucketName = "mybucket"
request.Key = "file.jpg"
request.InputStream = fileStream
client.PutObject(request)
The Amazon API gives me the error "Could not determine content length". The stream fileStream ends up as a "System.Net.ConnectStream", which I'm not sure is correct.
The exact same code works with files from HttpPostedFile, but I need to do it this way now.
Any ideas how I can convert the stream into what the Amazon API expects (with the length intact)?
I had the same problem when using the GetObjectResponse() method and its ResponseStream property to copy a file from one folder to another in the same bucket. I noticed that the AWS SDK (2.3.45) has some faults, like another GetObjectResponse() method called WriteResponseStreamToFile that simply doesn't work; these gaps need workarounds.
I solved the problem by reading the file into a byte array and putting it into a MemoryStream object.
Try this (C# code):
WebClient wc = new WebClient();
Stream fileStream = wc.OpenRead("http://www.domain.com/image.jpg");
byte[] fileBytes = fileStream.ToArrayBytes();
PutObjectRequest request = new PutObjectRequest();
request.BucketName = "mybucket";
request.Key = "file.jpg";
request.InputStream = new MemoryStream(fileBytes);
client.PutObject(request);
The extension method:
public static byte[] ToArrayBytes(this Stream input)
{
    byte[] buffer = new byte[16 * 1024];
    using (MemoryStream ms = new MemoryStream())
    {
        int read;
        while ((read = input.Read(buffer, 0, buffer.Length)) > 0)
        {
            ms.Write(buffer, 0, read);
        }
        return ms.ToArray();
    }
}
You can also create a MemoryStream without the intermediate byte array, but after the first PutObject to S3 the MemoryStream will be discarded. If you need to put other objects, I recommend the first option:
WebClient wc = new WebClient();
Stream fileStream = wc.OpenRead("http://www.domain.com/image.jpg");
MemoryStream fileMemoryStream = fileStream.ToMemoryStream();
PutObjectRequest request = new PutObjectRequest();
request.BucketName = "mybucket";
request.Key = "file.jpg";
request.InputStream = fileMemoryStream ;
client.PutObject(request);
The extension method:
public static MemoryStream ToMemoryStream(this Stream input)
{
    byte[] buffer = new byte[16 * 1024];
    int read;
    MemoryStream ms = new MemoryStream();
    while ((read = input.Read(buffer, 0, buffer.Length)) > 0)
    {
        ms.Write(buffer, 0, read);
    }
    return ms;
}
I had the same problem in a similar scenario.
The reason for the error is that to upload an object the SDK needs to know the whole content length that is going to be uploaded. To be able to obtain stream length it must be seekable, but the stream returned from WebClient is not. To indicate the expected length set Headers.ContentLength in PutObjectRequest. The SDK will use this value if it cannot determine length from the stream object.
To make your code work, obtain the content length from the response headers returned by the call made by WebClient, then set PutObjectRequest.Headers.ContentLength. Of course this relies on the server returning a correct Content-Length value.
Dim wc As New WebClient
Dim fileStream As IO.Stream = wc.OpenRead("http://www.example.com/image.jpg")
Dim contentLength As Long = Long.Parse(wc.ResponseHeaders("Content-Length"))
Dim request As New PutObjectRequest()
request.BucketName = "mybucket"
request.Key = "file.jpg"
request.InputStream = fileStream
request.Headers.ContentLength = contentLength
client.PutObject(request)
I came up with a solution that uses UploadPart when the length is not available by any other means, plus this does not load the entire file into memory.
if (args.DocumentContents.CanSeek)
{
    PutObjectRequest r = new PutObjectRequest();
    r.InputStream = args.DocumentContents;
    r.BucketName = s3Id.BucketName;
    r.Key = s3Id.ObjectKey;
    foreach (var item in args.CustomData)
    {
        r.Metadata[item.Key] = item.Value;
    }
    await S3Client.PutObjectAsync(r);
}
else
{
    // If the stream does not allow seeking, the S3 client will throw:
    //   Amazon.S3.AmazonS3Exception : Could not determine content length
    // As a workaround, when the length property cannot be used, chunk the
    // file into sections and use UploadPart, so the entire file does not
    // have to be loaded into memory as a single MemoryStream.
    var r = new InitiateMultipartUploadRequest();
    r.BucketName = s3Id.BucketName;
    r.Key = s3Id.ObjectKey;
    foreach (var item in args.CustomData)
    {
        r.Metadata[item.Key] = item.Value;
    }
    var multipartResponse = await S3Client.InitiateMultipartUploadAsync(r);
    try
    {
        var completeRequest = new CompleteMultipartUploadRequest
        {
            UploadId = multipartResponse.UploadId,
            BucketName = s3Id.BucketName,
            Key = s3Id.ObjectKey,
        };
        // Just using this size because it is the max for Azure File Share,
        // but it could be any size for S3, even a configured value.
        const int blockSize = 4194304;
        // BinaryReader gives us access to ReadBytes.
        using (var reader = new BinaryReader(args.DocumentContents))
        {
            var partCounter = 1;
            while (true)
            {
                byte[] buffer = reader.ReadBytes(blockSize);
                if (buffer.Length == 0)
                    break;
                using (MemoryStream uploadChunk = new MemoryStream(buffer))
                {
                    uploadChunk.Position = 0;
                    var uploadRequest = new UploadPartRequest
                    {
                        BucketName = s3Id.BucketName,
                        Key = s3Id.ObjectKey,
                        UploadId = multipartResponse.UploadId,
                        PartNumber = partCounter,
                        InputStream = uploadChunk,
                    };
                    // Could call UploadPart on multiple threads instead of using await,
                    // but that would load more data into memory, which might be too much.
                    var part2Task = await S3Client.UploadPartAsync(uploadRequest);
                    completeRequest.AddPartETags(part2Task);
                }
                partCounter++;
            }
            var completeResponse = await S3Client.CompleteMultipartUploadAsync(completeRequest);
        }
    }
    catch
    {
        await S3Client.AbortMultipartUploadAsync(s3Id.BucketName, s3Id.ObjectKey,
            multipartResponse.UploadId);
        throw;
    }
}

EndGetResponse can only be called once for each asynchronous operation?

I am trying to implement a WebRequest and return to the caller synchronously.
I have tried various implementations, and I think this one is the most appropriate so far.
Unfortunately the following code throws an InvalidOperationException with the message
EndGetResponse can only be called once for each asynchronous operation
I have struggled a lot to make this work, and it is really vital for the library I am building to use WebRequest like this.
The following code is intended for the Windows Phone 8 and Windows 8 platforms.
I already understand the async/await pattern and have used it, but it is REALLY vital for me to use a synchronous version of the web service request in this part of my library.
The code:
public void ExecuteRequest(string url, string requestData)
{
    WebRequest request = WebRequest.Create(new Uri(url));
    request.Method = "POST";
    request.ContentType = "application/x-www-form-urlencoded";
    request.Headers["Header-Key"] = "AKey";
    DTOWebRequest webRequestState = new DTOWebRequest
    {
        Data = requestData,
        Request = request
    };
    ManualResetEventSlim resetEventSlim = new ManualResetEventSlim(false);
    // Begin the request using a delegate
    request.BeginGetRequestStream(ar =>
    {
        DTOWebRequest requestDataObj = (DTOWebRequest)ar.AsyncState;
        HttpWebRequest requestStream = (HttpWebRequest)requestDataObj.Request;
        string data = requestDataObj.Data;
        // Convert the string into a byte array.
        byte[] postBytes = Encoding.UTF8.GetBytes(data);
        try
        {
            // End the operation
            using (Stream endGetRequestStream = requestStream.EndGetRequestStream(ar))
            {
                // Write to the request stream.
                endGetRequestStream.Write(postBytes, 0, postBytes.Length);
            }
            // Get the response using a delegate
            requestStream.BeginGetResponse(result =>
            {
                DTOWebRequest requestDataObjResult = (DTOWebRequest)ar.AsyncState;
                HttpWebRequest requestResult = (HttpWebRequest)requestDataObjResult.Request;
                try
                {
                    // End the operation
                    using (HttpWebResponse response = (HttpWebResponse)requestResult.EndGetResponse(ar)) // Here the exception is thrown.
                    {
                        HttpStatusCode rcode = response.StatusCode;
                        Stream streamResponse = response.GetResponseStream();
                        StreamReader streamRead = new StreamReader(streamResponse);
                        // The response
                        string responseString = streamRead.ReadToEnd();
                        if (!string.IsNullOrWhiteSpace(requestDataObjResult.FileName))
                        {
                            FileRepository fileRepo = new FileRepository();
                            fileRepo.Delete(requestDataObjResult.FileName);
                        }
                        Debug.WriteLine("Response : {0}", responseString);
                    }
                }
                catch (WebException webEx)
                {
                    WebExceptionStatus status = webEx.Status;
                    WebResponse responseEx = webEx.Response;
                    Debug.WriteLine(webEx.ToString());
                }
                resetEventSlim.Set(); // Signal to return handler
            }, requestDataObj);
        }
        catch (WebException webEx)
        {
            WebExceptionStatus status = webEx.Status;
            WebResponse responseEx = webEx.Response;
            Debug.WriteLine(webEx.ToString());
        }
    }, webRequestState);
    resetEventSlim.Wait(5000); // Wait either for Set() or a 5 second timeout.
}
Thank you.
You can't do synchronous web calls on Windows Phone, and that's why you aren't:
if you were, you'd be calling GetRequestStream instead of BeginGetRequestStream/EndGetRequestStream.
The only reason to be synchronous on Windows Phone is to block the UI, which is a very bad idea.
You should use HttpClient and async/await instead.
But if you really think you should (and can) make these calls block synchronously on Windows Phone, you can always try something like this:
public void ExecuteRequest(string url, string requestData)
{
    try
    {
        WebRequest request = WebRequest.Create(new Uri(url));
        request.Method = "POST";
        request.ContentType = "application/x-www-form-urlencoded";
        request.Headers["Header-Key"] = "AKey";
        // Convert the string into a byte array.
        byte[] postBytes = Encoding.UTF8.GetBytes(requestData);
        using (var requestStream = request.EndGetRequestStream(request.BeginGetRequestStream(null, null)))
        {
            // Write to the request stream.
            requestStream.Write(postBytes, 0, postBytes.Length);
        }
        using (var response = request.EndGetResponse(request.BeginGetResponse(null, null)))
        {
            using (var streamRead = new StreamReader(response.GetResponseStream()))
            {
                // The response
                string responseString = streamRead.ReadToEnd();
                // (The FileRepository cleanup from the original callback would go here,
                // using whatever state your own code tracks the file name with.)
                Debug.WriteLine("Response : {0}", responseString);
            }
        }
    }
    catch (WebException webEx)
    {
        WebExceptionStatus status = webEx.Status;
        WebResponse responseEx = webEx.Response;
        Debug.WriteLine(webEx.ToString());
    }
}
But I really think you should revise your decision/need.
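For completeness, here is a minimal sketch of the HttpClient / async-await alternative mentioned above (the header name, key, and content type come from the question; everything else is illustrative and not a drop-in replacement for the synchronous signature):
using System;
using System.Net.Http;
using System.Text;
using System.Threading.Tasks;

class HttpClientSample
{
    static readonly HttpClient Client = new HttpClient();

    public static async Task<string> ExecuteRequestAsync(string url, string requestData)
    {
        using (var request = new HttpRequestMessage(HttpMethod.Post, url))
        {
            request.Headers.Add("Header-Key", "AKey");
            request.Content = new StringContent(requestData, Encoding.UTF8,
                "application/x-www-form-urlencoded");
            using (HttpResponseMessage response = await Client.SendAsync(request))
            {
                response.EnsureSuccessStatusCode();
                return await response.Content.ReadAsStringAsync();
            }
        }
    }
}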

Download large file in small chunks in C#

I need to download a file that is more than 25 MB, but my network only allows requesting 25 MB at a time.
I am using the following code:
const long DefaultSize = 26214400;
long Chunk = 26214400;
long offset = 0;
byte[] bytesInStream;

public void Download(string url, string filename)
{
    long size = Size(url);
    int blocksize = Convert.ToInt32(size / DefaultSize);
    int remainder = Convert.ToInt32(size % DefaultSize);
    if (remainder > 0) { blocksize++; }
    FileStream fileStream = File.Create(@"D:\Download TEST\" + filename);
    for (int i = 0; i < blocksize; i++)
    {
        if (i == blocksize - 1)
        {
            Chunk = remainder;
        }
        HttpWebRequest req = (HttpWebRequest)System.Net.WebRequest.Create(url);
        req.Method = "GET";
        req.AddRange(Convert.ToInt32(offset), Convert.ToInt32(Chunk + offset));
        HttpWebResponse resp = (HttpWebResponse)req.GetResponse();
        // StreamReader sr = new StreamReader(resp.GetResponseStream());
        using (Stream responseStream = resp.GetResponseStream())
        {
            bytesInStream = new byte[Chunk];
            responseStream.Read(bytesInStream, 0, (int)bytesInStream.Length);
            // Use FileStream object to write to the specified file
            fileStream.Seek((int)offset, SeekOrigin.Begin);
            fileStream.Write(bytesInStream, 0, bytesInStream.Length);
        }
        offset += Chunk;
    }
    fileStream.Close();
}

public long Size(string url)
{
    System.Net.WebRequest req = System.Net.HttpWebRequest.Create(url);
    req.Method = "HEAD";
    System.Net.WebResponse resp = req.GetResponse();
    resp.Close();
    return resp.ContentLength;
}
It writes the content to disk, but the downloaded file does not work.
You should check how much was actually read before writing, something like this (and you don't need to track the offset and seek; the position advances automatically when you write):
int read;
do
{
    read = responseStream.Read(bytesInStream, 0, (int)bytesInStream.Length);
    if (read > 0)
        fileStream.Write(bytesInStream, 0, read);
}
while (read > 0);
There are similar SO questions that might help you:
Segmented C# file downloader
and
How to open multiple connections to download single file?
Also see this CodeProject article:
http://www.codeproject.com/Tips/307548/Resume-Suppoert-Downloading
The range is zero-based and the upper bound is inclusive, so you should subtract 1 from it:
request.Headers.Range = new System.Net.Http.Headers.RangeHeaderValue(offset, chunkSize + offset - 1);
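Applied to the HttpWebRequest code in the question, the same correction would look roughly like this (a sketch only; url, offset, and Chunk are the question's variables):
HttpWebRequest req = (HttpWebRequest)WebRequest.Create(url);
req.Method = "GET";
// The range is inclusive and zero-based, so the last byte of this chunk
// is offset + Chunk - 1 (long overload of AddRange, .NET 4 and later).
req.AddRange(offset, offset + Chunk - 1);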
I published a correct code fragment at the following link:
https://stackoverflow.com/a/48019611/1099716
Akka Streams can help download a file in small chunks from a System.IO.Stream using multithreading: https://getakka.net/articles/intro/what-is-akka.html
The Download method appends the bytes to the file starting at fileStart. If the file does not exist yet, fileStart must be 0.
using Akka.Actor;
using Akka.IO;
using Akka.Streams;
using Akka.Streams.Dsl;
using Akka.Streams.IO;

private static Sink<ByteString, Task<IOResult>> FileSink(string filename)
{
    return Flow.Create<ByteString>()
        .ToMaterialized(FileIO.ToFile(new FileInfo(filename), FileMode.Append), Keep.Right);
}

private async Task Download(string path, Uri uri, long fileStart)
{
    using (var system = ActorSystem.Create("system"))
    using (var materializer = system.Materializer())
    {
        HttpWebRequest request = WebRequest.Create(uri) as HttpWebRequest;
        request.AddRange(fileStart);
        using (WebResponse response = request.GetResponse())
        {
            Stream stream = response.GetResponseStream();
            await StreamConverters.FromInputStream(() => stream, chunkSize: 1024)
                .RunWith(FileSink(path), materializer);
        }
    }
}
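A hypothetical usage of the Download method above, resuming a partially downloaded file (the path and URL are placeholders, and System.IO is assumed in addition to the usings shown):
private async Task DownloadExample()
{
    string target = @"C:\temp\big.bin";
    // Resume from the current length of the partial file, or start at byte 0.
    long fileStart = File.Exists(target) ? new FileInfo(target).Length : 0;
    await Download(target, new Uri("http://example.com/big.bin"), fileStart);
}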

High response time from new StreamReader(webRsp.GetResponseStream())

I am trying to get a response from a web URL, but when we put some load on it, let's say 100 users, this line of code works very slowly. After reading the response with the code below, I have to send myXML to the calling function for further use.
using (StreamReader rspStr = new StreamReader(webRsp.GetResponseStream()))
{
    myXML = rspStr.ReadToEnd().Trim();
}
Is there any way to get a good response time even under a load of 100 or 1000 users?
I would try an async approach, just to avoid blocking execution on stream opening or slow network waits; everything is explained here:
Making Asynchronous Requests
Snippet:
WebRequest wreq = WebRequest.Create(httpSite);
// Create the state object.
RequestState rs = new RequestState();
// Put the request into the state object so it can be passed around.
rs.Request = wreq;
// Issue the async request.
IAsyncResult r = (IAsyncResult)wreq.BeginGetResponse(
    new AsyncCallback(RespCallback), rs);

private static void RespCallback(IAsyncResult ar)
{
    // Get the RequestState object from the async result.
    RequestState rs = (RequestState)ar.AsyncState;
    // Get the WebRequest from RequestState.
    WebRequest req = rs.Request;
    // Call EndGetResponse, which produces the WebResponse object
    // that came from the request issued above.
    WebResponse resp = req.EndGetResponse(ar);
    // Start reading data from the response stream.
    Stream ResponseStream = resp.GetResponseStream();
    // Store the response stream in RequestState to read
    // the stream asynchronously.
    rs.ResponseStream = ResponseStream;
    // Pass rs.BufferRead to BeginRead. Read data into rs.BufferRead.
    IAsyncResult iarRead = ResponseStream.BeginRead(rs.BufferRead, 0,
        BUFFER_SIZE, new AsyncCallback(ReadCallBack), rs);
}

private static void ReadCallBack(IAsyncResult asyncResult)
{
    // Get the RequestState object from AsyncResult.
    RequestState rs = (RequestState)asyncResult.AsyncState;
    // Retrieve the ResponseStream that was set in RespCallback.
    Stream responseStream = rs.ResponseStream;
    // Read rs.BufferRead to verify that it contains data.
    int read = responseStream.EndRead(asyncResult);
    if (read > 0)
    {
        // Prepare a Char array buffer for converting to Unicode.
        Char[] charBuffer = new Char[BUFFER_SIZE];
        // Convert byte stream to Char array and then to String.
        // len contains the number of characters converted to Unicode.
        int len =
            rs.StreamDecode.GetChars(rs.BufferRead, 0, read, charBuffer, 0);
        String str = new String(charBuffer, 0, len);
        // Append the recently read data to the RequestData StringBuilder
        // object contained in RequestState.
        rs.RequestData.Append(
            Encoding.ASCII.GetString(rs.BufferRead, 0, read));
        // Continue reading data until
        // responseStream.EndRead returns 0.
        IAsyncResult ar = responseStream.BeginRead(
            rs.BufferRead, 0, BUFFER_SIZE,
            new AsyncCallback(ReadCallBack), rs);
    }
    else
    {
        if (rs.RequestData.Length > 0)
        {
            // Display data to the console.
            string strContent;
            strContent = rs.RequestData.ToString();
        }
        // Close down the response stream.
        responseStream.Close();
        // Set the ManualResetEvent so the main thread can exit.
        allDone.Set();
    }
    return;
}
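The snippet assumes a RequestState helper class and a couple of statics defined elsewhere in the linked article; a minimal sketch of what they might look like (field names follow the sample, buffer size is illustrative, usings System.Text, System.IO, System.Net and System.Threading assumed):
public class RequestState
{
    public const int BUFFER_SIZE = 1024;
    public StringBuilder RequestData = new StringBuilder();
    public byte[] BufferRead = new byte[BUFFER_SIZE];
    public WebRequest Request;
    public Stream ResponseStream;
    // Decoder used to turn raw bytes into characters as they arrive.
    public Decoder StreamDecode = Encoding.UTF8.GetDecoder();
}

// In the class that hosts RespCallback/ReadCallBack:
const int BUFFER_SIZE = RequestState.BUFFER_SIZE;
static ManualResetEvent allDone = new ManualResetEvent(false);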
