gRPC Python - Error received from peer ipv4

I am trying to call a server method from a client using gRPC, but I get the error below:
Error received from peer ipv4
I have been trying to find a solution for more than a day now and cannot figure it out. Any help is really appreciated.
Server proto file (chunk.proto):
syntax = "proto3";
service FileServer {
rpc upload_chunk_stream(stream Chunk) returns (Reply) {}
rpc upload_single_chunk(Chunk) returns (Reply) {}
rpc download_chunk_stream(Request) returns (stream Chunk) {}
rpc get_available_memory_bytes(Empty_request) returns (Reply_double) {}
rpc get_stored_hashes_list_iterator(Empty_request) returns (stream Reply_string) {}
rpc hash_id_exists_in_memory(Request) returns (Reply) {}
}
message Chunk {
bytes buffer = 1;
}
message Request {
string hash_id = 1;
}
message Reply {
bool success = 1;
}
message Reply_double {
double bytes = 1;
}
message Empty_request {}
message Reply_string {
string hash_id = 1;
}
Generated code on the server side; files generated: chunk_pb2_grpc.py and chunk_pb2.py.
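(For reference, these files are typically generated with something like python -m grpc_tools.protoc -I. --python_out=. --grpc_python_out=. chunk.proto.)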
Below are server files:
StorageManager.py
import sys
sys.path.append('./')
import grpc
import time
import chunk_pb2, chunk_pb2_grpc
from MemoryManager import MemoryManager

CHUNK_SIZE_ = 1024

class StorageManagerServer(chunk_pb2_grpc.FileServerServicer):
    def __init__(self, memory_node_bytes, page_memory_size_bytes):
        self.memory_manager = MemoryManager(memory_node_bytes, page_memory_size_bytes)

    def upload_chunk_stream(self, request_iterator, context):
        hash_id = ""
        chunk_size = 0
        number_of_chunks = 0
        print("inside")
        # Read the upload parameters from the client's invocation metadata.
        for key, value in context.invocation_metadata():
            if key == "key-hash-id":
                hash_id = value
            elif key == "key-chunk-size":
                chunk_size = int(value)
            elif key == "key-number-of-chunks":
                number_of_chunks = int(value)
        assert hash_id != ""
        assert chunk_size != 0
        assert number_of_chunks != 0
        success = self.memory_manager.put_data(request_iterator, hash_id, number_of_chunks, False)
        return chunk_pb2.Reply(success=success)
StartNodeExample.py
import sys
sys.path.append('./')
from StorageManager import StorageManagerServer
import grpc
import time
import chunk_pb2, chunk_pb2_grpc
from concurrent import futures

if __name__ == '__main__':
    print("Starting Storage Manager.")
    # A single worker thread means only one RPC is handled at a time.
    server_grpc = grpc.server(futures.ThreadPoolExecutor(max_workers=1))
    total_memory_node_bytes = 1 * 1024 * 1024 * 1024  # start with 1 GB
    CHUNK_SIZE_ = 1024
    total_page_memory_size_bytes = CHUNK_SIZE_
    chunk_pb2_grpc.add_FileServerServicer_to_server(
        StorageManagerServer(total_memory_node_bytes, total_page_memory_size_bytes), server_grpc)
    # port = 9999
    # server_grpc.add_insecure_port(f'[::]:{port}')
    server_grpc.add_insecure_port('[::]:9999')
    server_grpc.start()
    print("Storage Manager is READY.")
    try:
        while True:
            time.sleep(60 * 60 * 24)  # sleep "forever"
    except KeyboardInterrupt:
        server_grpc.stop(0)
Client side proto file (chunk.proto):
syntax = "proto3";
service FileServer {
rpc upload_chunk_stream(stream Chunk) returns (Reply) {}
rpc upload_single_chunk(Chunk) returns (Reply) {}
rpc download_chunk_stream(Request) returns (stream Chunk) {}
}
message Chunk {
bytes buffer = 1;
}
message Request {
string hash_id = 1;
}
message Reply {
bool success = 1;
}
Generated code on the client side; files generated: chunk_pb2_grpc.py and chunk_pb2.py.
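(Note that the client's copy of the proto is a subset of the server's. That in itself is fine: gRPC resolves methods by their full path, e.g. /FileServer/upload_chunk_stream, so a client stub only needs definitions for the methods it actually calls, as long as the service and message definitions match.)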
Below are client side files:
grpc_client.py
import grpc
import chunk_pb2
import chunk_pb2_grpc
import threading
import io
import hashlib

CHUNK_SIZE = 1024 * 1024 * 4  # 4MB

def get_file_byte_chunks(f):
    while True:
        piece = f.read(CHUNK_SIZE)
        if len(piece) == 0:
            return
        yield chunk_pb2.Chunk(buffer=piece)

class Client:
    def __init__(self, address):
        channel = grpc.insecure_channel(address)
        self.stub = chunk_pb2_grpc.FileServerStub(channel)

    def upload(self, f, f_name):
        print("Inside here")
        hash_object = hashlib.sha1(f_name.encode())
        hex_dig = hash_object.hexdigest()
        print(hex_dig)
        chunks_generator = get_file_byte_chunks(f)
        metadata = (
            ('key-hash-id', hex_dig),
            ('key-chunk-size', str(CHUNK_SIZE))
        )
        response = self.stub.upload_chunk_stream(chunks_generator, metadata=metadata)
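For what it's worth, this client can be exercised in isolation (a sketch; example.bin is a hypothetical local file, and the server above is assumed to be listening on 127.0.0.1:9999):

# Hypothetical standalone test of the client above.
client = Client('127.0.0.1:9999')
with open('example.bin', 'rb') as f:
    client.upload(f, 'example.bin')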
server.py (Flask REST API server)
from flask import Flask, url_for, send_from_directory, request
import logging, os
from werkzeug.utils import secure_filename
from flask import jsonify, make_response
# import worker
import grpc_client

app = Flask(__name__)

@app.route('/', methods=['POST'])
def api_root():
    app.logger.info(PROJECT_HOME)
    if request.method == 'POST' and request.files['image']:
        app.logger.info(app.config['UPLOAD_FOLDER'])
        img = request.files['image']
        img_name = secure_filename(img.filename)
        client = grpc_client.Client('127.0.0.1:9999')
        client.upload(img, img_name)
        return make_response(jsonify({"success": True}), 200)
    else:
        return "Where is the file?"
Error received:
[2019-12-15 03:08:55,973] ERROR in app: Exception on / [POST]
Traceback (most recent call last):
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/flask/app.py", line 1982, in wsgi_app
response = self.full_dispatch_request()
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/flask/app.py", line 1614, in full_dispatch_request
rv = self.handle_user_exception(e)
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/flask/app.py", line 1517, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/flask/_compat.py", line 33, in reraise
raise value
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/flask/app.py", line 1612, in full_dispatch_request
rv = self.dispatch_request()
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/flask/app.py", line 1598, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "server.py", line 32, in api_root
client.upload(img, img_name)
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/grpc_client.py", line 32, in upload
response = self.stub.upload_chunk_stream(chunks_generator, metadata=metadata)
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/grpc/_channel.py", line 871, in __call__
return _end_unary_response_blocking(state, call, False, None)
File "/Users/wamiqueansari/Documents/275_gash/project/final/Tracking/venv/lib/python3.7/site-packages/grpc/_channel.py", line 592, in _end_unary_response_blocking
raise _Rendezvous(state, None, None, deadline)
grpc._channel._Rendezvous: <_Rendezvous of RPC that terminated with:
status = StatusCode.UNKNOWN
details = "Exception calling application: "
debug_error_string = "{"created":"#1576408135.973233000","description":"Error received from peer ipv4:127.0.0.1:9999","file":"src/core/lib/surface/call.cc","file_line":1055,"grpc_message":"Exception calling application: ","grpc_status":2}"
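One thing worth checking, based purely on the snippets above: the server asserts that three metadata keys are present (key-hash-id, key-chunk-size and key-number-of-chunks), but the client only sends the first two. The assert number_of_chunks != 0 therefore raises inside the handler, and an unhandled exception in a servicer is surfaced by gRPC exactly like this: StatusCode.UNKNOWN with details "Exception calling application: " (a bare AssertionError has no message, which is why the details are empty). A minimal sketch of a client-side fix (the chunk count below is computed from the file size for illustration; adapt it to however your code actually knows the count):

# Sketch: also send the third metadata key the server asserts on.
import os
f.seek(0, os.SEEK_END)
number_of_chunks = -(-f.tell() // CHUNK_SIZE)  # ceiling division
f.seek(0)
metadata = (
    ('key-hash-id', hex_dig),
    ('key-chunk-size', str(CHUNK_SIZE)),
    ('key-number-of-chunks', str(number_of_chunks)),
)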

Related

How to get return values from FastAPI global dependencies

FastAPI 0.68.0
Python 3.8
from fastapi import Depends, FastAPI, Header, HTTPException

async def verify_key(x_key: str = Header(...)):
    if x_key != "fake-super-secret-key":
        raise HTTPException(status_code=400, detail="X-Key header invalid")
    return x_key

app = FastAPI(dependencies=[Depends(verify_key)])

@app.get("/items/")
async def read_items():
    return [{"item": "Portal Gun"}, {"item": "Plumbus"}]
This is an example from the FastAPI documentation (part of the code is omitted).
Is there any way to get x_key in read_items()?
from fastapi import Depends, FastAPI, Header, HTTPException, Request

async def verify_key(request: Request, x_key: str = Header(...)):
    if x_key != "fake-super-secret-key":
        raise HTTPException(status_code=400, detail="X-Key header invalid")
    # save x_key
    request.state.x_key = x_key
    return x_key

app = FastAPI(dependencies=[Depends(verify_key)])

@app.get("/items/")
async def read_items(request: Request):
    # get 'x_key' from request.state
    x_key = request.state.x_key
    return [{"item": "Portal Gun"}, {"item": "Plumbus"}]
A flask/bottle-style mimic, using a contextvar to expose the current request globally:

# current_request.py
import contextvars
from fastapi import Request

_CURRENT_REQUEST = contextvars.ContextVar("_CURRENT_REQUEST", default=None)

class CurrentRequest:
    def __getattr__(self, a):
        current_request = _CURRENT_REQUEST.get()
        if current_request is None:
            raise RuntimeError('Out of context')
        return getattr(current_request, a)

request: Request = CurrentRequest()

async def set_current_request(request: Request):
    _CURRENT_REQUEST.set(request)

# fastapi app/subroute
from fastapi import Depends, FastAPI, Header, HTTPException
from .current_request import request, set_current_request

async def verify_key(x_key: str = Header(...)):
    if x_key != "fake-super-secret-key":
        raise HTTPException(status_code=400, detail="X-Key header invalid")
    # save x_key
    request.state.x_key = x_key
    return x_key

# ! set_current_request should go first
app = FastAPI(dependencies=[Depends(set_current_request), Depends(verify_key)])

@app.get("/items/")
async def read_items():
    # get 'x_key' from request.state
    x_key = request.state.x_key
    return [{"item": "Portal Gun"}, {"item": "Plumbus"}]
You can inject the verify_key function into read_items as a dependency to read its value:

from fastapi import Depends

async def verify_key(x_key: str = Header(...)):
    ...

@app.get("/items/")
async def read_items(key: str = Depends(verify_key)):
    # use key
    ...
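Whichever variant you pick, a quick sanity check is easy to sketch with FastAPI's TestClient (assuming the app object from the snippets above):

from fastapi.testclient import TestClient

client = TestClient(app)
r = client.get("/items/", headers={"X-Key": "fake-super-secret-key"})
assert r.status_code == 200
r = client.get("/items/", headers={"X-Key": "wrong"})
assert r.status_code == 400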

Scala.js ReferenceError in sbt shell but not in browser

I'm new to Scala.js, so I'm a little perplexed by this issue.
I'm trying to write a very simple facade for the peer.js library. I have this:
@js.native
@JSGlobal
class Peer() extends js.Object {
  def this(id: String = ???, options: js.Object = ???) = this()
  def connect(id: String, options: js.Object = ???): DataConnection = js.native
  def on(event: String, callback: js.Function): Unit = js.native
  def disconnect(): Unit = js.native
  def reconnect(): Unit = js.native
  def destroy(): Unit = js.native
  def id: String = js.native
  def connections: js.Object = js.native
  def disconnected: Boolean = js.native
  def destroyed: Boolean = js.native
}
And here is the simple code I'm trying to run:
object index {
  def main(args: Array[String]): Unit = {
    val peer = new Peer()
    peer.on("open", (id: String) => println(id))
  }
}
This small piece of code works perfectly fine in the browser; however, when I try to run it in the sbt shell, I get this error:
ReferenceError: Peer is not defined
ReferenceError: Peer is not defined
at $c_Lcom_nicolaswinsten_peerscalajs_index$.main__AT__V (file:///C:/Users/mjwin/IdeaProjects/peer-scalajs/target/scala-2.13/peer-scalajs-fastopt/main.js:840:14)
at $s_Lcom_nicolaswinsten_peerscalajs_index__main__AT__V (file:///C:/Users/mjwin/IdeaProjects/peer-scalajs/target/scala-2.13/peer-scalajs-fastopt/main.js:826:47)
at file:///C:/Users/mjwin/IdeaProjects/peer-scalajs/target/scala-2.13/peer-scalajs-fastopt/main.js:2078:1
at file:///C:/Users/mjwin/IdeaProjects/peer-scalajs/target/scala-2.13/peer-scalajs-fastopt/main.js:2079:4
at Script.runInContext (vm.js:143:18)
at Object.runInContext (vm.js:294:6)
at processJavaScript (C:\Users\mjwin\IdeaProjects\peer-scalajs\node_modules\jsdom\lib\jsdom\living\nodes\HTMLScriptElement-impl.js:241:10)
at HTMLScriptElementImpl._innerEval (C:\Users\mjwin\IdeaProjects\peer-scalajs\node_modules\jsdom\lib\jsdom\living\nodes\HTMLScriptElement-impl.js:176:5)
at onLoadExternalScript (C:\Users\mjwin\IdeaProjects\peer-scalajs\node_modules\jsdom\lib\jsdom\living\nodes\HTMLScriptElement-impl.js:98:12)
at onLoadWrapped (C:\Users\mjwin\IdeaProjects\peer-scalajs\node_modules\jsdom\lib\jsdom\browser\resources\per-document-resource-loader.js:53:33)
[error] org.scalajs.jsenv.ExternalJSRun$NonZeroExitException: exited with code 1
[error] at org.scalajs.jsenv.ExternalJSRun$$anon$1.run(ExternalJSRun.scala:186)
[error] stack trace is suppressed; run 'last Compile / run' for the full output
[error] (Compile / run) org.scalajs.jsenv.ExternalJSRun$NonZeroExitException: exited with code 1
I'm sure it's something really simple, but I'm stumped. Any guesses?
Thank you
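(One plausible reading of the trace above: the jsdom frames show the program running under Node.js with jsdom rather than in a real browser. In that environment, the peer.js script that a browser page would pull in via a script tag is never loaded, so the Peer global that the @JSGlobal facade points at does not exist at runtime. Running inside a page that loads peer.js first, or configuring the test JS environment to load it, is the usual way around this.)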

fs2 stream to zip-compressed fs2 stream

I have a stream of fs2 streams, and I'd like to create a compressed stream ready to be written to a file with a *.zip extension, or for downloading.
The problem is that the stream never terminates. Here is the code:
package backup

import java.io.OutputStream
import java.util.zip.{ZipEntry, ZipOutputStream}

import cats.effect._
import cats.effect.implicits._
import cats.implicits._
import fs2.{Chunk, Pipe, Stream, io}
import fs2.concurrent.Queue

import scala.concurrent.{ExecutionContext, SyncVar}

// https://github.com/slamdata/fs2-gzip/blob/master/core/src/main/scala/fs2/gzip/package.scala
// https://github.com/scalavision/fs2-helper/blob/master/src/main/scala/fs2helper/zip.scala
// https://github.com/eikek/sharry/blob/2f1dbfeae3c73bf2623f65c3591d0b3e0691d4e5/modules/common/src/main/scala/sharry/common/zip.scala

object Fs2Zip {

  private def writeEntry[F[_]](zos: ZipOutputStream)(implicit F: Concurrent[F],
                                                     blockingEc: ExecutionContext,
                                                     contextShift: ContextShift[F]): Pipe[F, (String, Stream[F, Byte]), Unit] =
    _.flatMap {
      case (name, data) =>
        val createEntry = Stream.eval(F.delay {
          zos.putNextEntry(new ZipEntry(name))
        })
        val writeEntry = data.through(io.writeOutputStream(F.delay(zos.asInstanceOf[OutputStream]), blockingEc, closeAfterUse = false))
        val closeEntry = Stream.eval(F.delay(zos.closeEntry()))
        createEntry ++ writeEntry ++ closeEntry
    }

  private def zipP1[F[_]](implicit F: ConcurrentEffect[F],
                          blockingEc: ExecutionContext,
                          contextShift: ContextShift[F]): Pipe[F, (String, Stream[F, Byte]), Byte] = entries => {
    Stream.eval(Queue.unbounded[F, Option[Chunk[Byte]]]).flatMap { q =>
      Stream.suspend {
        val os = new java.io.OutputStream {
          private var bufferedChunk: Chunk[Byte] = Chunk.empty

          private def enqueueChunkSync(a: Option[Chunk[Byte]]) = {
            println(s"enqueueChunkSync $a")
            val done = new SyncVar[Either[Throwable, Unit]]
            q.enqueue1(a).start.flatMap(_.join).runAsync(e => IO(done.put(e))).unsafeRunSync
            done.get.fold(throw _, identity)
            println(s"enqueueChunkSync done $a")
          }

          @scala.annotation.tailrec
          private def addChunk(c: Chunk[Byte]): Unit = {
            val free = 1024 - bufferedChunk.size
            if (c.size > free) {
              enqueueChunkSync(Some(Chunk.vector(bufferedChunk.toVector ++ c.take(free).toVector)))
              bufferedChunk = Chunk.empty
              addChunk(c.drop(free))
            } else {
              bufferedChunk = Chunk.vector(bufferedChunk.toVector ++ c.toVector)
            }
          }

          override def close(): Unit = {
            // flush remaining chunk
            enqueueChunkSync(Some(bufferedChunk))
            bufferedChunk = Chunk.empty
            // terminate the queue
            enqueueChunkSync(None)
          }

          override def write(bytes: Array[Byte]): Unit =
            addChunk(Chunk.bytes(bytes)) // was a bare `Chunk.bytes(bytes)`, which built a chunk and silently dropped it

          override def write(bytes: Array[Byte], off: Int, len: Int): Unit =
            addChunk(Chunk.bytes(bytes, off, len))

          override def write(b: Int): Unit =
            addChunk(Chunk.singleton(b.toByte))
        }

        val write: Stream[F, Unit] = Stream
          .bracket(F.delay(new ZipOutputStream(os)))((zos: ZipOutputStream) => F.delay(zos.close()))
          .flatMap((zos: ZipOutputStream) => entries.through(writeEntry(zos)))

        val read = q.dequeue
          .unNoneTerminate
          .flatMap(Stream.chunk(_))

        read.concurrently(write)
      }
    }
  }

  def zip[F[_]: ConcurrentEffect: ContextShift](entries: Stream[F, (String, Stream[F, Byte])])(
      implicit ec: ExecutionContext): Stream[F, Byte] =
    entries.through(zipP1)
}
The code is shamelessly copied from https://github.com/eikek/sharry/blob/master/modules/common/src/main/scala/sharry/common/zip.scala
and updated to compile with the latest fs2 and cats-effect
I narrowed the problem to enqueueChunkSync:
private def enqueueChunkSync(a: Option[Chunk[Byte]]) = {
  val done = new SyncVar[Either[Throwable, Unit]]
  q.enqueue1(a).start.flatMap(_.join).runAsync(e => IO(done.put(e))).unsafeRunSync
  done.get.fold(throw _, identity)
}
which blocks on the last chunk. When I put a println in there and make the buffer smaller, I see that chunks are flushed successfully until the last one.
When I remove the blocking bit done.get.fold(throw _, identity), it seems to work, but then I imagine the bytes are flushed to the stream all at once?
How is the last chunk different from the previous ones?

CorDapp is working in CRaSH Shell but API is not recognizing CorDapp

I'm trying to expose an API for a CorDapp, but the functions are not displaying. When I load an example (https://github.com/roger3cev/obligation-cordapp), I see the following page: https://imgur.com/a/ifOdrAd. However, when I load my CorDapp, it says there are no installed CorDapps, and /api on localhost returns a 404. I feel the problem lies somewhere in here: https://github.com/PronoyC/InsureFlight/tree/master/cordapp/src/main/kotlin/com/insureflight. I know this is very vague, but I cannot find any errors that indicate a specific area. Any help would be appreciated.
InsureFlightApi.kt:
package com.insureflight

import net.corda.core.contracts.Amount
import net.corda.core.contracts.UniqueIdentifier
import net.corda.core.messaging.CordaRPCOps
import net.corda.core.utilities.OpaqueBytes
import net.corda.core.utilities.getOrThrow
import com.insureflight.flows.IssuePolicy
import com.insureflight.flows.PayoutPolicy
import net.corda.finance.contracts.asset.Cash
import net.corda.finance.contracts.getCashBalances
import net.corda.finance.flows.CashIssueFlow
import java.util.*
import javax.ws.rs.GET
import javax.ws.rs.Path
import javax.ws.rs.Produces
import javax.ws.rs.QueryParam
import javax.ws.rs.core.MediaType
import javax.ws.rs.core.Response
import javax.ws.rs.core.Response.Status.BAD_REQUEST
import javax.ws.rs.core.Response.Status.CREATED

@Path("insureflight")
class InsureFlightApi(val rpcOps: CordaRPCOps) {
    private val myIdentity = rpcOps.nodeInfo().legalIdentities.first()

    @GET
    @Path("me")
    @Produces(MediaType.APPLICATION_JSON)
    fun me() = mapOf("me" to myIdentity)

    @GET
    @Path("peers")
    @Produces(MediaType.APPLICATION_JSON)
    fun peers() = mapOf("peers" to rpcOps.networkMapSnapshot()
            .filter { nodeInfo -> nodeInfo.legalIdentities.first() != myIdentity }
            .map { it.legalIdentities.first().name.organisation })

    @GET
    @Path("policies")
    @Produces(MediaType.APPLICATION_JSON)
    fun policies() = rpcOps.vaultQuery(Policy::class.java).states

    @GET
    @Path("cash")
    @Produces(MediaType.APPLICATION_JSON)
    fun cash() = rpcOps.vaultQuery(Cash.State::class.java).states

    @GET
    @Path("cash-balances")
    @Produces(MediaType.APPLICATION_JSON)
    fun getCashBalances() = rpcOps.getCashBalances()

    @GET
    @Path("self-issue-cash")
    fun selfIssueCash(@QueryParam(value = "amount") amount: Int,
                      @QueryParam(value = "currency") currency: String): Response {
        // 1. Prepare issue request.
        val issueAmount = Amount(amount.toLong(), Currency.getInstance(currency))
        val notary = rpcOps.notaryIdentities().firstOrNull() ?: throw IllegalStateException("Could not find a notary.")
        val issueRef = OpaqueBytes.of(0)
        val issueRequest = CashIssueFlow.IssueRequest(issueAmount, issueRef, notary)
        // 2. Start flow and wait for response.
        val (status, message) = try {
            val flowHandle = rpcOps.startFlowDynamic(CashIssueFlow::class.java, issueRequest)
            val result = flowHandle.use { it.returnValue.getOrThrow() }
            CREATED to result.stx.tx.outputs.single().data
        } catch (e: Exception) {
            BAD_REQUEST to e.message
        }
        // 3. Return the response.
        return Response.status(status).entity(message).build()
    }

    @GET
    @Path("issue-policy")
    fun issuePolicy(@QueryParam(value = "premium") premium: Int,
                    @QueryParam(value = "currency") currency: String,
                    @QueryParam(value = "client") client: String,
                    @QueryParam(value = "underwriter") underwriter: String,
                    @QueryParam(value = "flight") flight: String,
                    @QueryParam(value = "fStatus") fStatus: String): Response {
        // 1. Create a premium object.
        val issuePremium = Amount(premium.toLong() * 100, Currency.getInstance(currency))
        // 2. Start the IssuePolicy flow. We block and wait for the flow to return.
        val (status, message) = try {
            val flowHandle = rpcOps.startFlowDynamic(
                    IssuePolicy.Initiator::class.java,
                    issuePremium,
                    client,
                    underwriter,
                    flight,
                    fStatus,
                    true
            )
            val result = flowHandle.use { it.returnValue.getOrThrow() }
            CREATED to "Transaction id ${result.id} committed to ledger.\n${result.tx.outputs.single().data}"
        } catch (e: Exception) {
            BAD_REQUEST to e.message
        }
        // 3. Return the result.
        return Response.status(status).entity(message).build()
    }

    @GET
    @Path("payout-policy")
    fun settlePolicy(@QueryParam(value = "id") id: String,
                     @QueryParam(value = "delayedMinutes") delayedMinutes: Int,
                     @QueryParam(value = "fStatus") fStatus: String): Response {
        // 1. Get party objects for the counterparty.
        val linearId = UniqueIdentifier.fromString(id)
        // 2. Start the SettlePolicy flow. We block and wait for the flow to return.
        val (status, message) = try {
            val flowHandle = rpcOps.startFlowDynamic(
                    PayoutPolicy.Initiator::class.java,
                    linearId,
                    delayedMinutes,
                    fStatus,
                    true
            )
            flowHandle.use { flowHandle.returnValue.getOrThrow() }
            CREATED to "Policy $linearId has been settled."
        } catch (e: Exception) {
            BAD_REQUEST to e.message
        }
        // 3. Return the result.
        return Response.status(status).entity(message).build()
    }
}
InsureFlightPlugin.kt (ObligationPlugin.kt is very similar to this):
package com.insureflight

import net.corda.core.messaging.CordaRPCOps
import net.corda.webserver.services.WebServerPluginRegistry
import java.util.function.Function

class InsureFlightPlugin : WebServerPluginRegistry {
    override val webApis: List<Function<CordaRPCOps, out Any>> = listOf(Function(::InsureFlightApi))
    override val staticServeDirs: Map<String, String> = mapOf(
            "policy" to javaClass.classLoader.getResource("policyWeb").toExternalForm()
    )
}
Kid101 is correct. You have to register the InsureFlightPlugin, as done here: obligation-cordapp/kotlin-source/src/main/resources/META-INF/services/net.corda.webserver.services.WebServerPluginRegistry
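Concretely (a sketch following the standard ServiceLoader convention the Corda webserver uses for discovery): add a plain-text file at that path in your own module, containing the plugin's fully qualified class name.

# src/main/resources/META-INF/services/net.corda.webserver.services.WebServerPluginRegistry
com.insureflight.InsureFlightPlugin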

too many open files when using net/http

I am writing a REST service using go-json-rest, which in turn uses net/http.
My server code simply gets the request and passes it to a channel.
Here is my server code
package main

import (
    "github.com/ant0ine/go-json-rest/rest"
    "log"
    "net/http"
    "strconv"
    "time"
)

const workerCount = 4

var evChannel = make(chan Event)
var workers = make([]*LogWorker, workerCount)

const maxLogFileSize = 100 // In MB
const maxLogFileBackups = 30
const maxLogFileAge = 5
const logFileName = "/home/sam/tmp/go_logs/event_"

func main() {
    // Initialize workers.
    // Four workers are created.
    for i := 0; i < workerCount; i++ {
        var fileName = logFileName + strconv.Itoa(i)
        workers[i] = NewLogWorker(fileName, maxLogFileSize, maxLogFileBackups, maxLogFileAge)
        go workers[i].Work(evChannel)
    }
    // Initialize REST API
    api := rest.NewApi()
    //api.Use(rest.DefaultDevStack...)
    api.Use(rest.DefaultCommonStack...)
    router, err := rest.MakeRouter(
        rest.Post("/events", StoreEvents),
    )
    if err != nil {
        log.Fatal(err)
    }
    api.SetApp(router)
    log.Fatal(http.ListenAndServe(":4545", api.MakeHandler()))
}

func StoreEvents(w rest.ResponseWriter, r *rest.Request) {
    event := Event{}
    err := r.DecodeJsonPayload(&event)
    if err != nil {
        rest.Error(w, err.Error(), http.StatusInternalServerError)
        return
    }
    // TODO : Add validation if needed
    // Add code to parse the request and add further information to event
    // log.Println()
    select {
    case evChannel <- event:
    case <-time.After(5 * time.Second):
        // throw away the message, so sad
    }
    // evChannel <- event
    //log.Println(Csv(event))
    w.WriteHeader(http.StatusOK)
}
When I drive it continuously with JMeter, I occasionally get the error below:
http: Accept error: accept tcp [::]:4545: too many open files; retrying in 10ms
Does net/http open files for every request?
Posting elithrar's comment as an answer:
Sockets, yes. You may need to increase your fd limit (via ulimit or sysctl).
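(For context: each accepted TCP connection consumes a file descriptor, and the common default per-process limit is 1024, which a sustained load test exhausts quickly. On Linux you can check the limit with ulimit -n, raise it for the current shell with e.g. ulimit -n 65536, or make the change persistent via /etc/security/limits.conf.)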
