We are sporadically seeing the following errors on the client side:
Caused by: io.grpc.StatusRuntimeException: UNKNOWN: channel closed
at io.grpc.Status.asRuntimeException(Status.java:532)
at io.grpc.stub.ClientCalls$StreamObserverToCallListenerAdapter.onClose(ClientCalls.java:434)
at io.grpc.PartialForwardingClientCallListener.onClose(PartialForwardingClientCallListener.java:39)
at io.grpc.ForwardingClientCallListener.onClose(ForwardingClientCallListener.java:23)
at io.grpc.ForwardingClientCallListener$SimpleForwardingClientCallListener.onClose(ForwardingClientCallListener.java:40)
at io.grpc.internal.CensusStatsModule$StatsClientInterceptor$1$1.onClose(CensusStatsModule.java:700)
at io.grpc.PartialForwardingClientCallListener.onClose(PartialForwardingClientCallListener.java:39)
at io.grpc.ForwardingClientCallListener.onClose(ForwardingClientCallListener.java:23)
at io.grpc.ForwardingClientCallListener$SimpleForwardingClientCallListener.onClose(ForwardingClientCallListener.java:40)
at io.grpc.internal.CensusTracingModule$TracingClientInterceptor$1$1.onClose(CensusTracingModule.java:398)
at io.grpc.internal.ClientCallImpl.closeObserver(ClientCallImpl.java:459)
at io.grpc.internal.ClientCallImpl.access$300(ClientCallImpl.java:63)
at io.grpc.internal.ClientCallImpl$ClientStreamListenerImpl.close(ClientCallImpl.java:546)
at io.grpc.internal.ClientCallImpl$ClientStreamListenerImpl.access$600(ClientCallImpl.java:467)
at io.grpc.internal.ClientCallImpl$ClientStreamListenerImpl$1StreamClosed.runInContext(ClientCallImpl.java:584)
at io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
at io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:123)
at java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)
at java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)
at java.base/java.lang.Thread.run(Thread.java:834)
Caused by: java.nio.channels.ClosedChannelException: null
at io.grpc.netty.Utils.statusFromThrowable(Utils.java:166)
at io.grpc.netty.NettyClientHandler.onConnectionError(NettyClientHandler.java:474)
at io.netty.handler.codec.http2.Http2ConnectionHandler.onError(Http2ConnectionHandler.java:641)
at io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder.writeHeaders(DefaultHttp2ConnectionEncoder.java:225)
at io.netty.handler.codec.http2.DecoratingHttp2FrameWriter.writeHeaders(DecoratingHttp2FrameWriter.java:53)
at io.netty.handler.codec.http2.StreamBufferingEncoder.writeHeaders(StreamBufferingEncoder.java:157)
at io.netty.handler.codec.http2.StreamBufferingEncoder.writeHeaders(StreamBufferingEncoder.java:141)
at io.grpc.netty.NettyClientHandler.createStream(NettyClientHandler.java:543)
at io.grpc.netty.NettyClientHandler.write(NettyClientHandler.java:312)
at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:716)
at io.netty.channel.AbstractChannelHandlerContext.invokeWrite(AbstractChannelHandlerContext.java:708)
at io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:791)
at io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:701)
at io.netty.channel.DefaultChannelPipeline.write(DefaultChannelPipeline.java:1026)
at io.netty.channel.AbstractChannel.write(AbstractChannel.java:288)
at io.grpc.netty.WriteQueue$AbstractQueuedCommand.run(WriteQueue.java:174)
at io.grpc.netty.WriteQueue.flush(WriteQueue.java:112)
at io.grpc.netty.WriteQueue.access$000(WriteQueue.java:32)
at io.grpc.netty.WriteQueue$1.run(WriteQueue.java:44)
at io.netty.util.concurrent.AbstractEventExecutor.safeExecute(AbstractEventExecutor.java:163)
at io.netty.util.concurrent.SingleThreadEventExecutor.runAllTasks(SingleThreadEventExecutor.java:416)
at io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:515)
at io.netty.util.concurrent.SingleThreadEventExecutor$5.run(SingleThreadEventExecutor.java:918)
at io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74)
at io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30)
... 1 common frames omitted
Caused by: java.nio.channels.ClosedChannelException: null
at io.netty.channel.AbstractChannel$AbstractUnsafe.newClosedChannelException(AbstractChannel.java:955)
at io.netty.channel.AbstractChannel$AbstractUnsafe.write(AbstractChannel.java:863)
at io.netty.channel.DefaultChannelPipeline$HeadContext.write(DefaultChannelPipeline.java:1378)
at io.netty.channel.AbstractChannelHandlerContext.invokeWrite0(AbstractChannelHandlerContext.java:716)
at io.netty.channel.AbstractChannelHandlerContext.invokeWrite(AbstractChannelHandlerContext.java:708)
at io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:791)
at io.netty.channel.AbstractChannelHandlerContext.write(AbstractChannelHandlerContext.java:701)
at io.netty.handler.codec.http2.DefaultHttp2FrameWriter.writeHeadersInternal(DefaultHttp2FrameWriter.java:528)
at io.netty.handler.codec.http2.DefaultHttp2FrameWriter.writeHeaders(DefaultHttp2FrameWriter.java:268)
at io.netty.handler.codec.http2.Http2OutboundFrameLogger.writeHeaders(Http2OutboundFrameLogger.java:60)
at io.netty.handler.codec.http2.DefaultHttp2ConnectionEncoder.writeHeaders(DefaultHttp2ConnectionEncoder.java:208)
... 22 common frames omitted
Caused by: java.io.IOException: Connection reset by peer
at java.base/sun.nio.ch.FileDispatcherImpl.write0(Native Method)
at java.base/sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:47)
at java.base/sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:113)
at java.base/sun.nio.ch.IOUtil.write(IOUtil.java:58)
at java.base/sun.nio.ch.IOUtil.write(IOUtil.java:50)
at java.base/sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:466)
at io.netty.channel.socket.nio.NioSocketChannel.doWrite(NioSocketChannel.java:405)
at io.netty.channel.AbstractChannel$AbstractUnsafe.flush0(AbstractChannel.java:928)
at io.netty.channel.nio.AbstractNioChannel$AbstractNioUnsafe.flush0(AbstractNioChannel.java:356)
at io.netty.channel.AbstractChannel$AbstractUnsafe.flush(AbstractChannel.java:895)
at io.netty.channel.DefaultChannelPipeline$HeadContext.flush(DefaultChannelPipeline.java:1383)
at io.netty.channel.AbstractChannelHandlerContext.invokeFlush0(AbstractChannelHandlerContext.java:749)
at io.netty.channel.AbstractChannelHandlerContext.invokeFlush(AbstractChannelHandlerContext.java:741)
at io.netty.channel.AbstractChannelHandlerContext.flush(AbstractChannelHandlerContext.java:727)
at io.grpc.netty.NettyClientHandler.sendPingFrame(NettyClientHandler.java:646)
at io.grpc.netty.NettyClientHandler.write(NettyClientHandler.java:318)
... 17 common frames omitted
Both the server and the client run in Kubernetes; the server is exposed as a ClusterIP service. The client's channel builder looks like this:
NettyChannelBuilder.forAddress("some-service", 8090)
.nameResolverFactory(DnsNameResolverProvider.asFactory())
.defaultLoadBalancingPolicy("round_robin")
.idleTimeout(60000, TimeUnit.MILLISECONDS)
.usePlaintext();
The client sends requests in bursts of 4-5 (issued concurrently using Project Reactor) every 5 minutes. We see failures once or twice a day. We have tried setting both keepAlive and idleTimeout on separate occasions, but neither seems to help. We have retries defined for the UNAVAILABLE and DEADLINE_EXCEEDED status codes, but not for UNKNOWN, since that could mean retrying potentially non-retriable errors. Is there a way to fix this on the client side without having to retry on UNKNOWN errors?
Client gRPC version: 1.20.0
Server gRPC version: 1.20.0
Netty Version: 4.1.39.Final
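For reference, the keepalive variant we experimented with looked roughly like the sketch below; the times shown are illustrative, not the exact values we used.
import io.grpc.ManagedChannel;
import io.grpc.internal.DnsNameResolverProvider;
import io.grpc.netty.NettyChannelBuilder;
import java.util.concurrent.TimeUnit;

public class KeepAliveChannelSketch {
    public static ManagedChannel build() {
        // Keepalive pings are intended to surface connections that the peer (or an
        // intermediary) has silently dropped before the next burst of RPCs hits them.
        return NettyChannelBuilder.forAddress("some-service", 8090)
                .nameResolverFactory(DnsNameResolverProvider.asFactory())
                .defaultLoadBalancingPolicy("round_robin")
                .keepAliveTime(30, TimeUnit.SECONDS)     // send a ping after 30s of inactivity (illustrative)
                .keepAliveTimeout(10, TimeUnit.SECONDS)  // treat the connection as dead if no ack within 10s
                .keepAliveWithoutCalls(true)             // keep pinging between the 5-minute bursts
                .usePlaintext()
                .build();
    }
}
Note that keepAliveWithoutCalls(true) only helps if the server is configured to permit pings without active calls; otherwise the server may close the connection with a GOAWAY (too_many_pings).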
Related
I'm using JMeter to load-test a middleware server using the TCP Sampler (TCPClientImpl class) to send a message (see the message-format and TCP Sampler settings screenshots). Upon sending the request I get a 500 response code.
ERROR
Response code:500
Response message:org.apache.jmeter.protocol.tcp.sampler.ReadException: Error reading from server, bytes read: 0
LOGS
2022-03-03 12:46:37,265 ERROR o.a.j.p.t.s.TCPSampler:
org.apache.jmeter.protocol.tcp.sampler.ReadException: Error reading from server, bytes read: 0
at org.apache.jmeter.protocol.tcp.sampler.TCPClientImpl.read(TCPClientImpl.java:122) ~[ApacheJMeter_tcp.jar:5.4.3]
at org.apache.jmeter.protocol.tcp.sampler.TCPSampler.sample(TCPSampler.java:398) [ApacheJMeter_tcp.jar:5.4.3]
at org.apache.jmeter.threads.JMeterThread.doSampling(JMeterThread.java:638) [ApacheJMeter_core.jar:5.4.3]
at org.apache.jmeter.threads.JMeterThread.executeSamplePackage(JMeterThread.java:558) [ApacheJMeter_core.jar:5.4.3]
at org.apache.jmeter.threads.JMeterThread.processSampler(JMeterThread.java:489) [ApacheJMeter_core.jar:5.4.3]
at org.apache.jmeter.threads.JMeterThread.run(JMeterThread.java:256) [ApacheJMeter_core.jar:5.4.3]
at java.lang.Thread.run(Thread.java:833) [?:?]
Caused by: java.net.SocketTimeoutException: Read timed out
at sun.nio.ch.NioSocketImpl.timedRead(NioSocketImpl.java:283) ~[?:?]
at sun.nio.ch.NioSocketImpl.implRead(NioSocketImpl.java:309) ~[?:?]
at sun.nio.ch.NioSocketImpl.read(NioSocketImpl.java:350) ~[?:?]
at sun.nio.ch.NioSocketImpl$1.read(NioSocketImpl.java:803) ~[?:?]
at java.net.Socket$SocketInputStream.read(Socket.java:966) ~[?:?]
at java.io.InputStream.read(InputStream.java:218) ~[?:?]
at org.apache.jmeter.protocol.tcp.sampler.TCPClientImpl.read(TCPClientImpl.java:105) ~[ApacheJMeter_tcp.jar:5.4.3]
This means the sampler failed to receive a response within the 2-second timeout you configured.
Try increasing the timeout in the TCP Sampler (or, even better, use a TCP Sampler Config element; it acts like HTTP Request Defaults does for HTTP Request samplers, so the host, port, timeouts, etc. will apply to all TCP Samplers you have).
If the error remains, double-check network connectivity to the host/port combination where your TCP service lives using a telnet client or equivalent software.
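If a telnet client isn't handy, a quick programmatic check of the same host/port can be done with a plain Java socket; a minimal sketch (host and port are placeholders):
import java.net.InetSocketAddress;
import java.net.Socket;

public class TcpCheck {
    public static void main(String[] args) {
        // Placeholder host/port -- replace with where your TCP service lives.
        String host = "middleware-host";
        int port = 9000;
        try (Socket socket = new Socket()) {
            socket.connect(new InetSocketAddress(host, port), 2000); // 2s connect timeout
            System.out.println("TCP connect OK: " + host + ":" + port);
        } catch (Exception e) {
            System.out.println("TCP connect FAILED: " + e);
        }
    }
}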
I have deployed API Manager 4.0.0 (all-in-one) on 2 VMs and fronted the system with a load balancer.
When one node is shut down with "sh api-manager.sh stop", the other switches over successfully and runs well, but errors like the following appear in the console:
TID: [-1] [] [2022-03-14 10:14:13,270] ERROR {org.wso2.carbon.databridge.agent.endpoint.DataEndpointConnectionWorker} - Error while trying to connect to the endpoint. Cannot borrow client for ssl://10.32.73.10:9711
org.wso2.carbon.databridge.agent.exception.DataEndpointAuthenticationException: Cannot borrow client for ssl://10.32.73.10:9711
at org.wso2.carbon.databridge.agent.endpoint.DataEndpointConnectionWorker.connect(DataEndpointConnectionWorker.java:147)
at org.wso2.carbon.databridge.agent.endpoint.DataEndpointConnectionWorker.run(DataEndpointConnectionWorker.java:59)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.wso2.carbon.databridge.agent.exception.DataEndpointException: Error while opening socket to 10.32.73.10:9711. Connection refused (Connection refused)
at org.wso2.carbon.databridge.agent.endpoint.binary.BinarySecureClientPoolFactory.createClient(BinarySecureClientPoolFactory.java:75)
at org.wso2.carbon.databridge.agent.client.AbstractClientPoolFactory.makeObject(AbstractClientPoolFactory.java:39)
at org.apache.commons.pool.impl.GenericKeyedObjectPool.borrowObject(GenericKeyedObjectPool.java:1212)
at org.wso2.carbon.databridge.agent.endpoint.DataEndpointConnectionWorker.connect(DataEndpointConnectionWorker.java:137)
... 6 more
Caused by: java.net.ConnectException: Connection refused (Connection refused)
at java.net.PlainSocketImpl.socketConnect(Native Method)
at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:476)
at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:218)
at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:200)
at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:394)
at java.net.Socket.connect(Socket.java:606)
at sun.security.ssl.SSLSocketImpl.connect(SSLSocketImpl.java:287)
at sun.security.ssl.SSLSocketImpl.<init>(SSLSocketImpl.java:146)
at sun.security.ssl.SSLSocketFactoryImpl.createSocket(SSLSocketFactoryImpl.java:88)
at org.wso2.carbon.databridge.agent.endpoint.binary.BinarySecureClientPoolFactory.createClient(BinarySecureClientPoolFactory.java:58)
... 9 more
TID: [-1] [] [2022-03-14 10:14:15,158] WARN {org.wso2.carbon.databridge.agent.endpoint.DataEndpointGroup} - No receiver is reachable at URL Endpoint/Endpoints [tcp://10.32.73.10:9611], will try to reconnect every 30 sec
Is there anything wrong in the deployment.toml?
In an APIM active-active setup, each node publishes throttling data both to itself and to the other node. When you stop the other node, throttling data can no longer be published to it, hence the connection-refused errors; this is expected. There is no harm in these error logs, and publishing recovers once the other node is started. If you look at the deployment.toml, you can find the other node's details under the throttling configurations (see the sketch below).
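For illustration, in an active-active setup the traffic manager endpoints of both nodes typically appear under the throttling section of deployment.toml, roughly along these lines (a sketch only; the second IP is a placeholder and the exact keys should be checked against the WSO2 APIM 4.0.0 documentation):
[[apim.throttling.url_group]]
# 10.32.73.10 is taken from the log above; the second node's IP is a placeholder.
traffic_manager_urls = ["tcp://10.32.73.10:9611", "tcp://<other-node-ip>:9611"]
traffic_manager_auth_urls = ["ssl://10.32.73.10:9711", "ssl://<other-node-ip>:9711"]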
With a repository successfully migrated from Alfresco 5.2 to 7.0, I get PSQLException: SSL error: readHandshakeRecord (see stack trace below) every morning at 4:00, which then causes the repository to stop responding.
Could someone please help me decipher this stack trace? Why is this job running around 4 am? I can't find a suitable Quartz job. Does anyone know how to manually force this call in order to debug the problem? At first I thought it might be related to the contentStoreCleaner running at 4:00 am, but disabling that job doesn't change anything.
The only workaround I have found so far is to disable the Activiti workflow engine.
2021-08-08 04:35:39,396 ERROR [org.activiti.engine.impl.jobexecutor.AcquireJobsRunnableImpl] [Thread-46] exception during job acquisition: Could not open JDBC Connection for transaction; nested exception is org.postgresql.util.PSQLException: SSL error: readHandshakeRecord
org.springframework.transaction.CannotCreateTransactionException: Could not open JDBC Connection for transaction; nested exception is org.postgresql.util.PSQLException: SSL error: readHandshakeRecord
at org.springframework.jdbc.datasource.DataSourceTransactionManager.doBegin(DataSourceTransactionManager.java:309)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.startTransaction(AbstractPlatformTransactionManager.java:400)
at org.springframework.transaction.support.AbstractPlatformTransactionManager.getTransaction(AbstractPlatformTransactionManager.java:373)
at org.springframework.transaction.support.TransactionTemplate.execute(TransactionTemplate.java:137)
at org.activiti.spring.SpringTransactionInterceptor.execute(SpringTransactionInterceptor.java:45)
at org.activiti.engine.impl.interceptor.LogInterceptor.execute(LogInterceptor.java:31)
at org.activiti.engine.impl.cfg.CommandExecutorImpl.execute(CommandExecutorImpl.java:40)
at org.activiti.engine.impl.cfg.CommandExecutorImpl.execute(CommandExecutorImpl.java:35)
at org.activiti.engine.impl.jobexecutor.AcquireJobsRunnableImpl.run(AcquireJobsRunnableImpl.java:54)
at java.base/java.lang.Thread.run(Thread.java:829)
Caused by: org.postgresql.util.PSQLException: SSL error: readHandshakeRecord
at org.postgresql.ssl.MakeSSL.convert(MakeSSL.java:43)
at org.postgresql.core.v3.ConnectionFactoryImpl.enableSSL(ConnectionFactoryImpl.java:534)
at org.postgresql.core.v3.ConnectionFactoryImpl.tryConnect(ConnectionFactoryImpl.java:149)
at org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:213)
at org.postgresql.core.ConnectionFactory.openConnection(ConnectionFactory.java:51)
at org.postgresql.jdbc.PgConnection.<init>(PgConnection.java:223)
at org.postgresql.Driver.makeConnection(Driver.java:465)
at org.postgresql.Driver.connect(Driver.java:264)
at org.apache.commons.dbcp.DriverConnectionFactory.createConnection(DriverConnectionFactory.java:38)
at org.apache.commons.dbcp.PoolableConnectionFactory.makeObject(PoolableConnectionFactory.java:582)
at org.apache.commons.pool.impl.GenericObjectPool.borrowObject(GenericObjectPool.java:1188)
at org.apache.commons.dbcp.PoolingDataSource.getConnection(PoolingDataSource.java:106)
at org.apache.commons.dbcp.BasicDataSource.getConnection(BasicDataSource.java:1044)
at org.springframework.jdbc.datasource.DataSourceTransactionManager.doBegin(DataSourceTransactionManager.java:265)
... 9 more
Caused by: javax.net.ssl.SSLException: readHandshakeRecord
at java.base/sun.security.ssl.SSLSocketImpl.readHandshakeRecord(SSLSocketImpl.java:1335)
at java.base/sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:440)
at java.base/sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:411)
at org.postgresql.ssl.MakeSSL.convert(MakeSSL.java:41)
... 22 more
Suppressed: java.net.SocketException: Broken pipe (Write failed)
at java.base/java.net.SocketOutputStream.socketWrite0(Native Method)
at java.base/java.net.SocketOutputStream.socketWrite(SocketOutputStream.java:110)
at java.base/java.net.SocketOutputStream.write(SocketOutputStream.java:150)
at java.base/sun.security.ssl.SSLSocketOutputRecord.encodeAlert(SSLSocketOutputRecord.java:81)
at java.base/sun.security.ssl.TransportContext.fatal(TransportContext.java:380)
at java.base/sun.security.ssl.TransportContext.fatal(TransportContext.java:292)
at java.base/sun.security.ssl.SSLSocketImpl.startHandshake(SSLSocketImpl.java:450)
... 24 more
Caused by: java.net.SocketException: Broken pipe (Write failed)
at java.base/java.net.SocketOutputStream.socketWrite0(Native Method)
at java.base/java.net.SocketOutputStream.socketWrite(SocketOutputStream.java:110)
at java.base/java.net.SocketOutputStream.write(SocketOutputStream.java:150)
at java.base/sun.security.ssl.SSLSocketOutputRecord.flush(SSLSocketOutputRecord.java:251)
at java.base/sun.security.ssl.HandshakeOutStream.flush(HandshakeOutStream.java:89)
at java.base/sun.security.ssl.Finished$T13FinishedProducer.onProduceFinished(Finished.java:679)
at java.base/sun.security.ssl.Finished$T13FinishedProducer.produce(Finished.java:658)
at java.base/sun.security.ssl.SSLHandshake.produce(SSLHandshake.java:436)
at java.base/sun.security.ssl.Finished$T13FinishedConsumer.onConsumeFinished(Finished.java:1011)
at java.base/sun.security.ssl.Finished$T13FinishedConsumer.consume(Finished.java:874)
at java.base/sun.security.ssl.SSLHandshake.consume(SSLHandshake.java:392)
at java.base/sun.security.ssl.HandshakeContext.dispatch(HandshakeContext.java:443)
at java.base/sun.security.ssl.HandshakeContext.dispatch(HandshakeContext.java:421)
at java.base/sun.security.ssl.TransportContext.dispatch(TransportContext.java:182)
at java.base/sun.security.ssl.SSLTransport.decode(SSLTransport.java:171)
at java.base/sun.security.ssl.SSLSocketImpl.decode(SSLSocketImpl.java:1418)
at java.base/sun.security.ssl.SSLSocketImpl.readHandshakeRecord(SSLSocketImpl.java:1324)
... 25 more
The stack trace was misleading - the JDBC connection problem was caused by memory filling up due to the Alfresco trashcan cleaner module: OutOfMemoryError: Java heap space, because there is no limit on getChildAssocs.
We have ~4 million nodes in the trashcan, and in every batch run the module retrieves all of those nodes again and again until memory is exhausted.
I'm using Grakn Core 1.8.4 on Windows 10. The Grakn server and Grakn storage start up normally, but when trying to load a schema, Grakn returns the following error message:
Unable to create connection to Grakn instance at localhost:48555
Cause: io.grpc.StatusRuntimeException
UNKNOWN: ID block allocation on partition(30)-namespace(0) timed out in 2.000 min. Please check server logs for the stack trace.
I have already checked that no other process is listening on the same port. I also disabled the firewall, which did not solve the problem. Does anyone have any indication of how I should proceed?
Below is part of the log:
2021-01-05 14:19:24,387 [JanusGraphID(30)(0)[0]] WARN g.c.g.d.i.ConsistentKeyIDAuthority - Temporary storage exception while acquiring id block - retrying in PT0.32S: {}
grakn.core.graph.diskstorage.TemporaryBackendException: Wrote claim for id block [1, 10001) in PT0.016S => too slow, threshold is: PT0.01S
at grakn.core.graph.diskstorage.idmanagement.ConsistentKeyIDAuthority.getIDBlock(ConsistentKeyIDAuthority.java:320)
at grakn.core.graph.graphdb.database.idassigner.StandardIDPool$IDBlockGetter.call(StandardIDPool.java:262)
at grakn.core.graph.graphdb.database.idassigner.StandardIDPool$IDBlockGetter.call(StandardIDPool.java:232)
at java.util.concurrent.FutureTask.run(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
2021-01-05 14:19:24,688 [grpc-request-handler-1] ERROR grakn.core.server.rpc.SessionService - An error has occurred
grakn.core.graph.core.JanusGraphException: ID block allocation on partition(30)-namespace(0) timed out in 2.000 min
at grakn.core.graph.graphdb.database.idassigner.StandardIDPool.waitForIDBlockGetter(StandardIDPool.java:146)
at grakn.core.graph.graphdb.database.idassigner.StandardIDPool.nextBlock(StandardIDPool.java:165)
at grakn.core.graph.graphdb.database.idassigner.StandardIDPool.nextID(StandardIDPool.java:185)
at grakn.core.graph.graphdb.database.idassigner.VertexIDAssigner.assignID(VertexIDAssigner.java:334)
at grakn.core.graph.graphdb.database.idassigner.VertexIDAssigner.assignID(VertexIDAssigner.java:184)
at grakn.core.graph.graphdb.database.idassigner.VertexIDAssigner.assignID(VertexIDAssigner.java:154)
at grakn.core.graph.graphdb.database.StandardJanusGraph.assignID(StandardJanusGraph.java:416)
at grakn.core.graph.graphdb.transaction.StandardJanusGraphTx.addVertex(StandardJanusGraphTx.java:636)
at grakn.core.graph.graphdb.transaction.StandardJanusGraphTx.addVertex(StandardJanusGraphTx.java:653)
at grakn.core.graph.graphdb.transaction.StandardJanusGraphTx.addVertex(StandardJanusGraphTx.java:649)
at grakn.core.concept.structure.ElementFactory.addVertexElement(ElementFactory.java:104)
at grakn.core.concept.manager.ConceptManagerImpl.addTypeVertex(ConceptManagerImpl.java:188)
at grakn.core.server.session.TransactionImpl.createMetaConcepts(TransactionImpl.java:1297)
at grakn.core.server.session.SessionImpl.initialiseMetaConcepts(SessionImpl.java:123)
at grakn.core.server.session.SessionImpl.<init>(SessionImpl.java:85)
at grakn.core.server.session.SessionFactory.session(SessionFactory.java:115)
at grakn.core.server.rpc.ServerOpenRequest.open(ServerOpenRequest.java:40)
at grakn.core.server.rpc.SessionService.open(SessionService.java:122)
at grakn.protocol.session.SessionServiceGrpc$MethodHandlers.invoke(SessionServiceGrpc.java:339)
at io.grpc.stub.ServerCalls$UnaryServerCallHandler$UnaryServerCallListener.onHalfClose(ServerCalls.java:172)
at io.grpc.internal.ServerCallImpl$ServerStreamListenerImpl.halfClosed(ServerCallImpl.java:331)
at io.grpc.internal.ServerImpl$JumpToApplicationThreadServerStreamListener$1HalfClosed.runInContext(ServerImpl.java:814)
at io.grpc.internal.ContextRunnable.run(ContextRunnable.java:37)
at io.grpc.internal.SerializingExecutor.run(SerializingExecutor.java:123)
at java.util.concurrent.ThreadPoolExecutor.runWorker(Unknown Source)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(Unknown Source)
at java.lang.Thread.run(Unknown Source)
Caused by: java.util.concurrent.TimeoutException: null
at java.util.concurrent.FutureTask.get(Unknown Source)
at grakn.core.graph.graphdb.database.idassigner.StandardIDPool.waitForIDBlockGetter(StandardIDPool.java:126)
... 26 common frames omitted
If you get this error, you can try to increase the wait time with the following two parameters in the grakn.properties file:
# The number of milliseconds that the JanusGraph id pool manager will wait before
# giving up on allocating a new block of ids
root.ids.renew-timeout=10
# The number of milliseconds the system waits for an ID block reservation to be acknowledged by the storage backend
ids.authority.wait-time=10
I solved this by increasing the parameters below from 10 to 100 in the .\grakn\server\conf\grakn.properties configuration file.
I'm not sure whether this affects performance.
# The number of milliseconds that the JanusGraph id pool manager will wait before
# giving up on allocating a new block of ids
root.ids.renew-timeout=100
# The number of milliseconds the system waits for an ID block reservation to be acknowledged by the storage backend
ids.authority.wait-time=100
I am using Netty version 3.10.6; while communicating with the server I am getting the following error:
Decoding WebSocket Frame opCode=10
2019-04-30T14:31:36,002 UTC DEBUG (New I/O worker #5) [netty(?:?)] Component:DASHBOARD [org.jboss.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder] Decoding WebSocket Frame length=0
2019-04-30T14:31:36,002 UTC DEBUG (New I/O worker #2) [netty(?:?)] Component:DASHBOARD [org.jboss.netty.handler.ssl.SslHandler] SSLEngine.closeInbound() raised an exception after a handshake failure.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
at org.jboss.netty.handler.ssl.SslHandler.setHandshakeFailure(SslHandler.java:1451)
at org.jboss.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1315)
at org.jboss.netty.handler.ssl.SslHandler.decode(SslHandler.java:852)
at org.jboss.netty.handler.codec.frame.FrameDecoder.callDecode(FrameDecoder.java:425)
at org.jboss.netty.handler.codec.frame.FrameDecoder.messageReceived(FrameDecoder.java:303)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.atomiton.sff.imp.netty.SffRawMetering.messageReceived(SffRawMetering.java:149)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at com.atomiton.sff.imp.netty.NettyTransport$NettyPipeline.sendUpstream(NettyTransport.java:914)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
2019-04-30T14:31:36,003 UTC DEBUG (New I/O worker #4) [netty(?:?)] Component:DASHBOARD [org.jboss.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder] Decoding WebSocket Frame opCode=10
2019-04-30T14:31:36,004 UTC DEBUG (New I/O worker #4) [netty(?:?)] Component:DASHBOARD [org.jboss.netty.handler.codec.http.websocketx.WebSocket08FrameDecoder] Decoding WebSocket Frame length=0
2019-04-30T14:31:36,004 UTC WARN (New I/O worker #2) [SffTcpServer(log:855)] Component:DASHBOARD IO error in null+7012320048641541604:ssl<NioAcceptedSocketChannel[id: 0xaf52a017, /180.151.199.170:56987 => /172.31.14.2:9000]; Caused by: javax.net.ssl.SSLException: Received fatal alert: certificate_unknown
2019-04-30T14:31:36,044 UTC DEBUG (New I/O worker #1) [netty(?:?)] Component:DASHBOARD [org.jboss.netty.handler.ssl.SslHandler] SSLEngine.closeInbound() raised an exception after a handshake failure.
javax.net.ssl.SSLException: Inbound closed before receiving peer's close_notify: possible truncation attack?
at sun.security.ssl.Alerts.getSSLException(Alerts.java:208)
at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1666)
at sun.security.ssl.SSLEngineImpl.fatal(SSLEngineImpl.java:1634)
at sun.security.ssl.SSLEngineImpl.closeInbound(SSLEngineImpl.java:1561)
at org.jboss.netty.handler.ssl.SslHandler.setHandshakeFailure(SslHandler.java:1451)
at org.jboss.netty.handler.ssl.SslHandler.unwrap(SslHandler.java:1315)
at org.jboss.netty.handler.ssl.SslHandler.decode(SslHandler.java:852)
at org.jboss.netty.handler.codec.frame.FrameDecoder.callDecode(FrameDecoder.java:425)
at org.jboss.netty.handler.codec.frame.FrameDecoder.messageReceived(FrameDecoder.java:303)
at org.jboss.netty.channel.SimpleChannelUpstreamHandler.handleUpstream(SimpleChannelUpstreamHandler.java:70)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline$DefaultChannelHandlerContext.sendUpstream(DefaultChannelPipeline.java:791)
at org.jboss.netty.channel.SimpleChannelHandler.messageReceived(SimpleChannelHandler.java:142)
at com.atomiton.sff.imp.netty.SffRawMetering.messageReceived(SffRawMetering.java:149)
at org.jboss.netty.channel.SimpleChannelHandler.handleUpstream(SimpleChannelHandler.java:88)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:564)
at org.jboss.netty.channel.DefaultChannelPipeline.sendUpstream(DefaultChannelPipeline.java:559)
at com.atomiton.sff.imp.netty.NettyTransport$NettyPipeline.sendUpstream(NettyTransport.java:914)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:268)
at org.jboss.netty.channel.Channels.fireMessageReceived(Channels.java:255)
at org.jboss.netty.channel.socket.nio.NioWorker.read(NioWorker.java:88)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.process(AbstractNioWorker.java:108)
at org.jboss.netty.channel.socket.nio.AbstractNioSelector.run(AbstractNioSelector.java:337)
at org.jboss.netty.channel.socket.nio.AbstractNioWorker.run(AbstractNioWorker.java:89)
at org.jboss.netty.channel.socket.nio.NioWorker.run(NioWorker.java:178)
at org.jboss.netty.util.ThreadRenamingRunnable.run(ThreadRenamingRunnable.java:108)
at org.jboss.netty.util.internal.DeadLockProofWorker$1.run(DeadLockProofWorker.java:42)
at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
2019-04-30T14:31:36,044 UTC WARN (New I/O worker #1) [SffTcpServer(log:855)] Component:DASHBOARD IO error in null+7012320048641541607:ssl<NioAcceptedSocketChannel[id: 0x14620731, /180.151.199.170:56986 => /172.31.14.2:9000]; Caused by: javax.net.ssl.SSLException: Received fatal alert: certificate_unknown
Note that the 'error' is actually a debug statement.
If you do not want to ignore it, you can try to decrease the client connection timeout so that it becomes lower than the server's connection timeout, as sketched below.
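A minimal sketch with the Netty 3.x client API, assuming a plain ClientBootstrap on your side (the timeout values, host, and port are placeholders; your pipeline will also contain the SSL and WebSocket handlers):
import java.util.concurrent.Executors;
import org.jboss.netty.bootstrap.ClientBootstrap;
import org.jboss.netty.channel.ChannelPipeline;
import org.jboss.netty.channel.ChannelPipelineFactory;
import org.jboss.netty.channel.Channels;
import org.jboss.netty.channel.socket.nio.NioClientSocketChannelFactory;
import org.jboss.netty.handler.timeout.ReadTimeoutHandler;
import org.jboss.netty.util.HashedWheelTimer;

public class ClientTimeoutSketch {
    public static void main(String[] args) {
        ClientBootstrap bootstrap = new ClientBootstrap(
                new NioClientSocketChannelFactory(
                        Executors.newCachedThreadPool(), Executors.newCachedThreadPool()));

        // Fail the connect attempt quickly instead of waiting for the OS default.
        bootstrap.setOption("connectTimeoutMillis", 3000);

        final HashedWheelTimer timer = new HashedWheelTimer();
        bootstrap.setPipelineFactory(new ChannelPipelineFactory() {
            public ChannelPipeline getPipeline() {
                // Close the connection from the client side when the server stays silent
                // longer than this, keeping the client timeout below the server's.
                return Channels.pipeline(
                        new ReadTimeoutHandler(timer, 30) /* , ssl/codec handlers, ... */);
            }
        });
        // bootstrap.connect(new InetSocketAddress("server-host", 9000));
    }
}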