+ (void)load
{
    [self aspect_hookSelector:@selector(viewWillAppear:) withOptions:0 usingBlock:^(id<AspectInfo> info, BOOL animated) {
        HDFAppLog(@"**************==");
        NSString *currentPageName = [[info instance] hdf_className]; // page name, e.g. HDFSearchHospitalViewController
        // Firebase Analytics
        [FIRAnalytics logEventWithName:@"page" parameters:@{ @"pageName": currentPageName }];
        // Google Analytics
        id<GAITracker> tracker = [GAI sharedInstance].defaultTracker; // get the default tracker
        [tracker set:kGAIScreenName value:currentPageName];
        [tracker send:[[GAIDictionaryBuilder createScreenView] build]];
    } error:NULL];
}
@end
I use FIRAnalytics like this, but it prints the errors below:
FIRAnalytics/DEBUG> No network. Upload task will not be scheduled
and these:
2016-10-10 15:01:58.038 newPatient[8480:] FIRAnalytics/DEBUG> Do not schedule an upload task. Task already exists
2016-10-10 15:02:07.134 newPatient[8480:] FIRAnalytics/DEBUG> Network status has changed. Code, status: 1, Disconnected
2016-10-10 15:02:07.136 newPatient[8480:] FIRAnalytics/ERROR> Encounter network error. Code, error: -1003, Error Domain=NSURLErrorDomain Code=-1003 "A server with the specified hostname could not be found." UserInfo={NSUnderlyingError=0x7fbf305dcd30 {Error Domain=kCFErrorDomainCFNetwork Code=-1003 "(null)" UserInfo={_kCFStreamErrorCodeKey=8, _kCFStreamErrorDomainKey=12}}, NSErrorFailingURLStringKey=https://app-measurement.com/config/app/1:442821079824:ios:88cc404211cdcfea?platform=ios&app_instance_id=1419B4CCA10A4607861CEDB35CB95174&gmp_version=3403, NSErrorFailingURLKey=https://app-measurement.com/config/app/1:442821079824:ios:88cc404211cdcfea?platform=ios&app_instance_id=1419B4CCA10A4607861CEDB35CB95174&gmp_version=3403, _kCFStreamErrorDomainKey=12, _kCFStreamErrorCodeKey=8, NSLocalizedDescription=A server with the specified hostname could not be found.}
2016-10-10 15:02:07.138 newPatient[8480:] FIRAnalytics/DEBUG> Fetched configuration. Status code: 0
2016-10-10 15:02:07.138 newPatient[8480:] FIRAnalytics/DEBUG> Unable to get the configuration from server. Network request failed. Code, Error: 0, Error Domain=NSURLErrorDomain Code=-1003 "A server with the specified hostname could not be found." UserInfo={NSUnderlyingError=0x7fbf305dcd30 {Error Domain=kCFErrorDomainCFNetwork Code=-1003 "(null)" UserInfo={_kCFStreamErrorCodeKey=8, _kCFStreamErrorDomainKey=12}}, NSErrorFailingURLStringKey=https://app-measurement.com/config/app/1:442821079824:ios:88cc404211cdcfea?platform=ios&app_instance_id=1419B4CCA10A4607861CEDB35CB95174&gmp_version=3403, NSErrorFailingURLKey=https://app-measurement.com/config/app/1:442821079824:ios:88cc404211cdcfea?platform=ios&app_instance_id=1419B4CCA10A4607861CEDB35CB95174&gmp_version=3403, _kCFStreamErrorDomainKey=12, _kCFStreamErrorCodeKey=8, NSLocalizedDescription=A server with the specified hostname could not be found.}
2016-10-10 15:02:07.139 newPatient[8480:] FIRAnalytics/DEBUG> Network fetch failed. Will retry later. Code, error: 0, Error Domain=NSURLErrorDomain Code=-1003 "A server with the specified hostname could not be found." UserInfo={NSUnderlyingError=0x7fbf305dcd30 {Error Domain=kCFErrorDomainCFNetwork Code=-1003 "(null)" UserInfo={_kCFStreamErrorCodeKey=8, _kCFStreamErrorDomainKey=12}}, NSErrorFailingURLStringKey=https://app-measurement.com/config/app/1:442821079824:ios:88cc404211cdcfea?platform=ios&app_instance_id=1419B4CCA10A4607861CEDB35CB95174&gmp_version=3403, NSErrorFailingURLKey=https://app-measurement.com/config/app/1:442821079824:ios:88cc404211cdcfea?platform=ios&app_instance_id=1419B4CCA10A4607861CEDB35CB95174&gmp_version=3403, _kCFStreamErrorDomainKey=12, _kCFStreamErrorCodeKey=8, NSLocalizedDescription=A server with the specified hostname could not be found.}
2016-10-10 15:02:07.139 newPatient[8480:] FIRAnalytics/DEBUG> No network. Upload task will not be scheduled
2016-10-10 15:02:07.139 newPatient[8480:] FIRAnalytics/DEBUG> Canceling active timer
2016-10-10 15:02:27.958 newPatient[8480:13764850] Firebase/Network/ERROR> Encounter network error. Code, error: -1001, Error Domain=NSURLErrorDomain Code=-1001 "The request timed out." UserInfo={NSErrorFailingURLStringKey=https://play.googleapis.com/log, NSErrorFailingURLKey=https://play.googleapis.com/log, _kCFStreamErrorDomainKey=4, _kCFStreamErrorCodeKey=-2103, NSLocalizedDescription=The request timed out.}
2016-10-10 15:02:27.961 newPatient[8480] [Firebase/Core][I-COR000020] Error posting to Clearcut: Error Domain=NSURLErrorDomain Code=-1001 "The request timed out." UserInfo={NSErrorFailingURLStringKey=https://play.googleapis.com/log, NSErrorFailingURLKey=https://play.googleapis.com/log, _kCFStreamErrorDomainKey=4, _kCFStreamErrorCodeKey=-2103, NSLocalizedDescription=The request timed out.}, with Status Code: 0
debug logs below:
2016-10-10 11:38:58.152 newPatient[7428:] FIRAnalytics/DEBUG> Debug mode is enabled. Marking event as debug and real-time. Event name, parameters: page, {
"_dbg" = 1;
"_o" = app;
"_r" = 1;
pageName = HDFPhDoctorIntroduceViewController;
}
There is either no network connection, or your network is flaky enough that the SDK cannot send data to the server. If there is no network, it will not schedule an upload task. Depending on where you are, traffic to Google endpoints may also be filtered, so uploads fail as well. I think this is normal behavior.
We use ReactiveKafkaConsumerTemplate to receive messages, then acknowledge the offset after processing each message. We enabled out-of-order commits (maxDeferredCommits=250) and noticed the consumer pauses indefinitely in certain situations.
The pattern of events is:
1. There is some network glitch or Kafka server maintenance, and a RetriableCommitFailedException is triggered.
2. The consumer pauses with “Paused - commits are retrying”.
3. The consumer logs “Resumed” and “Emitting records”, but there are no more “Async committing” logs (and no acknowledgement exception is observed).
4. After some “Emitting records” logs, the consumer pauses with “Paused - too many deferred commits”.
5. No more “ConsumerEventLoop” logs appear.
6. A rebalance fixes the issue. (We have 3 consumers deployed on 3 hosts; removing 1 host fixes it.)
reactor-kafka-1.3.13.jar
logging:
  level:
    reactor:
      kafka:
        receiver: DEBUG
maxDeferredCommits: 250
ConsumerConfig
auto.commit.interval.ms = 1000
auto.offset.reset = earliest
connections.max.idle.ms = 540000
enable.auto.commit = false
heartbeat.interval.ms = 1000
max.poll.interval.ms = 300000
max.poll.records = 500
request.timeout.ms = 30000
session.timeout.ms = 10000
Logs:
11/24/22 6:50:06.386 AM DEBUG r.k.r.internals.ConsumerEventLoop Async committing: {
test-0=OffsetAndMetadata{offset=12206778, leaderEpoch=null, metadata=''},
test-1=OffsetAndMetadata{offset=12253822, leaderEpoch=null, metadata=''},
test-2=OffsetAndMetadata{offset=12257066, leaderEpoch=null, metadata=''},
test-3=OffsetAndMetadata{offset=12265134, leaderEpoch=null, metadata=''}}
No more “Async committing” after this
11/24/22 6:50:06.451 AM WARN r.k.r.internals.ConsumerEventLoop Commit failed with org.apache.kafka.clients.consumer.RetriableCommitFailedException: Offset commit failed with a retriable exception. You should retry committing the latest consumed offsets. Caused by: org.apache.kafka.common.errors.DisconnectException: null
11/24/22 6:50:06.452 AM WARN r.k.r.internals.ConsumerEventLoop Commit failed with exceptionorg.apache.kafka.clients.consumer.RetriableCommitFailedException: Offset commit failed with a retriable exception. You should retry committing the latest consumed offsets., retries remaining 99
…
11/24/22 6:50:06.452 AM WARN r.k.r.internals.ConsumerEventLoop Commit failed with exceptionorg.apache.kafka.clients.consumer.RetriableCommitFailedException: Offset commit failed with a retriable exception. You should retry committing the latest consumed offsets., retries remaining 93
11/24/22 6:50:06.486 DEBUG r.k.r.internals.ConsumerEventLoop -Paused - commits are retrying
11/24/22 6:50:06.987 DEBUG r.k.r.internals.ConsumerEventLoop -Resumed
11/24/22 6:50:07.387 DEBUG r.k.r.internals.ConsumerEventLoop -Emitting 1 records, requested now 1
11/24/22 6:50:07.387 DEBUG r.k.r.internals.ConsumerEventLoop -onRequest.toAdd 1, paused false
…
11/24/22 6:51:05.248 DEBUG r.k.r.internals.ConsumerEventLoop -Paused - too many deferred commits
11/24/22 6:51:05.248 DEBUG r.k.r.internals.ConsumerEventLoop -Consumer woken
No more “ConsumerEventLoop” log after this until rebalance
code detail:
consumeMessage() {
    ReceiverOptions basicReceiverOptions = ReceiverOptions.create(consumerProperties)
            .maxDeferredCommits(250)
            .commitInterval(Duration.ofMillis(commitInterval))
            .subscription(topics);
    reactiveKafkaConsumerTemplate = new ReactiveKafkaConsumerTemplate<>(basicReceiverOptions);
    return reactiveKafkaConsumerTemplate
            .receive()
            .publishOn(Schedulers.boundedElastic())
            // delay each record by 500 ms; process at most 10 concurrently
            .flatMap(x -> Mono.just(x).delayElement(Duration.ofMillis(500)), 10)
            .flatMap(receiverRecord ->
                    // process the record
                    messageServiceImpl.process(receiverRecord)
                            .doFinally(x -> {
                                // ack the offset
                                log.info("MessageConsumer ACK offset={} ", receiverRecord.offset());
                                receiverRecord.receiverOffset().acknowledge();
                            })
                            .subscribeOn(Schedulers.boundedElastic())
            )
    .....
}
Looks like it might be a bug; please open an issue on GitHub.
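In the meantime, a possible stopgap (a minimal sketch, not an official reactor-kafka recovery mechanism; it assumes the consumeMessage() method above returns a Flux<Void>) is to rebuild the whole pipeline whenever it terminates with an error, so a wedged consumer is closed and a fresh subscription starts:

import java.time.Duration;
import reactor.core.publisher.Flux;
import reactor.util.retry.Retry;

public Flux<Void> consumeWithRestart() {
    // defer so every (re)subscription builds a brand-new consumer pipeline
    return Flux.defer(this::consumeMessage)
            // recreate the pipeline on error with exponential backoff; note this
            // only fires on an error signal - a silent stall like the one described
            // would additionally need a watchdog (e.g. a timeout on expected traffic)
            .retryWhen(Retry.backoff(Long.MAX_VALUE, Duration.ofSeconds(5)));
}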
I am getting the below error in an exception in a BizTalk application. Any clue?
The exception has the fault message below:
<![CDATA[<s:Fault xmlns:s="http://schemas.xmlsoap.org/soap/envelope/">
  <faultcode xmlns:bpm="http://schemas.cordys.com/bpm/_null">bpm:5000</faultcode>
  <faultstring xml:lang="en-US">Internal Error</faultstring>
  <faultactor>http://schemas.cordys.com/bpm/execution/1.0</faultactor>
  <detail>
    <cordys:FaultDetails xmlns:cordys="http://schemas.cordys.com/General/1.0/">
      <cordys:LocalizableMessage>
        <cordys:MessageCode>Cordys.BPM.Messages.processDefinedException</cordys:MessageCode>
        <cordys:Insertion>Internal Error</cordys:Insertion>
      </cordys:LocalizableMessage>
    </cordys:FaultDetails>
    <bpm:FaultDetails xmlns:bpm="http://schemas.cordys.com/bpm/1.0/" xmlns:cordys="http://schemas.cordys.com/General/1.0/">
      <cordys:FaultDetailString>Internal Error</cordys:FaultDetailString>
      <cordys:LocalizableMessage>
Hi, I am trying to connect to Snowflake from R Workbench. This is the error received when connecting with Okta.
con <- dbConnect(jdbcDriver, "jdbc:snowflake://company.snowflakecomputing.com/?authenticator=https://company.okta.com/", 'name@company.com', 'pass')
Sep 16, 2021 10:07:42 PM net.snowflake.client.core.SessionUtil handleFederatedFlowError
SEVERE: IOException when authenticating with https://company.okta.com/
java.net.MalformedURLException: no protocol: /login/cert
at java.net.URL.<init>(URL.java:611)
at java.net.URL.<init>(URL.java:508)
at java.net.URL.<init>(URL.java:457)
at net.snowflake.client.core.SessionUtil.isPrefixEqual(SessionUtil.java:1218)
at net.snowflake.client.core.SessionUtil.federatedFlowStep4(SessionUtil.java:999)
at net.snowflake.client.core.SessionUtil.getSamlResponseUsingOkta(SessionUtil.java:1206)
at net.snowflake.client.core.SessionUtil.newSession(SessionUtil.java:378)
at net.snowflake.client.core.SessionUtil.openSession(SessionUtil.java:284)
at net.snowflake.client.core.SFSession.open(SFSession.java:446)
at net.snowflake.client.jdbc.DefaultSFConnectionHandler.initialize(DefaultSFConnectionHandler.java:104)
at net.snowflake.client.jdbc.DefaultSFConnectionHandler.initializeConnection(DefaultSFConnectionHandler.java:79)
at net.snowflake.client.jdbc.SnowflakeConnectionV1.initConnectionWithImpl(SnowflakeConnectionV1.java:116)
at net.snowflake.client.jdbc.SnowflakeConnectionV1.<init>(SnowflakeConnectionV1.java:96)
at net.snowflake.client.jdbc.SnowflakeDriver.connect(SnowflakeDriver.java:164)
at java.sql.DriverManager.getConnection(DriverManager.java:664)
at java.sql.DriverManager.getConnection(DriverManager.java:247)
Sep 16, 2021 10:07:43 PM net.snowflake.client.core.SessionUtil handleFederatedFlowError
SEVERE: IOException when authenticating with https://company.okta.com/
java.net.MalformedURLException: no protocol: /login/cert
at java.net.URL.<init>(URL.java:611)
at java.net.URL.<init>(URL.java:508)
at java.net.URL.<init>(URL.java:457)
at net.snowflake.client.core.SessionUtil.isPrefixEqual(SessionUtil.java:1218)
at net.snowflake.client.core.SessionUtil.federatedFlowStep4(SessionUtil.java:999)
at net.snowflake.client.core.SessionUtil.getSamlResponseUsingOkta(SessionUtil.java:1206)
at net.snowflake.client.core.SessionUtil.newSession(SessionUtil.java:378)
at net.snowflake.client.core.SessionUtil.openSession(SessionUtil.java:284)
at net.snowflake.client.core.SFSession.open(SFSession.java:446)
at net.snowflake.client.jdbc.DefaultSFConnectionHandler.initialize(DefaultSFConnectionHandler.java:104)
at net.snowflake.client.jdbc.DefaultSFConnectionHandler.initializeConnection(DefaultSFConnectionHandler.java:79)
at net.snowflake.client.jdbc.SnowflakeConnectionV1.initConnectionWithImpl(SnowflakeConnectionV1.java:116)
at net.snowflake.client.jdbc.SnowflakeConnectionV1.<init>(SnowflakeConnectionV1.java:96)
at net.snowflake.client.jdbc.SnowflakeDriver.connect(SnowflakeDriver.java:164)
Error in .jcall(drv@jdrv, "Ljava/sql/Connection;", "connect", as.character(url)[1], :
net.snowflake.client.jdbc.SnowflakeSQLException: JDBC driver encountered communication error. Message: Exception encountered when opening connection: no protocol: /login/cert.
I presume that Okta is set up with MFA. If so, that is the cause: the Snowflake drivers do not support native Okta authentication when MFA is enabled.
You need to use externalbrowser as the authenticator option if the requirement is Okta + MFA.
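For example (a minimal sketch against the Snowflake JDBC driver, which is what the R call uses under the hood; the host and user are the placeholders from the question):

import java.sql.Connection;
import java.sql.DriverManager;
import java.util.Properties;

public class SnowflakeOktaExample {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("user", "name@company.com");          // placeholder user from the question
        props.put("authenticator", "externalbrowser");  // opens the system browser for Okta SSO + MFA
        try (Connection con = DriverManager.getConnection(
                "jdbc:snowflake://company.snowflakecomputing.com/", props)) {
            System.out.println("connected: " + !con.isClosed());
        }
    }
}

From R, the same effect is achieved by putting authenticator=externalbrowser in the JDBC URL passed to dbConnect, in place of the Okta URL.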
We are trying to use Amazon ElastiCache in place of Redis.
The connection is created from the dev box, but when we access it we get the error below:
org.springframework.data.redis.RedisConnectionFailureException: java.net.SocketTimeoutException: Read timed out; nested exception is redis.clients.jedis.exceptions.JedisConnectionException: java.net.SocketTimeoutException: Read timed out
at org.springframework.data.redis.connection.jedis.JedisExceptionConverter.convert(JedisExceptionConverter.java:47)
at org.springframework.data.redis.connection.jedis.JedisExceptionConverter.convert(JedisExceptionConverter.java:36)
at org.springframework.data.redis.PassThroughExceptionTranslationStrategy.translate(PassThroughExceptionTranslationStrategy.java:37)
at org.springframework.data.redis.FallbackExceptionTranslationStrategy.translate(FallbackExceptionTranslationStrategy.java:37)
at org.springframework.data.redis.connection.jedis.JedisConnection.convertJedisAccessException(JedisConnection.java:196)
at org.springframework.data.redis.connection.jedis.JedisConnection.hGetAll(JedisConnection.java:2536)
at org.springframework.data.redis.core.DefaultHashOperations$13.doInRedis(DefaultHashOperations.java:223)
at org.springframework.data.redis.core.DefaultHashOperations$13.doInRedis(DefaultHashOperations.java:220)
at org.springframework.data.redis.core.RedisTemplate.execute(RedisTemplate.java:191)
at org.springframework.data.redis.core.RedisTemplate.execute(RedisTemplate.java:153)
at org.springframework.data.redis.core.AbstractOperations.execute(AbstractOperations.java:86)
at org.springframework.data.redis.core.DefaultHashOperations.entries(DefaultHashOperations.java:220)
at org.springframework.data.redis.core.DefaultBoundHashOperations.entries(DefaultBoundHashOperations.java:101)
at org.springframework.session.data.redis.RedisOperationsSessionRepository.getSession(RedisOperationsSessionRepository.java:432)
at org.springframework.session.data.redis.RedisOperationsSessionRepository.getSession(RedisOperationsSessionRepository.java:402)
at org.springframework.session.data.redis.RedisOperationsSessionRepository.getSession(RedisOperationsSessionRepository.java:245)
at org.springframework.session.web.http.SessionRepositoryFilter$SessionRepositoryRequestWrapper.getSession(SessionRepositoryFilter.java:327)
at org.springframework.session.web.http.SessionRepositoryFilter$SessionRepositoryRequestWrapper.getSession(SessionRepositoryFilter.java:344)
at org.springframework.session.web.http.SessionRepositoryFilter$SessionRepositoryRequestWrapper.getSession(SessionRepositoryFilter.java:390)
at org.springframework.session.web.http.SessionRepositoryFilter$SessionRepositoryRequestWrapper.getSession(SessionRepositoryFilter.java:217)
at com.operative.dashboard.web.multitenancy.filter.TenantContextFilter.setTenantAndUser(TenantContextFilter.java:89)
at com.operative.dashboard.web.multitenancy.filter.TenantContextFilter.doFilter(TenantContextFilter.java:75)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1637)
at com.operative.dashboard.web.logging.filter.LongRequestDetectionFilter.doFilter(LongRequestDetectionFilter.java:69)
at com.operative.dashboard.web.logging.filter.LongRequestDetectionFilter.doFilter(LongRequestDetectionFilter.java:46)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1637)
at org.springframework.session.web.http.SessionRepositoryFilter.doFilterInternal(SessionRepositoryFilter.java:167)
at org.springframework.session.web.http.OncePerRequestFilter.doFilter(OncePerRequestFilter.java:80)
at org.springframework.web.filter.DelegatingFilterProxy.invokeDelegate(DelegatingFilterProxy.java:343)
at org.springframework.web.filter.DelegatingFilterProxy.doFilter(DelegatingFilterProxy.java:260)
at org.eclipse.jetty.servlet.ServletHandler$CachedChain.doFilter(ServletHandler.java:1629)
at org.eclipse.jetty.servlet.ServletHandler.doHandle(ServletHandler.java:533)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:143)
at org.eclipse.jetty.security.SecurityHandler.handle(SecurityHandler.java:524)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:190)
at org.eclipse.jetty.server.session.SessionHandler.doHandle(SessionHandler.java:1595)
at org.eclipse.jetty.server.handler.ScopedHandler.nextHandle(ScopedHandler.java:188)
at org.eclipse.jetty.server.handler.ContextHandler.doHandle(ContextHandler.java:1253)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:168)
at org.eclipse.jetty.servlet.ServletHandler.doScope(ServletHandler.java:473)
at org.eclipse.jetty.server.session.SessionHandler.doScope(SessionHandler.java:1564)
at org.eclipse.jetty.server.handler.ScopedHandler.nextScope(ScopedHandler.java:166)
at org.eclipse.jetty.server.handler.ContextHandler.doScope(ContextHandler.java:1155)
at org.eclipse.jetty.server.handler.ScopedHandler.handle(ScopedHandler.java:141)
at org.eclipse.jetty.server.handler.ContextHandlerCollection.handle(ContextHandlerCollection.java:219)
at org.eclipse.jetty.server.handler.HandlerCollection.handle(HandlerCollection.java:126)
at org.eclipse.jetty.server.handler.HandlerWrapper.handle(HandlerWrapper.java:132)
at org.eclipse.jetty.server.Server.handle(Server.java:564)
at org.eclipse.jetty.server.HttpChannel.handle(HttpChannel.java:317)
at org.eclipse.jetty.server.HttpConnection.onFillable(HttpConnection.java:251)
at org.eclipse.jetty.io.AbstractConnection$ReadCallback.succeeded(AbstractConnection.java:279)
at org.eclipse.jetty.io.FillInterest.fillable(FillInterest.java:110)
at org.eclipse.jetty.io.ChannelEndPoint$2.run(ChannelEndPoint.java:124)
at org.eclipse.jetty.util.thread.Invocable.invokePreferred(Invocable.java:128)
at org.eclipse.jetty.util.thread.Invocable$InvocableExecutor.invoke(Invocable.java:222)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.doProduce(EatWhatYouKill.java:294)
at org.eclipse.jetty.util.thread.strategy.EatWhatYouKill.produce(EatWhatYouKill.java:126)
at org.eclipse.jetty.util.thread.QueuedThreadPool.runJob(QueuedThreadPool.java:672)
at org.eclipse.jetty.util.thread.QueuedThreadPool$2.run(QueuedThreadPool.java:590)
at java.lang.Thread.run(Thread.java:745)
Caused by: redis.clients.jedis.exceptions.JedisConnectionException: java.net.SocketTimeoutException: Read timed out
at redis.clients.util.RedisInputStream.ensureFill(RedisInputStream.java:201)
at redis.clients.util.RedisInputStream.read(RedisInputStream.java:180)
at redis.clients.jedis.Protocol.processBulkReply(Protocol.java:158)
at redis.clients.jedis.Protocol.process(Protocol.java:132)
at redis.clients.jedis.Protocol.processMultiBulkReply(Protocol.java:183)
at redis.clients.jedis.Protocol.process(Protocol.java:134)
at redis.clients.jedis.Protocol.read(Protocol.java:192)
at redis.clients.jedis.Connection.readProtocolWithCheckingBroken(Connection.java:282)
at redis.clients.jedis.Connection.getBinaryMultiBulkReply(Connection.java:218)
at redis.clients.jedis.BinaryJedis.hgetAll(BinaryJedis.java:865)
at org.springframework.data.redis.connection.jedis.JedisConnection.hGetAll(JedisConnection.java:2534)
... 55 more
Caused by: java.net.SocketTimeoutException: Read timed out
at java.net.SocketInputStream.socketRead0(Native Method)
at java.net.SocketInputStream.socketRead(SocketInputStream.java:116)
at java.net.SocketInputStream.read(SocketInputStream.java:170)
at java.net.SocketInputStream.read(SocketInputStream.java:141)
at java.net.SocketInputStream.read(SocketInputStream.java:127)
at redis.clients.util.RedisInputStream.ensureFill(RedisInputStream.java:195)
I am able to connect to the same ElastiCache instance from my local machine,
and we have allowed the dev box IP in the ElastiCache security settings.
I am not too sure about the IP and VPC setup; it looks like an access or timeout issue.
Thanks in advance.
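For reference: if this turns out to be a plain client-side read timeout rather than a VPC/security-group problem, the knob lives on the Jedis connection factory in Spring Data Redis 1.x (a sketch only; the endpoint is hypothetical):

import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.data.redis.connection.jedis.JedisConnectionFactory;

@Configuration
public class RedisConfig {
    @Bean
    public JedisConnectionFactory jedisConnectionFactory() {
        JedisConnectionFactory factory = new JedisConnectionFactory();
        factory.setHostName("my-cache.xxxxxx.use1.cache.amazonaws.com"); // hypothetical endpoint
        factory.setPort(6379);
        factory.setTimeout(15000); // connect/read timeout in ms; Jedis defaults to 2000
        return factory;
    }
}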
> rhive.connect(host = "192.168.1.4", port = 9000, defaultFS = "hdfs://localhost:9000")
Warning:
+----------------------------------------------------------+
+ / hiveServer2 argument has not been provided correctly. +
+ / RHive will use a default value: hiveServer2=TRUE. +
+----------------------------------------------------------+
16/08/14 14:12:42 INFO jdbc.Utils: Supplied authorities: 192.168.1.4:9000
16/08/14 14:12:42 INFO jdbc.Utils: Resolved authority: 192.168.1.4:9000
16/08/14 14:12:42 INFO jdbc.HiveConnection: Transport Used for JDBC connection: null
Exception in thread "Thread-14" java.lang.RuntimeException: java.sql.SQLException: Could not open client transport with JDBC Uri: jdbc:hive2://192.168.1.4:9000/default: java.net.ConnectException: Connection refused
at com.nexr.rhive.hive.HiveJdbcClient$HiveJdbcConnector.connect(HiveJdbcClient.java:337)
at com.nexr.rhive.hive.HiveJdbcClient$HiveJdbcConnector.run(HiveJdbcClient.java:322)
Caused by: java.sql.SQLException: Could not open client transport with JDBC Uri: jdbc:hive2://192.168.1.4:9000/default: java.net.ConnectException: Connection refused
at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:208)
at org.apache.hive.jdbc.HiveConnection.<init>(HiveConnection.java:154)
at org.apache.hive.jdbc.HiveDriver.connect(HiveDriver.java:107)
at java.sql.DriverManager.getConnection(DriverManager.java:571)
at java.sql.DriverManager.getConnection(DriverManager.java:215)
at com.nexr.rhive.hive.DatabaseConnection.connect(DatabaseConnection.java:51)
at com.nexr.rhive.hive.HiveJdbcClient$HiveJdbcConnector.connect(HiveJdbcClient.java:330)
... 1 more
Caused by: org.apache.thrift.transport.TTransportException: java.net.ConnectException: Connection refused
at org.apache.thrift.transport.TSocket.open(TSocket.java:226)
at org.apache.thrift.transport.TSaslTransport.open(TSaslTransport.java:266)
at org.apache.thrift.transport.TSaslClientTransport.open(TSaslClientTransport.java:37)
at org.apache.hive.jdbc.HiveConnection.openTransport(HiveConnection.java:183)
... 7 more
Caused by: java.net.ConnectException: Connection refused
at java.net.PlainSocketImpl.socketConnect(Native Method)
at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:339)
at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:200)
at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:182)
at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
at java.net.Socket.connect(Socket.java:579)
at org.apache.thrift.transport.TSocket.open(TSocket.java:221)
... 10 more
Error: java.lang.IllegalStateException: Not connected to hiveserver
You are not passing the password.
Try passing the password as part of your script; null is returned when you don't provide valid credentials.
Example:
jdbc:hive2://192.168.1.4:9000/default username password
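If you want to verify the credentials outside RHive, the same thing can be tried directly against the Hive JDBC driver (a minimal sketch; username and password are placeholders, host and port kept from the question):

import java.sql.Connection;
import java.sql.DriverManager;

public class HiveJdbcExample {
    public static void main(String[] args) throws Exception {
        Class.forName("org.apache.hive.jdbc.HiveDriver"); // same driver RHive uses internally
        try (Connection con = DriverManager.getConnection(
                "jdbc:hive2://192.168.1.4:9000/default", "username", "password")) {
            System.out.println("connected: " + !con.isClosed());
        }
    }
}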