Python's BaseHTTPRequestHandler has an issue with forms sent through POST!
I have seen other people ask the same question (Why is the GET method faster than POST?), but the time difference in my case is far too large (1 second).
Python server:
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
import datetime

def get_ms_since_start(start=False):
    global start_ms
    cur_time = datetime.datetime.now()
    # I made sure to stay within hour boundaries while making requests
    ms = cur_time.minute*60000 + cur_time.second*1000 + int(cur_time.microsecond/1000)
    if start:
        start_ms = ms
        return 0
    else:
        return ms - start_ms

class MyServer(BaseHTTPRequestHandler, object):
    def do_GET(self):
        print "Start get method at %d ms" % get_ms_since_start(True)
        field_data = self.path
        self.send_response(200)
        self.end_headers()
        self.wfile.write(str(field_data))
        print "Sent response at %d ms" % get_ms_since_start()
        return

    def do_POST(self):
        print "Start post method at %d ms" % get_ms_since_start(True)
        length = int(self.headers.getheader('content-length'))
        print "Length to read is %d at %d ms" % (length, get_ms_since_start())
        field_data = self.rfile.read(length)
        print "Reading rfile completed at %d ms" % get_ms_since_start()
        self.send_response(200)
        self.end_headers()
        self.wfile.write(str(field_data))
        print "Sent response at %d ms" % get_ms_since_start()
        return

if __name__ == '__main__':
    server = HTTPServer(('0.0.0.0', 8082), MyServer)
    print 'Starting server, use <Ctrl-C> to stop'
    server.serve_forever()
A GET request to the Python server is very fast
time curl -i http://0.0.0.0:8082\?one\=1
prints
HTTP/1.0 200 OK
Server: BaseHTTP/0.3 Python/2.7.6
Date: Sun, 18 Sep 2016 07:13:47 GMT
/?one=1curl http://0.0.0.0:8082\?one\=1 0.00s user 0.00s system 45% cpu 0.012 total
and on the server side:
Start get method at 0 ms
127.0.0.1 - - [18/Sep/2016 00:26:30] "GET /?one=1 HTTP/1.1" 200 -
Sent response at 0 ms
Instantaneous!
A POST request sending a form to the Python server is very slow
time curl http://0.0.0.0:8082 -F one=1
prints
--------------------------2b10061ae9d79733
Content-Disposition: form-data; name="one"
1
--------------------------2b10061ae9d79733--
curl http://0.0.0.0:8082 -F one=1 0.00s user 0.00s system 0% cpu 1.015 total
and on the server side:
Start post method at 0 ms
Length to read is 139 at 0 ms
Reading rfile completed at 1002 ms
127.0.0.1 - - [18/Sep/2016 00:27:16] "POST / HTTP/1.1" 200 -
Sent response at 1002 ms
Specifically, self.rfile.read(length) takes a full second even though the form data is tiny.
A POST request sending data (not a form) to the Python server is very fast
time curl -i http://0.0.0.0:8082 -d one=1
prints
HTTP/1.0 200 OK
Server: BaseHTTP/0.3 Python/2.7.6
Date: Sun, 18 Sep 2016 09:09:25 GMT
one=1curl -i http://0.0.0.0:8082 -d one=1 0.00s user 0.00s system 32% cpu 0.022 total
and on the server side:
Start post method at 0 ms
Length to read is 5 at 0 ms
Reading rfile completed at 0 ms
127.0.0.1 - - [18/Sep/2016 02:10:18] "POST / HTTP/1.1" 200 -
Sent response at 0 ms
node.js server:
var http = require('http');
var qs = require('querystring');

http.createServer(function(req, res) {
    if (req.method == 'POST') {
        var whole = ''
        req.on('data', function(chunk) {
            whole += chunk.toString()
        })
        req.on('end', function() {
            console.log(whole)
            res.writeHead(200, 'OK', {'Content-Type': 'text/html'})
            res.end('Data received.')
        })
    }
}).listen(8082)
A POST request sending a form to the node.js server is very fast
time curl -i http://0.0.0.0:8082 -F one=1
prints:
HTTP/1.1 100 Continue
HTTP/1.1 200 OK
Content-Type: text/html
Date: Sun, 18 Sep 2016 10:31:38 GMT
Connection: keep-alive
Transfer-Encoding: chunked
Data received.curl -i http://0.0.0.0:8082 -F one=1 0.00s user 0.00s system 42% cpu 0.013 total
I think this is the answer to your problem: libcurl delays for 1 second before uploading data, command-line curl does not
libcurl sends an Expect: 100-continue header and waits 1 second for a response before sending the form data (in the case of the -F command).
In the case of -d, it does not send the Expect: 100-continue header, for whatever reason.
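A common workaround (assuming your curl behaves like the one above) is to suppress that header by passing an empty Expect header, which makes libcurl upload the form data immediately:
time curl -i -H 'Expect:' http://0.0.0.0:8082 -F one=1
Alternatively, the server could answer the Expect: 100-continue itself, but disabling the header on the client side is the simplest fix here.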
In my project I use Karate 0.9.5 with Oracle JDK 8.
From time to time our pipeline shows a problem: Karate fails to close Chrome processes it started earlier. Is there any solution to this?
I tried to solve it by explicitly calling close() and quit(), but that didn't help. After the run finished, I found a couple of active Chrome processes on the server.
Here's a log:
12:51:45.227 preferred port 9222 not available, will use: 52436
12:51:47.524 request:
1 > GET http://localhost:52436/json
1 > Accept-Encoding: gzip,deflate
1 > Connection: Keep-Alive
1 > Host: localhost:52436
1 > User-Agent: Apache-HttpClient/4.5.11 (Java/1.8.0_181)
12:51:47.584 response time in milliseconds: 58,31
1 < 200
1 < Content-Length: 361
1 < Content-Type: application/json; charset=UTF-8
[ {
"description": "",
"devtoolsFrontendUrl": "/devtools/inspector.html?ws=localhost:52436/devtools/page/4BD6A5C19E01B01D88995CD69367F81F",
"id": "4BD6A5C19E01B01D88995CD69367F81F",
"title": "",
"type": "page",
"url": "about:blank",
"webSocketDebuggerUrl": "ws://localhost:52436/devtools/page/4BD6A5C19E01B01D88995CD69367F81F"
} ]
12:51:47.584 root frame id: 4BD6A5C19E01B01D88995CD69367F81F
12:51:47.632 >> {"method":"Target.activateTarget","params":{"targetId":"4BD6A5C19E01B01D88995CD69367F81F"},"id":1}
12:51:47.638 << {"id":1,"result":{}}
12:51:47.639 >> {"method":"Page.enable","id":2}
12:51:47.883 << {"id":2,"result":{}}
12:51:47.885 >> {"method":"Runtime.enable","id":3}
12:51:47.889 << {"method":"Runtime.executionContextCreated","params":{"context":{"id":1,"origin":"://","name":"","auxData":{"isDefault":true,"type":"default","frameId":"4BD6A5C19E01B01D88995CD69367F81F"}}}}
12:51:47.890 << {"id":3,"result":{}}
12:51:47.890 >> {"method":"Target.setAutoAttach","params":{"autoAttach":true,"waitForDebuggerOnStart":false,"flatten":true},"id":4}
12:51:47.892 << {"id":4,"result":{}}
12:52:02.894 << timed out after milliseconds: 15000 - [id: 4, method: Target.setAutoAttach, params: {autoAttach=true, waitForDebuggerOnStart=false, flatten=true}]
12:52:02.917 driver config / start failed: failed to get reply for: [id: 4, method: Target.setAutoAttach, params: {autoAttach=true, waitForDebuggerOnStart=false, flatten=true}], options: {type=chrome, showDriverLog=true, httpConfig={readTimeout=60000}, headless=true, target=null}
I think the easiest way to solve it is to extend the root web driver interface com.intuit.karate.driver.Driver by adding an additional method like getPID(), which must return the PID of the launched process.
Can you log a bug and also provide some information as to what happened before this? Are you using the Docker image, etc.? It looks like the previous Scenario did not shut down cleanly.
But before that, do you mind upgrading to 0.9.6.RC3 (just released) and trying that? There have been a couple of tweaks to the life-cycle, especially trying to close Chrome without waiting for a response. It may also be worth adding a couple of changes: a) Target.setAutoAttach without waiting, and b) a configure option to not start Chrome if the port is in use.
I am trying to get some insight into a chunked endpoint and therefore planned to print what the server sends me, chunk by chunk.
I failed to do so, so I wrote a test to see whether OkHttp/Retrofit are working as I expect.
The following test should deliver some chunks to the console, but all I get is the full response.
I am a bit lost as to what I am missing to make OkHttp3's MockWebServer send me chunks.
I found this Retrofit issue entry, but the answer is a bit ambiguous to me: Chunked Transfer Encoding Response
class ChunkTest {

    @Rule
    @JvmField
    val rule = RxImmediateSchedulerRule() // custom rule to run Rx synchronously

    @Test
    fun `test Chunked Response`() {
        val mockWebServer = MockWebServer()
        mockWebServer.enqueue(getMockChunkedResponse())

        val retrofit = Retrofit.Builder()
            .baseUrl(mockWebServer.url("/"))
            .addCallAdapterFactory(RxJava2CallAdapterFactory.create())
            .client(OkHttpClient.Builder().build())
            .build()
        val chunkedApi = retrofit.create(ChunkedApi::class.java)

        chunkedApi.getChunked()
            .subscribeOn(Schedulers.io())
            .observeOn(AndroidSchedulers.mainThread())
            .subscribe({
                System.out.println(it.string())
            }, {
                System.out.println(it.message)
            })

        mockWebServer.shutdown()
    }

    private fun getMockChunkedResponse(): MockResponse {
        val mockResponse = MockResponse()
        mockResponse.setHeader("Transfer-Encoding", "chunked")
        mockResponse.setChunkedBody("THIS IS A CHUNKED RESPONSE!", 5)
        return mockResponse
    }
}

interface ChunkedApi {
    @Streaming
    @GET("/")
    fun getChunked(): Flowable<ResponseBody>
}
Test console output:
Nov 06, 2018 4:08:15 PM okhttp3.mockwebserver.MockWebServer$2 execute
INFO: MockWebServer[49293] starting to accept connections
Nov 06, 2018 4:08:15 PM okhttp3.mockwebserver.MockWebServer$3 processOneRequest
INFO: MockWebServer[49293] received request: GET / HTTP/1.1 and responded: HTTP/1.1 200 OK
THIS IS A CHUNKED RESPONSE!
Nov 06, 2018 4:08:15 PM okhttp3.mockwebserver.MockWebServer$2 acceptConnections
INFO: MockWebServer[49293] done accepting connections: Socket closed
I expected it to be more like this (body "cut" every 5 bytes):
Nov 06, 2018 4:08:15 PM okhttp3.mockwebserver.MockWebServer$2 execute
INFO: MockWebServer[49293] starting to accept connections
Nov 06, 2018 4:08:15 PM okhttp3.mockwebserver.MockWebServer$3 processOneRequest
INFO: MockWebServer[49293] received request: GET / HTTP/1.1 and responded: HTTP/1.1 200 OK
THIS
IS A
CHUNKE
D RESPO
NSE!
Nov 06, 2018 4:08:15 PM okhttp3.mockwebserver.MockWebServer$2 acceptConnections
INFO: MockWebServer[49293] done accepting connections: Socket closed
The OkHttp MockWebServer does chunk the data; however, it looks like the LoggingInterceptor waits until the whole chunk buffer is full and only then displays it.
From this nice summary about HTTP streaming:
The use of Transfer-Encoding: chunked is what allows streaming within a single request or response. This means that the data is transmitted in a chunked manner, and does not impact the representation of the content.
With that in mind, we are dealing with a single "request / response", which means we'll have to retrieve our chunks before the entire response has arrived, pushing each chunk into our own buffer, all inside an OkHttp network interceptor.
Here is an example of said NetworkInterceptor:
class ChunksInterceptor : Interceptor {

    val Utf8Charset = Charset.forName("UTF-8")

    override fun intercept(chain: Interceptor.Chain): Response {
        val originalResponse = chain.proceed(chain.request())
        val responseBody = originalResponse.body()
        val source = responseBody!!.source()

        val buffer = Buffer() // we create our own buffer to drain each chunk into

        // exhausted() returns true when there are no more bytes in this source
        while (!source.exhausted()) {
            val readBytes = source.read(buffer, Long.MAX_VALUE) // read whatever is currently available
            val data = buffer.readString(Utf8Charset)

            println("Read: $readBytes bytes")
            println("Content: \n $data \n")
        }

        return originalResponse
    }
}
Then of course we register this Network Interceptor on the OkHttp client.
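For completeness, registering it might look like this (a sketch reusing the question's Retrofit setup; ChunksInterceptor is the class above):

val client = OkHttpClient.Builder()
    .addNetworkInterceptor(ChunksInterceptor())
    .build()

val retrofit = Retrofit.Builder()
    .baseUrl(mockWebServer.url("/"))
    .addCallAdapterFactory(RxJava2CallAdapterFactory.create())
    .client(client)
    .build()

Note that addNetworkInterceptor (rather than addInterceptor) is used, since the answer above relies on a network interceptor.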
I am trying to access a private repository on GitHub using httr. I am able to do so with no problem if I add my GitHub token (stored in the environment variable GITHUB_TOKEN):
httr::GET("https://api.github.com/repos/aammd/miniature-meme/releases/assets/2859674",
httr::write_disk("test.rds", overwrite = TRUE),
httr::progress("down"),
httr::add_headers(Authorization = paste("token", Sys.getenv("GITHUB_TOKEN"))))
However, if I try to specify another header, I get an error. In this case, I want to download the binary file associated with a release (the "asset", in GitHub terminology):
httr::GET("https://api.github.com/repos/aammd/miniature-meme/releases/assets/2859674",
httr::write_disk("test.rds", overwrite = TRUE),
httr::progress("down"),
httr::add_headers(Authorization = paste("token", Sys.getenv("GITHUB_TOKEN"))),
httr::add_headers(Accept = "application/octet-stream"))
<?xml version="1.0" encoding="UTF-8"?>
<Error><Code>InvalidArgument</Code><Message>Only one auth mechanism allowed; only the X-Amz-Algorithm query parameter, Signature query string parameter or the Authorization header should be specified</Message>
That's only part of the message (the rest includes my token).
Apparently my authorization is being sent twice! How can I prevent this? Is it related to httr::handle_pool()?
EDIT -- connection info
It appears that the original request receives a reply containing a signature. This signature, along with my token, is then sent back, causing an error. A similar thing happened to these people:
-> GET /repos/aammd/miniature-meme/releases/assets/2859674 HTTP/1.1
-> Host: api.github.com
-> User-Agent: libcurl/7.43.0 r-curl/2.3 httr/1.2.1.9000
-> Accept-Encoding: gzip, deflate
-> Authorization: token tttttttt
-> Accept: application/octet-stream
->
<- HTTP/1.1 302 Found
<- Server: GitHub.com
<- Date: Tue, 17 Jan 2017 13:28:12 GMT
<- Content-Type: text/html;charset=utf-8
<- Content-Length: 0
<- Status: 302 Found
<- X-RateLimit-Limit: 5000
<- X-RateLimit-Remaining: 4984
<- X-RateLimit-Reset: 1484662101
<- location: https://github-cloud.s3.amazonaws.com/releases/76993567/aee5d0d6-c70a-11e6-9078-b5bee39f9fbc.RDS?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAISTNZFOVBIJMK3TQ%2F20170117%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20170117T132812Z&X-Amz-Expires=300&X-Amz-Signature=ssssssssss&X-Amz-SignedHeaders=host&actor_id=1198242&response-content-disposition=attachment%3B%20filename%3Dff.RDS&response-content-type=application%2Foctet-stream
<- Access-Control-Expose-Headers: ETag, Link, X-GitHub-OTP, X-RateLimit-Limit, X-RateLimit-Remaining, X-RateLimit-Reset, X-OAuth-Scopes, X-Accepted-OAuth-Scopes, X-Poll-Interval
<- Access-Control-Allow-Origin: *
<- Content-Security-Policy: default-src 'none'
<- Strict-Transport-Security: max-age=31536000; includeSubdomains; preload
<- X-Content-Type-Options: nosniff
<- X-Frame-Options: deny
<- X-XSS-Protection: 1; mode=block
<- Vary: Accept-Encoding
<- X-Served-By: 3e3b9690823fb031da84658eb58aa83b
<- X-GitHub-Request-Id: 82782802:6E1B:E9F0BE:587E1BEC
<-
-> GET /releases/76993567/aee5d0d6-c70a-11e6-9078-b5bee39f9fbc.RDS?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAISTNZFOVBIJMK3TQ%2F20170117%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20170117T132812Z&X-Amz-Expires=300&X-Amz-Signature=sssssssssssssss&X-Amz-SignedHeaders=host&actor_id=1198242&response-content-disposition=attachment%3B%20filename%3Dff.RDS&response-content-type=application%2Foctet-stream HTTP/1.1
-> Host: github-cloud.s3.amazonaws.com
-> User-Agent: libcurl/7.43.0 r-curl/2.3 httr/1.2.1.9000
-> Accept-Encoding: gzip, deflate
-> Authorization: token ttttttttttttt
-> Accept: application/octet-stream
->
<- HTTP/1.1 400 Bad Request
<- x-amz-request-id: FA56B3D23B468704
<- x-amz-id-2: 49X1mT5j5BrZ4HApeR/+wb7iVOWA8yn1obrgMoeOy44RH414bo/Ov8AAWSx2baEXO0H/WHX5jK0=
<- Content-Type: application/xml
<- Transfer-Encoding: chunked
<- Date: Tue, 17 Jan 2017 13:28:12 GMT
<- Connection: close
<- Server: AmazonS3
<-
gh doesn't work either
I created a public repo to test this idea out. The JSON can be returned from the API, but not the binary file:
# this works fine
gh::gh("https://api.github.com/repos/aammd/test_idea/releases/assets/2998763")
# this does not
gh::gh("https://api.github.com/repos/aammd/test_idea/releases/assets/2998763", .send_headers = c("Accept" = "application/octet-stream"))
wget might work, however
I've found a gist that shows how to do this with wget. The key component seems to be:
wget -q --auth-no-challenge --header='Accept:application/octet-stream' \
     https://$TOKEN:@api.github.com/repos/$REPO/releases/assets/$asset_id \
     -O $2
However, if I try to replicate that in httr::GET, I am not successful:
auth_url <- sprintf("https://%s:@api.github.com/repos/aammd/miniature-meme/releases/assets/2859674", Sys.getenv("GITHUB_TOKEN"))

httr::GET(auth_url,
          httr::write_disk("test.rds", overwrite = TRUE),
          httr::progress("down"),
          httr::add_headers(Accept = "application/octet-stream"))
Calling wget from R DOES work, but this solution is not totally satisfying, because I can't guarantee that all my users have wget installed (unless there is a way to check for that?).
system(sprintf("wget --auth-no-challenge --header='Accept:application/octet-stream' %s -O testwget.rds", auth_url))
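(As an aside on guaranteeing wget: one option, just a sketch using base R, is to test for the binary before shelling out; Sys.which() returns an empty string when the program is not on the PATH.)

if (nzchar(Sys.which("wget"))) {
  system(sprintf("wget --auth-no-challenge --header='Accept:application/octet-stream' %s -O testwget.rds", auth_url))
} else {
  stop("wget is not installed")
}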
The output of wget (note the absence of -q above) is included here (again, tokens and signatures redacted, hopefully):
--2017-01-18 13:21:55-- https://ttttt:*password*@api.github.com/repos/aammd/miniature-meme/releases/assets/2859674
Resolving api.github.com... 192.30.253.117, 192.30.253.116
Connecting to api.github.com|192.30.253.117|:443... connected.
HTTP request sent, awaiting response... 302 Found
Location: https://github-cloud.s3.amazonaws.com/releases/76993567/aee5d0d6-c70a-11e6-9078-b5bee39f9fbc.RDS?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAISTNZFOVBIJMK3TQ%2F20170118%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20170118T122156Z&X-Amz-Expires=300&X-Amz-Signature=SSSSSSSS&X-Amz-SignedHeaders=host&actor_id=1198242&response-content-disposition=attachment%3B%20filename%3Dff.RDS&response-content-type=application%2Foctet-stream [following]
--2017-01-18 13:21:55-- https://github-cloud.s3.amazonaws.com/releases/76993567/aee5d0d6-c70a-11e6-9078-b5bee39f9fbc.RDS?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAISTNZFOVBIJMK3TQ%2F20170118%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20170118T122156Z&X-Amz-Expires=300&X-Amz-Signature=SSSSSSSSSSSS&X-Amz-SignedHeaders=host&actor_id=1198242&response-content-disposition=attachment%3B%20filename%3Dff.RDS&response-content-type=application%2Foctet-stream
Resolving github-cloud.s3.amazonaws.com... 52.216.226.120
Connecting to github-cloud.s3.amazonaws.com|52.216.226.120|:443... connected.
HTTP request sent, awaiting response... 200 OK
Length: 682 [application/octet-stream]
Saving to: ‘testwget.rds’
0K 100% 15.5M=0s
2017-01-18 13:21:56 (15.5 MB/s) - ‘testwget.rds’ saved [682/682]
It turns out that there are two possible solutions to this problem!
Solution the first: token as parameter
As suggested by @user7433058, we can indeed pass the token through as a parameter! Note, however, that we have to use paste0. This is the approach suggested by GitHub themselves in their API documentation.
## pass oauth in the url
httr::GET(paste0("https://api.github.com/repos/aammd/miniature-meme/releases/assets/2859674?access_token=", Sys.getenv("GITHUB_TOKEN")),
          httr::write_disk("test.rds", overwrite = TRUE),
          httr::progress("down"),
          httr::add_headers(Accept = "application/octet-stream"))
tt <- readRDS("test.rds")
Solution the second: ask again
Another solution is to make the request once, extract the URL, and use it to make a second request. Since the problem is caused by sending authorization information twice -- once in the URL, once in the header -- we can avoid the problem by only using the URL.
## alternatively, get the query url (containing the signature) from the (failed) request made the first time
firsttry <- httr::GET("https://api.github.com/repos/aammd/miniature-meme/releases/assets/2859674",
                      httr::add_headers(Authorization = paste("token", Sys.getenv("GITHUB_TOKEN")),
                                        Accept = "application/octet-stream"))

httr::GET(firsttry$url,
          httr::write_disk("test2.rds", overwrite = TRUE),
          httr::progress("down"),
          httr::add_headers(Accept = "application/octet-stream"))
tt2 <- readRDS("test2.rds")
This is, I suppose, a bit less efficient (three requests total instead of two). However, since only the first request goes to the actual GitHub API, it counts only once towards your rate limit.
A small refinement: no redirect from httr
We can make only 2, not 3, HTTP requests if we tell httr not to follow redirects. To do this, use httr::config(followlocation = FALSE) in the first of the two requests (i.e. the one that fetches firsttry).
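Put together, a sketch of the two-request version (same calls as above, just with redirect-following disabled; untested):

firsttry <- httr::GET("https://api.github.com/repos/aammd/miniature-meme/releases/assets/2859674",
                      httr::config(followlocation = FALSE),
                      httr::add_headers(Authorization = paste("token", Sys.getenv("GITHUB_TOKEN")),
                                        Accept = "application/octet-stream"))

## the 302's Location header now holds the signed S3 URL; fetch it without the Authorization header
httr::GET(httr::headers(firsttry)$location,
          httr::write_disk("test2.rds", overwrite = TRUE),
          httr::progress("down"),
          httr::add_headers(Accept = "application/octet-stream"))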
Try sending the auth token as a query param instead of an auth header. That way, when GitHub's OAuth redirects you, it'll strip the original token and the X-Amz-Algorithm param will be left to do its job.
httr::GET(paste("https://api.github.com/repos/aammd/miniature-meme/releases/assets/2859674?access_token=", Sys.getenv("GITHUB_TOKEN")),
          httr::write_disk("test.rds", overwrite = TRUE),
          httr::progress("down"))
I have to perform three HTTP requests from a Play Application. These calls are directed towards three subprojects of the main application. The architecture looks like this:
main app
   |
   -- modules
        |
        -- component1
        |
        -- component2
        |
        -- component3
Each component is an individual sbt subproject.
The main class in the main app runs this Action:
def service = Action.async(BodyParsers.parse.json) { implicit request =>
  val query = request.body
  val url1 = "http://localhost:9000/one"
  val url2 = "http://localhost:9000/two"
  val url3 = "http://localhost:9000/three"

  val sync_calls = for {
    a <- ws.url(url1).withRequestTimeout(Duration.Inf).withHeaders("Content-Type" -> "application/json")
          .withBody(query).get()
    b <- ws.url(url2).withRequestTimeout(Duration.Inf).withHeaders("Content-Type" -> "application/json")
          .withBody(a.body).get()
    c <- ws.url(url3).withRequestTimeout(Duration.Inf).withHeaders("Content-Type" -> "application/json")
          .withBody(b.body).get()
  } yield c.body

  sync_calls.map(x => Ok(x))
}
The components need to be activated one after the other, so the calls must happen in sequence. Each of them runs a Spark job. However, when I call the service action I get this error:
[error] application -
! @71og96ol6 - Internal server error, for (GET) [/automatic] ->
play.api.http.HttpErrorHandlerExceptions$$anon$1: Execution exception[[TimeoutException: Read timeout to localhost/127.0.0.1:9000 after 120000 ms]] [TimeoutException: Read timeout to localhost/127.0.0.1:9000 after 120000 ms]
at play.api.http.HttpErrorHandlerExceptions$.throwableToUsefulException(HttpErrorHandler.scala:280)
at play.api.http.DefaultHttpErrorHandler.onServerError(HttpErrorHandler.scala:206)
at play.api.GlobalSettings$class.onError(GlobalSettings.scala:160)
at play.api.DefaultGlobal$.onError(GlobalSettings.scala:188)
at play.api.http.GlobalSettingsHttpErrorHandler.onServerError(HttpErrorHandler.scala:98)
at play.core.server.netty.PlayRequestHandler$$anonfun$2$$anonfun$apply$1.applyOrElse(PlayRequestHandler.scala:100)
at play.core.server.netty.PlayRequestHandler$$anonfun$2$$anonfun$apply$1.applyOrElse(PlayRequestHandler.scala:99)
at scala.concurrent.Future$$anonfun$recoverWith$1.apply(Future.scala:346)
at scala.concurrent.Future$$anonfun$recoverWith$1.apply(Future.scala:345)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:32)
Caused by: java.util.concurrent.TimeoutException: Read timeout to localhost/127.0.0.1:9000 after 120000 ms
at org.asynchttpclient.netty.timeout.TimeoutTimerTask.expire(TimeoutTimerTask.java:43)
at org.asynchttpclient.netty.timeout.ReadTimeoutTimerTask.run(ReadTimeoutTimerTask.java:54)
at io.netty.util.HashedWheelTimer$HashedWheelTimeout.expire(HashedWheelTimer.java:581)
at io.netty.util.HashedWheelTimer$HashedWheelBucket.expireTimeouts(HashedWheelTimer.java:655)
at io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:367)
at java.lang.Thread.run(Thread.java:745)
and
HTTP/1.1 500 Internal Server Error
Content-Length: 4931
Content-Type: text/html; charset=utf-8
Date: Tue, 25 Oct 2016 21:06:17 GMT
<body id="play-error-page">
<p id="detail" class="pre">[TimeoutException: Read timeout to localhost/127.0.0.1:9000 after 120000 ms]</p>
</body>
I specifically set the timeout for each call to Duration.Inf to avoid timeouts. Why is this happening, and how do I fix it?
When I load an HTML page, I have 5 strings written about a second apart.
<br>1</br>
...... 1 second ......
<br>2</br>
...... 1 second ......
<br>3</br>
...... 1 second ......
<br>4</br>
...... 1 second ......
<br>5</br>
...... 1 second ......
--- end request ---
Chromium and Firefox both load and display the first br and then each subsequent one as received (Firefox requires a content encoding, however). But Safari refuses to display any of the tags until the request is ended.
Chromium seems to just do it.
Firefox first needs to determine the content encoding: https://bugzilla.mozilla.org/show_bug.cgi?id=647203
But Safari seems to just refuse. Is a different response code or header needed? I tried setting the content type explicitly to text/html; that didn't work.
I have confirmed in Wireshark that the strings are being sent a second apart, i.e. they are not being cached and sent all at once.
I have also confirmed this occurs whether I go through localhost or my public IP address.
I have tried Content-Length and keep-alive: the former just closes the request automatically, and the latter seems to have no effect.
Headers and Responses from Wireshark
Firefox (working)
GET /pe HTTP/1.1
Host: 127.0.01:8080
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10.10; rv:42.0) Gecko/20100101 Firefox/42.0
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
Accept-Language: en-US,en;q=0.5
Accept-Encoding: gzip, deflate
DNT: 1
Connection: keep-alive
Cache-Control: max-age=0
HTTP/1.1 200 OK
Transfer-Encoding: chunked
Date: Tue, 10 Nov 2015 17:10:20 GMT
Connection: keep-alive
Content-Type: text/html; charset=utf-8
Server: TwistedWeb/13.2.0
1f
<html>
<title>PE</title>
<body>
2e
<br> This is the 1th time I've written. </br>
2e
<br> This is the 2th time I've written. </br>
2e
<br> This is the 3th time I've written. </br>
2e
<br> This is the 4th time I've written. </br>
2e
<br> This is the 5th time I've written. </br>
8
</body>
8
</html>
0
Safari (not working)
GET /pe HTTP/1.1
Host: 127.0.0.01:8080
Accept-Encoding: gzip, deflate
Accept: text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8
User-Agent: Mozilla/5.0 (Macintosh; Intel Mac OS X 10_10_5) AppleWebKit/601.1.56 (KHTML, like Gecko) Version/9.0 Safari/601.1.56
Accept-Language: en-us
DNT: 1
Connection: keep-alive
HTTP/1.1 200 OK
Transfer-Encoding: chunked
Date: Tue, 10 Nov 2015 17:12:55 GMT
Connection: keep-alive
Content-Type: text/html; charset=utf-8
Server: TwistedWeb/13.2.0
1f
<html>
<title>PE</title>
<body>
2e
<br> This is the 1th time I've written. </br>
2e
<br> This is the 2th time I've written. </br>
2e
<br> This is the 3th time I've written. </br>
2e
<br> This is the 4th time I've written. </br>
2e
<br> This is the 5th time I've written. </br>
8
</body>
8
</html>
0
Demo
import twisted
from twisted.python import log
import sys
log.startLogging(sys.stdout)
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor

class PersistantExample(Resource):
    '''Gives an example of a persistent request'''
    # does not exist on Safari until stopping browser / ending connection
    isLeaf = True

    def render_GET(self, request):
        log.msg("Ooooh a render request")
        # schedule the recurring write (this could be something else, like a deferred result)
        reactor.callLater(1.1, self.keeps_going, request, 0)  # 1.1 seconds just to show it can take floats
        # Firefox requires the charset, see https://bugzilla.mozilla.org/show_bug.cgi?id=647203
        request.responseHeaders.addRawHeader("Content-Type",
                                             "text/html; charset=utf-8")  # set the MIME header (charset needed for Firefox)
        # this would keep the connection open only for the given length
        # (only helpful if the length is known; it also does NOT make Safari render right away)
        # request.responseHeaders.addRawHeader("Content-Length", "150")
        request.write("<html>\n<title>PE</title>\n<body>")
        return NOT_DONE_YET

    def keeps_going(self, request, i):
        log.msg("I'm going again....")
        i = i + 1
        request.write("\n<br> This is the %sth time I've written. <br>" % i)  # probably not best to use the <br> tag this way
        if i < 5:
            reactor.callLater(1.1, self.keeps_going, request, i)  # 1.1 seconds just to show it can take floats
        if i >= 5 and not request.finished:
            log.msg("Done")
            request.write("\n</body>")
            request.write("\n</html>")
            # Safari will only render when finished
            request.finish()

class Root(Resource):
    isLeaf = False

    def render_GET(self, request):
        return "<html><body>Demo is here</body></html>"

class Site(Site):
    pass

root = Root()
pe = PersistantExample()
site = Site(root)
root.putChild("", root)
root.putChild("index", root)
root.putChild("pe", pe)

# listen
if __name__ == "__main__":
    reactor.listenTCP(8080, site)
    reactor.run()
Are you trying this on Safari for Windows? If yes, it is not officially supported any more. If you are trying to solve this on Safari for Mac, please create a demo or share the link if you already have one.
I can test and help you with this.
OK: in order for Safari to render the HTML as it is received, at least 1024 bytes have to be written before it starts rendering.
You can see a demo showing both a working and a non-working version below. A similar thing happens in Firefox (it needs 1024 bytes to determine the encoding), but in Firefox you can declare the encoding to bypass the wait. Not sure if there is a way to do that in Safari; declaring a charset only lowers the threshold to 512 bytes (see the table below).
+----------------+----------------------------------+
| What you send  | How much you need                |
+----------------+----------------------------------+
| Nothing        | 1024 bytes                       |
| Meta charset   | 512 bytes                        |
| Header charset | 512 bytes (not including header) |
+----------------+----------------------------------+
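Distilled to its essence, the workaround is to pad Safari past that threshold on the first write. A minimal sketch (my own illustration, assuming a Twisted request object as in the full demo below, which covers all four cases):

# hypothetical first flush: cross the 1024-byte threshold with a filler HTML comment
padding = "<!--" + " " * 1024 + "-->"
request.write("<html>\n<title>PE</title>\n<body>" + padding)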
import twisted
from twisted.python import log
import sys
log.startLogging(sys.stdout)
from twisted.web.server import Site, NOT_DONE_YET
from twisted.web.resource import Resource
from twisted.internet import reactor
import time

# max 133
# 133 * 8 = 1064
html_string_working = "<p>"  # 3 bytes
html_string_working += "I'm the version that works! I show up right away!"  # 49 bytes
html_string_working += "<!---"  # 5 bytes
html_string_working += " " * (1024 - (3+49+5+3+4))  # filler
html_string_working += "-->"  # 3 bytes
html_string_working += "</p>"  # 4 bytes

print len(html_string_working)

html_string_non_working = "<p>"  # 3 bytes
html_string_non_working += "I'm the version that does not work! I don't show up right away!"  # 63 bytes
html_string_non_working += "</p>"  # 4 bytes
html_string_non_working += "<!---"  # 5 bytes
html_string_non_working += " " * (1023 - (3+63+5+3+4))  # filler but one byte short (notice 1023 instead of 1024)
html_string_non_working += "-->"  # 3 bytes

print len(html_string_non_working)

# charset maybe? Firefox won't start parsing until 1024 bytes
# unless it has a charset specified, see https://bugzilla.mozilla.org/show_bug.cgi?id=647203
# "Your script does not appear to declare the character encoding. When the character
# encoding is not declared, Gecko won't start parsing until it has received 1024
# bytes of HTTP payload."

# charset works but requires 512 bytes
html_string_charset = "<meta charset=utf-8>"  # 20 bytes
html_string_charset += "<p>"  # 3 bytes
html_string_charset += "I'm the meta charset version. I may work!"  # 41 bytes
html_string_charset += "</p>"  # 4 bytes
html_string_charset += "<!---"  # 5 bytes
html_string_charset += " " * (512 - (20+3+41+5+3+4))  # filler (512 bytes; 511 doesn't work)
html_string_charset += "-->"  # 3 bytes

# what about in header form?
# header charset works but requires 512 bytes not including the headers (so same as meta version)
html_string_charset_headers = "<p>"  # 3 bytes
html_string_charset_headers += "I'm the header charset version. I may work!"  # 43 bytes
html_string_charset_headers += "</p>"  # 4 bytes
html_string_charset_headers += "<!---"  # 5 bytes
html_string_charset_headers += " " * (512 - (3+43+5+3+4))  # filler (512 bytes; 511 doesn't work)
html_string_charset_headers += "-->"  # 3 bytes

print len(html_string_charset_headers)

later = "<p> I'm written later. Now it suddenly will work because bytes >= 1024."  # 71 bytes

class PersistantExample(Resource):
    '''Gives an example of a persistent request'''
    # does not exist on Safari until stopping browser / ending connection
    isLeaf = True

    def render_GET(self, request):
        log.msg("Ooooh a render request")
        # schedule the recurring thing (this could be something else, like a deferred result)
        # Firefox requires the charset, see https://bugzilla.mozilla.org/show_bug.cgi?id=647203
        # render the working version or not
        try:
            w = int(request.args["w"][0])
        except:
            w = 1
        if w == 1:
            request.write(html_string_working)
        elif w == 2:
            request.write(html_string_non_working)
        elif w == 3:
            request.write(html_string_charset)
        elif w == 4:
            # 12 + 24 = 36 bytes but doesn't count towards the 512 total
            request.responseHeaders.addRawHeader("Content-Type", "text/html; charset=utf-8")
            request.write(html_string_charset_headers)
        reactor.callLater(2, self.run_later, request)
        return NOT_DONE_YET

    def run_later(self, request):
        request.write(later)
        request.finish()

class Root(Resource):
    isLeaf = False

    def render_GET(self, request):
        return """<html><body>
            <p>Working version here</p>
            <p>Non working version here</p>
            <p>Meta charset version here</p>
            <p>Header charset version here</p>
            </body></html>"""

class Site(Site):
    pass

root = Root()
pe = PersistantExample()
site = Site(root)
root.putChild("", root)
root.putChild("index", root)
root.putChild("pe", pe)

# listen
if __name__ == "__main__":
    print "Running"
    reactor.listenTCP(8080, site)
    reactor.run()
Use <br /> or <br> instead of </br>, which is not a valid tag.
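Applied to the question's demo, the write in keeps_going would become, for example:

request.write("\n<br> This is the %sth time I've written." % i)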