I'm trying to figure out how to implement a function to feed to tls.Config.GetCertificate using self-signed certificates.
I used this source as a base: https://golang.org/src/crypto/tls/generate_cert.go
I also read this:
https://ericchiang.github.io/tls/go/https/2015/06/21/go-tls.html
Unfortunately, so far I'm stuck with this error:
2016/11/03 23:18:20 http2: server: error reading preface from client 127.0.0.1:34346: remote error: tls: unknown certificate authority
I think I need to generate a CA cert and then sign the key with it, but I'm not sure how to proceed (....).
Here is my code, can someone help with that?
package gssc
import (
"crypto/rand"
"crypto/rsa"
"crypto/tls"
"crypto/x509"
"crypto/x509/pkix"
"github.com/pkg/errors"
"math/big"
"net"
"strings"
"time"
)
func GetCertificate(arg interface{}) func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
var opts Certopts
var err error
if host, ok := arg.(string); ok {
opts = Certopts{
RsaBits: 2048,
Host: host,
ValidFrom: time.Now(),
}
} else if o, ok := arg.(Certopts); ok {
opts = o
} else {
err = errors.New("Invalid arg type, must be string(hostname) or Certopt{...}")
}
return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
if err != nil {
return nil, err
}
return generate(opts)
}
}
type Certopts struct {
RsaBits int
Host string
IsCA bool
ValidFrom time.Time
ValidFor time.Duration
}
func generate(opts Certopts) (*tls.Certificate, error) {
priv, err := rsa.GenerateKey(rand.Reader, opts.RsaBits)
if err != nil {
return nil, errors.Wrap(err, "failed to generate private key")
}
notAfter := opts.ValidFrom.Add(opts.ValidFor)
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, errors.Wrap(err, "Failed to generate serial number\n")
}
template := x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
Organization: []string{"Acme Co"},
},
NotBefore: opts.ValidFrom,
NotAfter: notAfter,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageServerAuth},
BasicConstraintsValid: true,
}
hosts := strings.Split(opts.Host, ",")
for _, h := range hosts {
if ip := net.ParseIP(h); ip != nil {
template.IPAddresses = append(template.IPAddresses, ip)
} else {
template.DNSNames = append(template.DNSNames, h)
}
}
if opts.IsCA {
template.IsCA = true
template.KeyUsage |= x509.KeyUsageCertSign
}
derBytes, err := x509.CreateCertificate(rand.Reader, &template, &template, &priv.PublicKey, priv)
if err != nil {
return nil, errors.Wrap(err, "Failed to create certificate")
}
return &tls.Certificate{
Certificate: [][]byte{derBytes},
PrivateKey: priv,
}, nil
}
This is the test code I use:
package main
import (
"crypto/tls"
"github.com/mh-cbon/gssc"
"net/http"
)
type ww struct{}
func (s *ww) ServeHTTP(w http.ResponseWriter, req *http.Request) {
w.Header().Set("Content-Type", "text/plain")
w.Write([]byte("This is an example server.\n"))
}
func main() {
s := &http.Server{
Handler: &ww{},
Addr: ":8080",
TLSConfig: &tls.Config{
InsecureSkipVerify: true,
GetCertificate: gssc.GetCertificate("example.org"),
},
}
s.ListenAndServeTLS("", "")
}
Thanks a lot!
Your implementation of tls.Config.GetCertificate is causing the problem.
You are generating a certificate each time tls.Config.GetCertificate is called. You need to generate the certificate once and then return it in the anonymous function.
In gssc.GetCertificate:
cert, err := generate(opts)
return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
if err != nil {
return nil, err
}
return cert, err
}
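Putting that suggestion together, gssc.GetCertificate might look roughly like this. This is a sketch within the same package and imports as above, not tested; the one-year ValidFor default is my own addition, since the original string case leaves it at zero and would produce a zero-length validity window:
func GetCertificate(arg interface{}) func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
	var opts Certopts
	var err error
	if host, ok := arg.(string); ok {
		opts = Certopts{
			RsaBits:   2048,
			Host:      host,
			ValidFrom: time.Now(),
			ValidFor:  365 * 24 * time.Hour, // assumed default lifetime, not in the original
		}
	} else if o, ok := arg.(Certopts); ok {
		opts = o
	} else {
		err = errors.New("invalid arg type, must be string(hostname) or Certopts{...}")
	}

	// Generate the certificate once, up front, instead of on every TLS handshake.
	var cert *tls.Certificate
	if err == nil {
		cert, err = generate(opts)
	}

	return func(clientHello *tls.ClientHelloInfo) (*tls.Certificate, error) {
		if err != nil {
			return nil, err
		}
		return cert, nil
	}
}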
Per this doc, we can trace an http.Client with httptrace in this way:
t := &transport{}
req, _ := http.NewRequest("GET", "https://google.com", nil)
trace := &httptrace.ClientTrace{
GotConn: t.GotConn,
}
req = req.WithContext(httptrace.WithClientTrace(req.Context(), trace))
client := &http.Client{Transport: t}
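The transport helper referenced by that snippet is not shown above; in the httptrace example it is roughly the following (a sketch; the snippet also needs a client.Do(req) call at the end to actually trigger the GotConn hook):
import (
	"fmt"
	"net/http"
	"net/http/httptrace"
)

// transport keeps track of the in-flight request so the trace hooks can refer to it.
type transport struct {
	current *http.Request
}

func (t *transport) RoundTrip(req *http.Request) (*http.Response, error) {
	t.current = req
	return http.DefaultTransport.RoundTrip(req)
}

// GotConn is wired into httptrace.ClientTrace above; it reports whether the
// connection was reused.
func (t *transport) GotConn(info httptrace.GotConnInfo) {
	fmt.Printf("Connection reused for %v? %v\n", t.current.URL, info.Reused)
}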
For the Google API client, here is the wrapper code:
func NewWithClient(jsonKey []byte, cli *http.Client) (*Client, error) {
if cli == nil {
return nil, fmt.Errorf("client is nil")
}
ctx := context.WithValue(context.Background(), oauth2.HTTPClient, cli)
conf, err := google.JWTConfigFromJSON(jsonKey, androidpublisher.AndroidpublisherScope)
if err != nil {
return nil, err
}
service, err := androidpublisher.NewService(ctx, option.WithHTTPClient(conf.Client(ctx)))
if err != nil {
return nil, err
}
return &Client{service}, err
}
We want to apply httptrace to the http.Client argument of NewWithClient in order to trace its HTTP requests.
What we have tried:
type TraceTransport struct {
}
var traceTransport = &TraceTransport{}
var trace = &httptrace.ClientTrace{
GotConn: traceTransport.GotConn,
}
func (t *TraceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
return http.DefaultTransport.RoundTrip(req)
}
func (t *TraceTransport) GotConn(info httptrace.GotConnInfo) {
fmt.Printf("Connection reused for %v \n", info.Reused)
}
type ClientWrapper struct {
defaultClient *http.Client
}
var clientWrapperTrace = &httptrace.ClientTrace{GotConn: traceTransport.GotConn}
func (c *ClientWrapper) Do(req *http.Request) (*http.Response, error) {
req = req.WithContext(httptrace.WithClientTrace(req.Context(), clientWrapperTrace))
return c.defaultClient.Do(req)
}
func NewClientTrace(jsonKey []byte) (*Client, error) {
cli := &http.Client{
Transport: traceTransport,
Timeout: time.Duration(10) * time.Second,
}
cliWrapper := &ClientWrapper{defaultClient: cli}
ctx := context.WithValue(context.Background(), oauth2.HTTPClient, cliWrapper)
conf, err := google.JWTConfigFromJSON(jsonKey, androidpublisher.AndroidpublisherScope)
if err != nil {
return nil, err
}
service, err := androidpublisher.NewService(ctx, option.WithHTTPClient(conf.Client(ctx)))
if err != nil {
return nil, err
}
return &Client{service}, err
}
type Client struct {
service *androidpublisher.Service
}
func (c *Client) VerifyProduct(
ctx context.Context,
packageName string,
productID string,
token string,
) (*androidpublisher.ProductPurchase, error) {
ps := androidpublisher.NewPurchasesProductsService(c.service)
result, err := ps.Get(packageName, productID, token).Context(ctx).Do()
return result, err
}
// test codes
c, err := NewClientTrace([]byte(privateKey))
if err != nil {
return
}
packageName := "package.name"
productID := "product_id"
token := "xxxxx"
r, err := c.VerifyProduct(context.Background(), packageName, productID, token)
However, this fails to trace the http.Client; there is no output from GotConn. Could someone help us figure out the issue with the above code?
Requests from google/oauth2 are not traceable by httptrace. Your ClientWrapper passed with context.WithValue is ignored there: oauth2 builds its own http.Client and only uses the Transport of the *http.Client it finds in context.Value.
Requests from androidpublisher can be traced by httptrace like this:
ctx := httptrace.WithClientTrace(context.Background(), clientWrapperTrace)
r, err := c.VerifyProduct(ctx, packageName, productID, token)
If you just want to count the requests, I think overriding http.Client.Transport is an easy way:
type TraceTransport struct {
}
func (t *TraceTransport) RoundTrip(req *http.Request) (*http.Response, error) {
fmt.Printf("RoundTrip hook %v\n", req.URL)
return http.DefaultTransport.RoundTrip(req)
}
func NewClientTrace(jsonKey []byte) (*Client, error) {
cli := &http.Client{Transport: &TraceTransport{}}
ctx := context.WithValue(context.Background(), oauth2.HTTPClient, cli)
// ...
service, err := androidpublisher.NewService(ctx, option.WithHTTPClient(conf.Client(ctx)))
// ....
}
I need a Go client that can upgrade from an HTTP GET response to a WebSocket connection.
I have a JS client that works, and I've seen direct ws client connections, but I have to upgrade from HTTP. I have tried looking at other 3GL solutions (Java, C#, Python), but I need to be able to implement the upgrade in Go. I have seen Dart detaching the socket and creating a WebSocket from it:
WebSocket.fromUpgradedSocket
I noticed Client does not support Hijack but the discussion didn't get me anywhere.
I am using github.com/gorilla/websocket but can change that if it helps.
Server:
func main() {
srv := Srv{}
count = 0
http.HandleFunc("/", srv.handleRoot)
http.HandleFunc("/ws", srv.handleWs)
log.Fatal(http.ListenAndServe(":5002", nil))
}
func (tool *Srv) handleRoot(w http.ResponseWriter, r *http.Request) {
webSocketKey := r.Header.Get("Sec-WebSocket-Key")
log.Printf("Socket key = '%v'", webSocketKey)
secWsAccept := computeAcceptKey(webSocketKey)
log.Printf("Accept = '%v'", secWsAccept)
w.Header().Add("sec-websocket-accept", secWsAccept)
w.Header().Add("upgrade", "websockt")
w.Header().Add("connection", "upgrade")
w.WriteHeader(101)
}
func (tool *Srv) handleWs(w http.ResponseWriter, r *http.Request) {
var upgrader = websocket.Upgrader{}
conn, err := upgrader.Upgrade(w, r, nil)
if err != nil {
log.Fatalf("Websocket fatal error. %v", err)
}
tool.conn = conn
go tool.serviceWsRequests()
}
func (tool *Srv) serviceWsRequests() {
for {
log.Printf("starting ws")
req := request{}
err := tool.conn.ReadJSON(&req)
if err != nil {
log.Printf("Failed to decode ws message. %v", err)
break
}
fmt.Printf("Got request. %v\n", req)
if req.Method == "ping" {
fmt.Printf("Param=%v\n", req.Parameters)
}
}
}
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
func computeAcceptKey(challengeKey string) string {
h := sha1.New()
h.Write([]byte(challengeKey))
h.Write(keyGUID)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
Client:
func main() {
tr := &http.Transport{
MaxIdleConns: 10,
IdleConnTimeout: 30 * time.Second,
DisableCompression: true,
}
client := &http.Client{
Transport: tr,
// Do NOT follow redirects
CheckRedirect: func(req *http.Request, via []*http.Request) error {
return http.ErrUseLastResponse
},
}
wsKey, err := generateKey()
if err != nil {
log.Printf("Cannot generate challenge key %v", err)
}
// Get request for ws upgrade.
req, err := http.NewRequest("GET", "http://localhost:5002", nil)
req.Header.Add("Connection", "Upgrade")
req.Header.Add("Upgrade", "websocket")
req.Header.Add("Sec-WebSocket-Version", "13")
req.Header.Add("Sec-WebSocket-Key", wsKey)
log.Printf("ws key '%v'", wsKey)
resp, err := client.Do(req)
if err != nil {
log.Printf("Get error %v", err)
}
defer func() {
if resp != nil {
err = resp.Body.Close()
}
}()
log.Printf("Status='%v', proto='%v'", resp.Status, resp.Proto)
body, err := ioutil.ReadAll(resp.Body)
hdr := resp.Header
for k, v := range hdr{
log.Printf("%v : %v", k, v)
}
log.Printf("Body = %v", string(body))
resp, err = http.Get("ws://localhost:5002/ws")
if err != nil {
log.Printf("Error '%v'", err)
}
}
func generateKey() (string, error) {
p := make([]byte, 16)
if _, err := io.ReadFull(rand.Reader, p); err != nil {
return "", err
}
return base64.StdEncoding.EncodeToString(p), nil
}
var keyGUID = []byte("258EAFA5-E914-47DA-95CA-C5AB0DC85B11")
func computeAcceptKey(challengeKey string) string {
h := sha1.New()
h.Write([]byte(challengeKey))
h.Write(keyGUID)
return base64.StdEncoding.EncodeToString(h.Sum(nil))
}
I get an error
Error 'Get ws://localhost:5002/ws: unsupported protocol scheme "ws"'
Which doesn't surprise me because I haven't upgraded the connection.
So how do I do the upgrade in Go?
Use the Gorilla client to dial websocket connections:
func main() {
c, _ , err := websocket.DefaultDialer.Dial("ws://localhost:5002/ws", nil)
if err != nil {
// handle error
}
defer c.Close()
// do something with c, a *websocket.Conn
}
The Dial method issues a GET to the server requesting an upgrade to the WebSocket protocol. On successful completion of the upgrade, Dial returns a *websocket.Conn.
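For completeness, here is a rough sketch of driving the server from the question with such a client. The request struct is an assumption based on the server's serviceWsRequests loop (Method and Parameters fields); adjust it to match the real type:
package main

import (
	"log"

	"github.com/gorilla/websocket"
)

// request mirrors what the server's ReadJSON loop appears to expect;
// the field names and types are assumptions.
type request struct {
	Method     string
	Parameters string
}

func main() {
	// Dial performs the HTTP GET with the Upgrade headers and completes the handshake.
	c, _, err := websocket.DefaultDialer.Dial("ws://localhost:5002/ws", nil)
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer c.Close()

	// Send one JSON message for the server's ReadJSON loop to decode.
	if err := c.WriteJSON(request{Method: "ping", Parameters: "hello"}); err != nil {
		log.Fatalf("write: %v", err)
	}
}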
I'm working on some tests in Go and have spent the past two days trying to make this work, but I couldn't. My problem is that the test returns 400 even when the user does exist.
This is my getUser function
func (handler *UserHandler) getUser(w http.ResponseWriter, ID int) {
logfile, err := os.OpenFile("events.log", os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
if err != nil {
log.Fatalf("Error opening file: %v", err)
}
defer logfile.Close()
log.SetOutput(logfile)
user := db.Fetch(ID)
userJSON, err := json.Marshal(user)
if err != nil {
log.Printf("Error while marshaling the user into JSON: %v", err)
return
}
w.Header().Set("Content-Type", "application/json")
w.WriteHeader(http.StatusOK)
// userJSON is sent as http Response
w.Write(userJSON)
}
This is my UserHandler
type UserHandler struct{}
func (handle *UserHandler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
var head string
head, r.URL.Path = ShiftPath(r.URL.Path)
id, err := strconv.Atoi(head)
if err != nil {
http.Error(w, fmt.Sprintf("Invalid user ID %q", head), http.StatusBadRequest)
return
}
switch r.Method {
case "GET":
handle.getUser(w, id)
default:
http.Error(w, "Only GET is allowed", http.StatusMethodNotAllowed)
}
}
func ShiftPath(p string) (head, tail string) {
p = path.Clean("/" + p)
i := strings.Index(p[1:], "/") + 1
if i <= 0 {
return p[1:], "/"
}
return p[1:i], p[i:]
}
And this is my test
func TestGetUser(t *testing.T) {
handler := new(UserHandler)
mux := http.NewServeMux()
mux.HandleFunc("/user/", handler.ServeHTTP)
writer := httptest.NewRecorder()
request, _ := http.NewRequest("GET", "/user/12", nil)
mux.ServeHTTP(writer, request)
if writer.Code != 200 {
t.Errorf("Response code is %v", writer.Code)
}
}
The issue is with this line: id, err := strconv.Atoi(head)
The handler is registered on the mux at /user/, so ServeHTTP receives the full path /user/12. ShiftPath therefore returns "user" as the head, strconv.Atoi("user") fails, and the handler returns 400 before getUser is ever called. Strip the /user prefix (or shift past that segment) before parsing the ID; a sketch follows below.
Suggestion: always print or debug line by line; that is how you find the root cause.
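A minimal sketch of one way to make the test pass, using http.StripPrefix so the handler only sees the ID segment (this keeps ShiftPath and getUser unchanged):
func TestGetUser(t *testing.T) {
	handler := new(UserHandler)
	mux := http.NewServeMux()
	// Strip "/user" so UserHandler receives "/12" and ShiftPath yields "12".
	mux.Handle("/user/", http.StripPrefix("/user", handler))

	writer := httptest.NewRecorder()
	request, _ := http.NewRequest("GET", "/user/12", nil)
	mux.ServeHTTP(writer, request)

	if writer.Code != 200 {
		t.Errorf("Response code is %v", writer.Code)
	}
}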
If you use the http.FileServer in Go like:
func main() {
port := flag.String("p", "8100", "port to serve on")
directory := flag.String("d", ".", "the directory of static file to host")
flag.Parse()
http.Handle("/", http.FileServer(http.Dir(*directory)))
log.Printf("Serving %s on HTTP port: %s\n", *directory, *port)
log.Fatal(http.ListenAndServe(":"+*port, nil))
}
Then accessing a directory gives you a listing of its files. Web services often disable this and respond with 404 instead, and I would like that behaviour too.
http.FileServer has no option for this AFAIK. I have seen a proposed way to solve it here: https://groups.google.com/forum/#!topic/golang-nuts/bStLPdIVM6w - they wrap the http.FileSystem type and implement their own Open method. However, this doesn't give a 404 when the path is a directory; it just gives a blank page, and it's unclear how to modify it to accommodate this. This is what they do:
type justFilesFilesystem struct {
fs http.FileSystem
}
func (fs justFilesFilesystem) Open(name string) (http.File, error) {
f, err := fs.fs.Open(name)
if err != nil {
return nil, err
}
return neuteredReaddirFile{f}, nil
}
type neuteredReaddirFile struct {
http.File
}
func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) {
return nil, nil
}
func main() {
fs := justFilesFilesystem{http.Dir("/tmp/")}
http.ListenAndServe(":8080", http.FileServer(fs))
}
Note: if you make Readdir return nil, os.ErrNotExist then you get a 500 response with "Error reading directory" - not 404.
Any ideas on how to neatly return a 404 while still preserving the feature of automatically serving an index.html if present?
This behavior can be changed by overriding not the Readdir method but Stat. When Stat reports os.ErrNotExist, http.FileServer turns it into a 404 response (rather than the 500 you get from an erroring Readdir).
Please take a look at the working code below. It serves index.html files if they exist inside the requested directory, and returns 404 when the requested path is a directory without an index.html.
package main
import (
"io"
"net/http"
"os"
)
type justFilesFilesystem struct {
fs http.FileSystem
// readDirBatchSize - configuration parameter for `Readdir` func
readDirBatchSize int
}
func (fs justFilesFilesystem) Open(name string) (http.File, error) {
f, err := fs.fs.Open(name)
if err != nil {
return nil, err
}
return neuteredStatFile{File: f, readDirBatchSize: fs.readDirBatchSize}, nil
}
type neuteredStatFile struct {
http.File
readDirBatchSize int
}
func (e neuteredStatFile) Stat() (os.FileInfo, error) {
s, err := e.File.Stat()
if err != nil {
return nil, err
}
if s.IsDir() {
LOOP:
for {
fl, err := e.File.Readdir(e.readDirBatchSize)
switch err {
case io.EOF:
break LOOP
case nil:
for _, f := range fl {
if f.Name() == "index.html" {
return s, err
}
}
default:
return nil, err
}
}
return nil, os.ErrNotExist
}
return s, err
}
func main() {
fs := justFilesFilesystem{fs: http.Dir("/tmp/"), readDirBatchSize: 2}
fss := http.FileServer(fs)
http.ListenAndServe(":8080", fss)
}
What I'm doing is fairly straightforward. I need to create a "proxy" server that is very minimal and fast. Currently I have a baseline server that is proxied to (nodejs) and a proxy-service (go). Please excuse the lack of actual "proxying" - just testing for now.
Baseline Service
var http = require('http');
http.createServer(function (req, res) {
// console.log("received request");
res.writeHead(200, {'Content-Type': 'text/plain'});
res.end('Hello World\n');
}).listen(8080, '127.0.0.1');
console.log('Server running at http://127.0.0.1:8080/');
Proxy Service
package main
import (
"flag"
"log"
"net/http"
"net/url"
)
var (
listen = flag.String("listen", "0.0.0.0:9000", "listen on address")
logp = flag.Bool("log", false, "enable logging")
)
func main() {
flag.Parse()
proxyHandler := http.HandlerFunc(proxyHandlerFunc)
log.Println("Started router-server on 0.0.0.0:9000")
log.Fatal(http.ListenAndServe(*listen, proxyHandler))
}
func proxyHandlerFunc(w http.ResponseWriter, r *http.Request) {
// Log if requested
if *logp {
log.Println(r.URL)
}
/*
* Tweak the request as appropriate:
* - RequestURI may not be sent to client
* - Set new URL
*/
r.RequestURI = ""
u, err := url.Parse("http://localhost:8080/")
if err != nil {
log.Fatal(err)
}
r.URL = u
// And proxy
// resp, err := client.Do(r)
c := make(chan *http.Response)
go doRequest(c)
resp := <-c
if resp != nil {
err := resp.Write(w)
if err != nil {
log.Println("Error writing response")
} else {
resp.Body.Close()
}
}
}
func doRequest(c chan *http.Response) {
// new client for every request.
client := &http.Client{}
resp, err := client.Get("http://127.0.0.1:8080/test")
if err != nil {
log.Println(err)
c <- nil
} else {
c <- resp
}
}
My issue, as mentioned within the title, is that I am getting errors stating 2013/10/28 21:22:30 Get http://127.0.0.1:8080/test: dial tcp 127.0.0.1:8080: can't assign requested address from the doRequest function, and I have no clue why. Googling this particular error yields seemingly irrelevant results.
There are two major problems with this code:
1. You are not handling the client stalling or using keep-alives (handled below by getTimeoutServer).
2. You are not handling the server (what your http.Client is talking to) timing out (handled below by TimeoutConn).
This is probably why you are exhausting your local ports. I know from past experience node.js will keep-alive you very aggressively.
There are also lots of little issues: creating objects every time when you don't need to, and creating unneeded goroutines (each incoming request already runs in its own goroutine before you handle it).
Here is a quick stab (that I haven't had time to test well). Hopefully it will put you on the right track. (You will want to upgrade this to not buffer the responses locally.)
package main
import (
"bytes"
"errors"
"flag"
"fmt"
"log"
"net"
"net/http"
"net/url"
"runtime"
"strconv"
"time"
)
const DEFAULT_IDLE_TIMEOUT = 5 * time.Second
var (
listen string
logOn bool
localhost, _ = url.Parse("http://localhost:8080/")
client = &http.Client{
Transport: &http.Transport{
Proxy: NoProxyAllowed,
Dial: func(network, addr string) (net.Conn, error) {
return NewTimeoutConnDial(network, addr, DEFAULT_IDLE_TIMEOUT)
},
},
}
)
func main() {
runtime.GOMAXPROCS(runtime.NumCPU())
flag.StringVar(&listen, "listen", "0.0.0.0:9000", "listen on address")
flag.BoolVar(&logOn, "log", true, "enable logging")
flag.Parse()
server := getTimeoutServer(listen, http.HandlerFunc(proxyHandlerFunc))
log.Printf("Starting router-server on %s\n", listen)
log.Fatal(server.ListenAndServe())
}
func proxyHandlerFunc(w http.ResponseWriter, req *http.Request) {
if logOn {
log.Printf("%+v\n", req)
}
// Setup request URL
origURL := req.URL
req.URL = new(url.URL)
*req.URL = *localhost
req.URL.Path, req.URL.RawQuery, req.URL.Fragment = origURL.Path, origURL.RawQuery, origURL.Fragment
req.RequestURI, req.Host = "", req.URL.Host
// Perform request
resp, err := client.Do(req)
if err != nil {
w.WriteHeader(http.StatusBadGateway)
w.Write([]byte(fmt.Sprintf("%d - StatusBadGateway: %s", http.StatusBadGateway, err)))
return
}
defer resp.Body.Close()
var respBuffer *bytes.Buffer
if resp.ContentLength != -1 {
respBuffer = bytes.NewBuffer(make([]byte, 0, resp.ContentLength))
} else {
respBuffer = new(bytes.Buffer)
}
if _, err = respBuffer.ReadFrom(resp.Body); err != nil {
w.WriteHeader(http.StatusBadGateway)
w.Write([]byte(fmt.Sprintf("%d - StatusBadGateway: %s", http.StatusBadGateway, err)))
return
}
// Write result of request
headers := w.Header()
var key string
var val []string
for key, val = range resp.Header {
headers[key] = val
}
headers.Set("Content-Length", strconv.Itoa(respBuffer.Len()))
w.WriteHeader(resp.StatusCode)
w.Write(respBuffer.Bytes())
}
func getTimeoutServer(addr string, handler http.Handler) *http.Server {
//keeps people who are slow or are sending keep-alives from eating all our sockets
const (
HTTP_READ_TO = DEFAULT_IDLE_TIMEOUT
HTTP_WRITE_TO = DEFAULT_IDLE_TIMEOUT
)
return &http.Server{
Addr: addr,
Handler: handler,
ReadTimeout: HTTP_READ_TO,
WriteTimeout: HTTP_WRITE_TO,
}
}
func NoProxyAllowed(request *http.Request) (*url.URL, error) {
return nil, nil
}
//TimeoutConn-------------------------
//Put me in my own TimeoutConn.go ?
type TimeoutConn struct {
net.Conn
readTimeout, writeTimeout time.Duration
}
var invalidOperationError = errors.New("TimeoutConn does not support or allow .SetDeadline operations")
func NewTimeoutConn(conn net.Conn, ioTimeout time.Duration) (*TimeoutConn, error) {
return NewTimeoutConnReadWriteTO(conn, ioTimeout, ioTimeout)
}
func NewTimeoutConnReadWriteTO(conn net.Conn, readTimeout, writeTimeout time.Duration) (*TimeoutConn, error) {
this := &TimeoutConn{
Conn: conn,
readTimeout: readTimeout,
writeTimeout: writeTimeout,
}
now := time.Now()
err := this.Conn.SetReadDeadline(now.Add(this.readTimeout))
if err != nil {
return nil, err
}
err = this.Conn.SetWriteDeadline(now.Add(this.writeTimeout))
if err != nil {
return nil, err
}
return this, nil
}
func NewTimeoutConnDial(network, addr string, ioTimeout time.Duration) (net.Conn, error) {
conn, err := net.DialTimeout(network, addr, ioTimeout)
if err != nil {
return nil, err
}
if conn, err = NewTimeoutConn(conn, ioTimeout); err != nil {
return nil, err
}
return conn, nil
}
func (this *TimeoutConn) Read(data []byte) (int, error) {
this.Conn.SetReadDeadline(time.Now().Add(this.readTimeout))
return this.Conn.Read(data)
}
func (this *TimeoutConn) Write(data []byte) (int, error) {
this.Conn.SetWriteDeadline(time.Now().Add(this.writeTimeout))
return this.Conn.Write(data)
}
func (this *TimeoutConn) SetDeadline(time time.Time) error {
return invalidOperationError
}
func (this *TimeoutConn) SetReadDeadline(time time.Time) error {
return invalidOperationError
}
func (this *TimeoutConn) SetWriteDeadline(time time.Time) error {
return invalidOperationError
}
We ran into this and after a lot of time trying to debug, I came across this: https://code.google.com/p/go/source/detail?r=d4e1ec84876c
This shifts the burden onto clients to read their whole response
bodies if they want the advantage of reusing TCP connections.
So be sure to read the entire body before closing; there are a couple of ways to do it. This function can come in handy: it closes the response for you, logs any extra bytes that weren't read so you can see whether you have this issue, and drains the stream so the connection can be reused:
func closeResponse(response *http.Response) error {
// ensure we read the entire body
bs, err2 := ioutil.ReadAll(response.Body)
if err2 != nil {
log.Println("Error during ReadAll!!", err2)
}
if len(bs) > 0 {
log.Println("Had to read some bytes, not good!", bs, string(bs))
}
return response.Body.Close()
}
Or if you really don't care about the body, you can just discard it with this:
io.Copy(ioutil.Discard, response.Body)
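As a small usage sketch (my own wrapper, not code from the question; it assumes the usual "io", "io/ioutil", and "net/http" imports), the drain and close typically go together in a defer right after the request succeeds, so the connection can return to the pool:
func fetch(client *http.Client, url string) ([]byte, error) {
	resp, err := client.Get(url)
	if err != nil {
		return nil, err
	}
	defer func() {
		// Drain anything left in the body, then close it, so the
		// keep-alive connection can be reused instead of leaking.
		io.Copy(ioutil.Discard, resp.Body)
		resp.Body.Close()
	}()
	return ioutil.ReadAll(resp.Body)
}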
I have encountered this problem too; adding the option {DisableKeepAlives: true} to the http.Transport fixed the issue for me. You can give it a try.
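A minimal sketch of that option applied to a client like the one in the question (all other Transport fields stay at their defaults):
client := &http.Client{
	Transport: &http.Transport{
		// Close each connection after its request completes instead of
		// keeping idle connections around in the pool.
		DisableKeepAlives: true,
	},
}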
I came here when running a massive number of SQL queries per second over a long period on a system without limiting the number of idle connections. As pointed out in this issue comment on GitHub, explicitly setting db.SetMaxIdleConns(5) completely solved my problem.
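A sketch of that setting in context, assuming database/sql with the MySQL driver (the driver choice and DSN are placeholders, not from the comment above):
import (
	"database/sql"
	"log"

	_ "github.com/go-sql-driver/mysql" // driver choice is an assumption
)

func openDB(dsn string) *sql.DB {
	db, err := sql.Open("mysql", dsn)
	if err != nil {
		log.Fatal(err)
	}
	// Keep an explicit idle pool so connections are reused rather than
	// constantly reopened, which is what exhausts local ports under load.
	db.SetMaxIdleConns(5)
	return db
}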