
vendor: golang.org/x/net v0.48.0

- trace: fix data race in RenderEvents
- http2, webdav, websocket: fix %q verb uses with wrong type
- http2: don't PING a responsive server when resetting a stream
- http2: support net/http.Transport.NewClientConn

full diff: https://github.com/golang/net/compare/v0.47.0...v0.48.0

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn
2025-12-23 22:51:05 +01:00
parent 647ab775d0
commit 0cd2c18580
5 changed files with 153 additions and 17 deletions
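The headline change in this bump is the exported http2.Transport.NewClientConn entry point (see the transport.go hunks below). A minimal sketch of driving it over a caller-managed TLS connection; the target host, ALPN setup, and error handling are illustrative assumptions, not part of this commit:

    package main

    import (
        "crypto/tls"
        "log"
        "net/http"

        "golang.org/x/net/http2"
    )

    func main() {
        // Dial the TLS connection ourselves, negotiating HTTP/2 via ALPN.
        conn, err := tls.Dial("tcp", "example.com:443", &tls.Config{
            NextProtos: []string{"h2"},
        })
        if err != nil {
            log.Fatal(err)
        }
        // Hand the established connection to the HTTP/2 transport; the
        // returned ClientConn multiplexes concurrent requests over it.
        cc, err := (&http2.Transport{}).NewClientConn(conn)
        if err != nil {
            log.Fatal(err)
        }
        req, _ := http.NewRequest("GET", "https://example.com/", nil)
        res, err := cc.RoundTrip(req)
        if err != nil {
            log.Fatal(err)
        }
        defer res.Body.Close()
        log.Println(res.Status)
    }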


@@ -98,7 +98,7 @@ require (
go.opentelemetry.io/auto/sdk v1.2.1 // indirect
go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.38.0 // indirect
go.opentelemetry.io/proto/otlp v1.7.1 // indirect
-golang.org/x/net v0.47.0 // indirect
+golang.org/x/net v0.48.0 // indirect
golang.org/x/time v0.14.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250825161204-c5933d9347a5 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250825161204-c5933d9347a5 // indirect


@@ -243,8 +243,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
-golang.org/x/net v0.47.0 h1:Mx+4dIFzqraBXUugkia1OOvlD6LemFo1ALMHjrXDOhY=
-golang.org/x/net v0.47.0/go.mod h1:/jNxtkgq5yWUGYkaZGqo27cfGZ1c5Nen03aYrrKpVRU=
+golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
+golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=


@@ -376,11 +376,24 @@ type ClientConn struct {
// completely unresponsive connection.
pendingResets int
+// readBeforeStreamID is the smallest stream ID that has not been followed by
+// a frame read from the peer. We use this to determine when a request may
+// have been sent to a completely unresponsive connection:
+// If the request ID is less than readBeforeStreamID, then we have had some
+// indication of life on the connection since sending the request.
+readBeforeStreamID uint32
+
// reqHeaderMu is a 1-element semaphore channel controlling access to sending new requests.
// Write to reqHeaderMu to lock it, read from it to unlock.
// Lock reqHeaderMu BEFORE mu or wmu.
reqHeaderMu chan struct{}
+// internalStateHook reports state changes back to the net/http.ClientConn.
+// Note that this is different from the user state hook registered by
+// net/http.ClientConn.SetStateHook: The internal hook calls ClientConn,
+// which calls the user hook.
+internalStateHook func()
+
// wmu is held while writing.
// Acquire BEFORE mu when holding both, to avoid blocking mu on network writes.
// Only acquire both at the same time when changing peer settings.
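The readBeforeStreamID field added above pairs with an update in the read loop (see the streamByID hunk further down). A standalone sketch of the invariant, with field names mirroring the diff and the wrapper type purely illustrative:

    package sketch

    // conn is a stripped-down stand-in for ClientConn.
    type conn struct {
        nextStreamID       uint32 // next stream ID to hand out (odd, increasing)
        readBeforeStreamID uint32 // advanced to nextStreamID on every frame read
    }

    // onFrameRead records that the peer produced a frame, which proves
    // liveness for every stream opened before this point.
    func (c *conn) onFrameRead() { c.readBeforeStreamID = c.nextStreamID }

    // readSince reports whether any frame arrived after stream id was opened.
    func (c *conn) readSince(id uint32) bool { return c.readBeforeStreamID > id }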
@@ -710,7 +723,7 @@ func canRetryError(err error) bool {
func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse bool) (*ClientConn, error) {
if t.transportTestHooks != nil {
-return t.newClientConn(nil, singleUse)
+return t.newClientConn(nil, singleUse, nil)
}
host, _, err := net.SplitHostPort(addr)
if err != nil {
@@ -720,7 +733,7 @@ func (t *Transport) dialClientConn(ctx context.Context, addr string, singleUse b
if err != nil {
return nil, err
}
-return t.newClientConn(tconn, singleUse)
+return t.newClientConn(tconn, singleUse, nil)
}
func (t *Transport) newTLSConfig(host string) *tls.Config {
@@ -772,10 +785,10 @@ func (t *Transport) expectContinueTimeout() time.Duration {
}
func (t *Transport) NewClientConn(c net.Conn) (*ClientConn, error) {
-return t.newClientConn(c, t.disableKeepAlives())
+return t.newClientConn(c, t.disableKeepAlives(), nil)
}
-func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, error) {
+func (t *Transport) newClientConn(c net.Conn, singleUse bool, internalStateHook func()) (*ClientConn, error) {
conf := configFromTransport(t)
cc := &ClientConn{
t: t,
@@ -797,6 +810,7 @@ func (t *Transport) newClientConn(c net.Conn, singleUse bool) (*ClientConn, erro
pings: make(map[[8]byte]chan struct{}),
reqHeaderMu: make(chan struct{}, 1),
lastActive: time.Now(),
+internalStateHook: internalStateHook,
}
if t.transportTestHooks != nil {
t.transportTestHooks.newclientconn(cc)
@@ -1037,10 +1051,7 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
maxConcurrentOkay = cc.currentRequestCountLocked() < int(cc.maxConcurrentStreams)
}
-st.canTakeNewRequest = cc.goAway == nil && !cc.closed && !cc.closing && maxConcurrentOkay &&
-!cc.doNotReuse &&
-int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
-!cc.tooIdleLocked()
+st.canTakeNewRequest = maxConcurrentOkay && cc.isUsableLocked()
// If this connection has never been used for a request and is closed,
// then let it take a request (which will fail).
@@ -1056,6 +1067,31 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
return
}
+func (cc *ClientConn) isUsableLocked() bool {
+return cc.goAway == nil &&
+!cc.closed &&
+!cc.closing &&
+!cc.doNotReuse &&
+int64(cc.nextStreamID)+2*int64(cc.pendingRequests) < math.MaxInt32 &&
+!cc.tooIdleLocked()
+}
+
+// canReserveLocked reports whether a net/http.ClientConn can reserve a slot on this conn.
+//
+// This follows slightly different rules than clientConnIdleState.canTakeNewRequest.
+// We only permit reservations up to the conn's concurrency limit.
+// This differs from ClientConn.ReserveNewRequest, which permits reservations
+// past the limit when StrictMaxConcurrentStreams is set.
+func (cc *ClientConn) canReserveLocked() bool {
+if cc.currentRequestCountLocked() >= int(cc.maxConcurrentStreams) {
+return false
+}
+if !cc.isUsableLocked() {
+return false
+}
+return true
+}
+
// currentRequestCountLocked reports the number of concurrency slots currently in use,
// including active streams, reserved slots, and reset streams waiting for acknowledgement.
func (cc *ClientConn) currentRequestCountLocked() int {
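A compact model of the slot accounting behind currentRequestCountLocked, the new canReserveLocked, and availableLocked below; this is a standalone illustration under assumed names, not the vendored code:

    package sketch

    // slots models a conn's concurrency budget: active streams, reserved
    // requests, and reset streams awaiting acknowledgement all hold a slot.
    type slots struct {
        limit, active, reserved, pendingResets int
    }

    func (s *slots) inUse() int { return s.active + s.reserved + s.pendingResets }

    // reserve mirrors canReserveLocked: reservations stop at the limit, even
    // though StrictMaxConcurrentStreams lets ReserveNewRequest queue past it.
    func (s *slots) reserve() bool {
        if s.inUse() >= s.limit {
            return false
        }
        s.reserved++
        return true
    }

    // available mirrors availableLocked, clamped at zero.
    func (s *slots) available() int { return max(0, s.limit-s.inUse()) }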
@@ -1067,6 +1103,14 @@ func (cc *ClientConn) canTakeNewRequestLocked() bool {
return st.canTakeNewRequest
}
+// availableLocked reports the number of concurrency slots available.
+func (cc *ClientConn) availableLocked() int {
+if !cc.canTakeNewRequestLocked() {
+return 0
+}
+return max(0, int(cc.maxConcurrentStreams)-cc.currentRequestCountLocked())
+}
+
// tooIdleLocked reports whether this connection has been sitting idle
// for too much wall time.
func (cc *ClientConn) tooIdleLocked() bool {
@@ -1091,6 +1135,7 @@ func (cc *ClientConn) closeConn() {
t := time.AfterFunc(250*time.Millisecond, cc.forceCloseConn)
defer t.Stop()
cc.tconn.Close()
+cc.maybeCallStateHook()
}
// A tls.Conn.Close can hang for a long time if the peer is unresponsive.
@@ -1616,6 +1661,8 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
}
bodyClosed := cs.reqBodyClosed
closeOnIdle := cc.singleUse || cc.doNotReuse || cc.t.disableKeepAlives() || cc.goAway != nil
+// Have we read any frames from the connection since sending this request?
+readSinceStream := cc.readBeforeStreamID > cs.ID
cc.mu.Unlock()
if mustCloseBody {
cs.reqBody.Close()
@@ -1647,8 +1694,10 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
//
// This could be due to the server becoming unresponsive.
// To avoid sending too many requests on a dead connection,
-// we let the request continue to consume a concurrency slot
-// until we can confirm the server is still responding.
+// if we haven't read any frames from the connection since
+// sending this request, we let it continue to consume
+// a concurrency slot until we can confirm the server is
+// still responding.
// We do this by sending a PING frame along with the RST_STREAM
// (unless a ping is already in flight).
//
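The conditions in this comment condense into one predicate; a sketch, where pingInFlight stands in for the in-flight-PING check and the rstStreamPingsBlocked workaround mentioned below is omitted:

    package sketch

    // shouldPing reports whether a canceled request's RST_STREAM should be
    // accompanied by a liveness-probing PING. After this change the probe
    // is also skipped when a frame has been read since the request was sent.
    func shouldPing(closeOnIdle, readSinceStream, pingInFlight bool) bool {
        return !closeOnIdle && !readSinceStream && !pingInFlight
    }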
@@ -1659,7 +1708,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
// because it's short lived and will probably be closed before
// we get the ping response.
ping := false
-if !closeOnIdle {
+if !closeOnIdle && !readSinceStream {
cc.mu.Lock()
// rstStreamPingsBlocked works around a gRPC behavior:
// see comment on the field for details.
@@ -1693,6 +1742,7 @@ func (cs *clientStream) cleanupWriteRequest(err error) {
}
close(cs.donec)
+cc.maybeCallStateHook()
}
// awaitOpenSlotForStreamLocked waits until len(streams) < maxConcurrentStreams.
@@ -2745,6 +2795,7 @@ func (rl *clientConnReadLoop) streamByID(id uint32, headerOrData bool) *clientSt
// See comment on ClientConn.rstStreamPingsBlocked for details.
rl.cc.rstStreamPingsBlocked = false
}
+rl.cc.readBeforeStreamID = rl.cc.nextStreamID
cs := rl.cc.streams[id]
if cs != nil && !cs.readAborted {
return cs
@@ -2795,6 +2846,7 @@ func (rl *clientConnReadLoop) processSettings(f *SettingsFrame) error {
func (rl *clientConnReadLoop) processSettingsNoWrite(f *SettingsFrame) error {
cc := rl.cc
+defer cc.maybeCallStateHook()
cc.mu.Lock()
defer cc.mu.Unlock()
@@ -2975,6 +3027,7 @@ func (cc *ClientConn) Ping(ctx context.Context) error {
func (rl *clientConnReadLoop) processPing(f *PingFrame) error {
if f.IsAck() {
cc := rl.cc
+defer cc.maybeCallStateHook()
cc.mu.Lock()
defer cc.mu.Unlock()
// If ack, notify listener if any
@@ -3198,9 +3251,13 @@ func registerHTTPSProtocol(t *http.Transport, rt noDialH2RoundTripper) (err erro
}
// noDialH2RoundTripper is a RoundTripper which only tries to complete the request
-// if there's already has a cached connection to the host.
+// if there's already a cached connection to the host.
// (The field is exported so it can be accessed via reflect from net/http; tested
// by TestNoDialH2RoundTripperType)
+//
+// A noDialH2RoundTripper is registered with http1.Transport.RegisterProtocol,
+// and the http1.Transport can use type assertions to call non-RoundTrip methods on it.
+// This lets us expose, for example, NewClientConn to net/http.
type noDialH2RoundTripper struct{ *Transport }
func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, error) {
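From the net/http side, the type-assertion handshake described above could look roughly like this; the interface name is hypothetical, while the method signature matches the NewClientConn method added below:

    package sketch

    import (
        "errors"
        "net"
        "net/http"
    )

    // newClientConner is a hypothetical name for the interface net/http
    // asserts against the RoundTripper registered for "https".
    type newClientConner interface {
        NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error)
    }

    // tryNewClientConn reaches the extra method without net/http having to
    // import golang.org/x/net/http2 directly.
    func tryNewClientConn(rt http.RoundTripper, conn net.Conn, hook func()) (http.RoundTripper, error) {
        nc, ok := rt.(newClientConner)
        if !ok {
            return nil, errors.New("registered RoundTripper does not support NewClientConn")
        }
        return nc.NewClientConn(conn, hook)
    }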
@@ -3211,6 +3268,85 @@ func (rt noDialH2RoundTripper) RoundTrip(req *http.Request) (*http.Response, err
return res, err
}
+func (rt noDialH2RoundTripper) NewClientConn(conn net.Conn, internalStateHook func()) (http.RoundTripper, error) {
+tr := rt.Transport
+cc, err := tr.newClientConn(conn, tr.disableKeepAlives(), internalStateHook)
+if err != nil {
+return nil, err
+}
+// RoundTrip should block when the conn is at its concurrency limit,
+// not return an error. Setting strictMaxConcurrentStreams enables this.
+cc.strictMaxConcurrentStreams = true
+return netHTTPClientConn{cc}, nil
+}
+
+// netHTTPClientConn wraps ClientConn and implements the interface net/http expects from
+// the RoundTripper returned by NewClientConn.
+type netHTTPClientConn struct {
+cc *ClientConn
+}
+
+func (cc netHTTPClientConn) RoundTrip(req *http.Request) (*http.Response, error) {
+return cc.cc.RoundTrip(req)
+}
+
+func (cc netHTTPClientConn) Close() error {
+return cc.cc.Close()
+}
+
+func (cc netHTTPClientConn) Err() error {
+cc.cc.mu.Lock()
+defer cc.cc.mu.Unlock()
+if cc.cc.closed {
+return errors.New("connection closed")
+}
+return nil
+}
+
+func (cc netHTTPClientConn) Reserve() error {
+defer cc.cc.maybeCallStateHook()
+cc.cc.mu.Lock()
+defer cc.cc.mu.Unlock()
+if !cc.cc.canReserveLocked() {
+return errors.New("connection is unavailable")
+}
+cc.cc.streamsReserved++
+return nil
+}
+
+func (cc netHTTPClientConn) Release() {
+defer cc.cc.maybeCallStateHook()
+cc.cc.mu.Lock()
+defer cc.cc.mu.Unlock()
+// We don't complain if streamsReserved is 0.
+//
+// This is consistent with RoundTrip: both Release and RoundTrip will
+// consume a reservation iff one exists.
+if cc.cc.streamsReserved > 0 {
+cc.cc.streamsReserved--
+}
+}
+
+func (cc netHTTPClientConn) Available() int {
+cc.cc.mu.Lock()
+defer cc.cc.mu.Unlock()
+return cc.cc.availableLocked()
+}
+
+func (cc netHTTPClientConn) InFlight() int {
+cc.cc.mu.Lock()
+defer cc.cc.mu.Unlock()
+return cc.cc.currentRequestCountLocked()
+}
+
+func (cc *ClientConn) maybeCallStateHook() {
+if cc.internalStateHook != nil {
+cc.internalStateHook()
+}
+}
+
func (t *Transport) idleConnTimeout() time.Duration {
// to keep things backwards compatible, we use non-zero values of
// IdleConnTimeout, followed by using the IdleConnTimeout on the underlying
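Taken together, the netHTTPClientConn methods support a reserve-then-roundtrip flow on the caller's side. A hedged sketch of that usage; the caller shape is assumed, and per the Release comment above, RoundTrip itself consumes the reservation:

    package sketch

    import "net/http"

    // doOnConn reserves a concurrency slot before committing a request to
    // the conn. No explicit Release is needed once RoundTrip runs, since
    // RoundTrip consumes a reservation iff one exists.
    func doOnConn(conn interface {
        Reserve() error
        RoundTrip(*http.Request) (*http.Response, error)
    }, req *http.Request) (*http.Response, error) {
        if err := conn.Reserve(); err != nil {
            return nil, err // conn unusable or at its concurrency limit
        }
        return conn.RoundTrip(req)
    }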


@@ -58,8 +58,8 @@ func RenderEvents(w http.ResponseWriter, req *http.Request, sensitive bool) {
Buckets: buckets,
}
-data.Families = make([]string, 0, len(families))
famMu.RLock()
+data.Families = make([]string, 0, len(families))
for name := range families {
data.Families = append(data.Families, name)
}

vendor/modules.txt

@@ -380,7 +380,7 @@ go.opentelemetry.io/proto/otlp/trace/v1
# go.yaml.in/yaml/v3 v3.0.4
## explicit; go 1.16
go.yaml.in/yaml/v3
-# golang.org/x/net v0.47.0
+# golang.org/x/net v0.48.0
## explicit; go 1.24.0
golang.org/x/net/http/httpguts
golang.org/x/net/http2