Bump github.com/nats-io/nats-server/v2 from 2.10.22 to 2.10.27
Bumps [github.com/nats-io/nats-server/v2](https://github.com/nats-io/nats-server) from 2.10.22 to 2.10.27.
- [Release notes](https://github.com/nats-io/nats-server/releases)
- [Changelog](https://github.com/nats-io/nats-server/blob/main/.goreleaser.yml)
- [Commits](https://github.com/nats-io/nats-server/compare/v2.10.22...v2.10.27)

---
updated-dependencies:
- dependency-name: github.com/nats-io/nats-server/v2
  dependency-version: 2.10.27
  dependency-type: direct:production
...

Signed-off-by: dependabot[bot] <support@github.com>
This commit is contained in:
parent 20f32d4f2a
commit 9ed9825e5a
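To sanity-check a bump like this locally, one option is a quick embedded-server round trip built against the upgraded modules. The sketch below is illustrative only — it is not part of this commit — and assumes the documented nats-server and nats.go APIs:

```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/nats-io/nats-server/v2/server"
	"github.com/nats-io/nats.go"
)

func main() {
	// Start an embedded nats-server (2.10.27 after this bump) on a random port.
	ns, err := server.NewServer(&server.Options{Port: -1})
	if err != nil {
		log.Fatal(err)
	}
	go ns.Start()
	defer ns.Shutdown()
	if !ns.ReadyForConnections(5 * time.Second) {
		log.Fatal("nats-server did not become ready")
	}

	// Round-trip one message with the upgraded nats.go client.
	nc, err := nats.Connect(ns.ClientURL())
	if err != nil {
		log.Fatal(err)
	}
	defer nc.Close()

	sub, err := nc.SubscribeSync("smoke")
	if err != nil {
		log.Fatal(err)
	}
	if err := nc.Publish("smoke", []byte("ok")); err != nil {
		log.Fatal(err)
	}
	msg, err := sub.NextMsg(time.Second)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("upgrade smoke test passed: %s\n", msg.Data)
}
```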
go.mod (12 changes)
@@ -57,8 +57,8 @@ require (
 	github.com/mitchellh/mapstructure v1.5.0
 	github.com/mna/pigeon v1.3.0
 	github.com/mohae/deepcopy v0.0.0-20170929034955-c48cc78d4826
-	github.com/nats-io/nats-server/v2 v2.10.22
-	github.com/nats-io/nats.go v1.37.0
+	github.com/nats-io/nats-server/v2 v2.10.27
+	github.com/nats-io/nats.go v1.39.1
 	github.com/oklog/run v1.1.0
 	github.com/olekukonko/tablewriter v0.0.5
 	github.com/onsi/ginkgo v1.16.5
@@ -244,7 +244,7 @@ require (
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/juliangruber/go-intersect v1.1.0 // indirect
 	github.com/kevinburke/ssh_config v1.2.0 // indirect
-	github.com/klauspost/compress v1.17.11 // indirect
+	github.com/klauspost/compress v1.18.0 // indirect
 	github.com/klauspost/cpuid/v2 v2.2.8 // indirect
 	github.com/leodido/go-urn v1.4.0 // indirect
 	github.com/libregraph/oidc-go v1.1.0 // indirect
@@ -268,8 +268,8 @@ require (
 	github.com/modern-go/reflect2 v1.0.2 // indirect
 	github.com/mschoch/smat v0.2.0 // indirect
 	github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
-	github.com/nats-io/jwt/v2 v2.5.8 // indirect
-	github.com/nats-io/nkeys v0.4.7 // indirect
+	github.com/nats-io/jwt/v2 v2.7.3 // indirect
+	github.com/nats-io/nkeys v0.4.10 // indirect
 	github.com/nats-io/nuid v1.0.1 // indirect
 	github.com/nxadm/tail v1.4.8 // indirect
 	github.com/opencontainers/runtime-spec v1.1.0 // indirect
@@ -328,7 +328,7 @@ require (
 	go.uber.org/zap v1.23.0 // indirect
 	golang.org/x/mod v0.21.0 // indirect
 	golang.org/x/sys v0.31.0 // indirect
-	golang.org/x/time v0.7.0 // indirect
+	golang.org/x/time v0.10.0 // indirect
 	golang.org/x/tools v0.26.0 // indirect
 	golang.org/x/xerrors v0.0.0-20220907171357-04be3eba64a2 // indirect
 	google.golang.org/genproto v0.0.0-20241015192408-796eee8c2d53 // indirect
go.sum (24 changes)
@@ -689,8 +689,8 @@ github.com/kisielk/errcheck v1.1.0/go.mod h1:EZBBE59ingxPouuu3KfxchcWSUPOHkagtvW
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
 github.com/klauspost/compress v1.15.9/go.mod h1:PhcZ0MbTNciWF3rruxRgKxI5NkcHHrHUDtV4Yw2GlzU=
-github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc=
-github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0=
+github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
+github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
 github.com/klauspost/cpuid/v2 v2.0.1/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
 github.com/klauspost/cpuid/v2 v2.2.8 h1:+StwCXwm9PdpiEkPyzBXIy+M9KUb4ODm0Zarf1kS5BM=
 github.com/klauspost/cpuid/v2 v2.2.8/go.mod h1:Lcz8mBdAVJIBVzewtcLocK12l3Y+JytZYpaMropDUws=
@@ -829,14 +829,14 @@ github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8m
 github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
 github.com/namedotcom/go v0.0.0-20180403034216-08470befbe04/go.mod h1:5sN+Lt1CaY4wsPvgQH/jsuJi4XO2ssZbdsIizr4CVC8=
-github.com/nats-io/jwt/v2 v2.5.8 h1:uvdSzwWiEGWGXf+0Q+70qv6AQdvcvxrv9hPM0RiPamE=
-github.com/nats-io/jwt/v2 v2.5.8/go.mod h1:ZdWS1nZa6WMZfFwwgpEaqBV8EPGVgOTDHN/wTbz0Y5A=
-github.com/nats-io/nats-server/v2 v2.10.22 h1:Yt63BGu2c3DdMoBZNcR6pjGQwk/asrKU7VX846ibxDA=
-github.com/nats-io/nats-server/v2 v2.10.22/go.mod h1:X/m1ye9NYansUXYFrbcDwUi/blHkrgHh2rgCJaakonk=
-github.com/nats-io/nats.go v1.37.0 h1:07rauXbVnnJvv1gfIyghFEo6lUcYRY0WXc3x7x0vUxE=
-github.com/nats-io/nats.go v1.37.0/go.mod h1:Ubdu4Nh9exXdSz0RVWRFBbRfrbSxOYd26oF0wkWclB8=
-github.com/nats-io/nkeys v0.4.7 h1:RwNJbbIdYCoClSDNY7QVKZlyb/wfT6ugvFCiKy6vDvI=
-github.com/nats-io/nkeys v0.4.7/go.mod h1:kqXRgRDPlGy7nGaEDMuYzmiJCIAAWDK0IMBtDmGD0nc=
+github.com/nats-io/jwt/v2 v2.7.3 h1:6bNPK+FXgBeAqdj4cYQ0F8ViHRbi7woQLq4W29nUAzE=
+github.com/nats-io/jwt/v2 v2.7.3/go.mod h1:GvkcbHhKquj3pkioy5put1wvPxs78UlZ7D/pY+BgZk4=
+github.com/nats-io/nats-server/v2 v2.10.27 h1:A/i3JqtrP897UHc2/Jia/mqaXkqj9+HGdpz+R0mC+sM=
+github.com/nats-io/nats-server/v2 v2.10.27/go.mod h1:SGzoWGU8wUVnMr/HJhEMv4R8U4f7hF4zDygmRxpNsvg=
+github.com/nats-io/nats.go v1.39.1 h1:oTkfKBmz7W047vRxV762M67ZdXeOtUgvbBaNoQ+3PPk=
+github.com/nats-io/nats.go v1.39.1/go.mod h1:MgRb8oOdigA6cYpEPhXJuRVH6UE/V4jblJ2jQ27IXYM=
+github.com/nats-io/nkeys v0.4.10 h1:glmRrpCmYLHByYcePvnTBEAwawwapjCPMjy2huw20wc=
+github.com/nats-io/nkeys v0.4.10/go.mod h1:OjRrnIKnWBFl+s4YK5ChQfvHP2fxqZexrKJoVVyWB3U=
 github.com/nats-io/nuid v1.0.1 h1:5iA8DT8V7q8WK2EScv2padNa/rTESc1KdnPw4TC2paw=
 github.com/nats-io/nuid v1.0.1/go.mod h1:19wcPz3Ph3q0Jbyiqsd0kePYG7A95tJPxeL+1OSON2c=
 github.com/nbio/st v0.0.0-20140626010706-e9e8d9816f32/go.mod h1:9wM+0iRr9ahx58uYLpLIr5fm8diHn0JbqRycJi6w0Ms=
@@ -1476,8 +1476,8 @@ golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxb
 golang.org/x/time v0.0.0-20200630173020-3af7569d3a1e/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20201208040808-7e3f01d25324/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
 golang.org/x/time v0.0.0-20210220033141-f8bda1e9f3ba/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
-golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
-golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
+golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
+golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
 golang.org/x/tools v0.0.0-20180221164845-07fd8470d635/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
 golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
vendor/github.com/klauspost/compress/README.md (140 changes; generated, vendored)
@@ -14,8 +14,34 @@ This package provides various compression algorithms.
 [](https://github.com/klauspost/compress/actions/workflows/go.yml)
 [](https://sourcegraph.com/github.com/klauspost/compress?badge)
 
+# package usage
+
+Use `go get github.com/klauspost/compress@latest` to add it to your project.
+
+This package will support the current Go version and 2 versions back.
+
+* Use the `nounsafe` tag to disable all use of the "unsafe" package.
+* Use the `noasm` tag to disable all assembly across packages.
+
+Use the links above for more information on each.
+
 # changelog
 
+* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
+  * Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
+  * fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
+  * flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
+  * flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
+  * s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
+  * flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
+  * flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
+
 * Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
   * zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
   * s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
   * gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
   * gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
 
 * Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
   * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
   * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
@@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
 
 * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
-  * s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871
+  * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
   * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
-  * s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867
+  * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
 
 * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
   * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 	<summary>See changes to v1.15.x</summary>
 
 * Jan 21st, 2023 (v1.15.15)
-  * deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
+  * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
   * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
   * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
   * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
 
   * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
   * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
-  * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643
+  * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
 
 * July 13, 2022 (v1.15.8)
 
@@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
   * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
   * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
-  * flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590
+  * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
 
 
 * May 11, 2022 (v1.15.4)
@@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
   * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
 
 * Mar 3, 2022 (v1.15.0)
-  * zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
-  * zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
+  * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
+  * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
   * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
-  * flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
-  * gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
-  * gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)
+  * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
+  * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
+  * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
 
 Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
 
@@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when
   * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
   * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
   * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
-  * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
+  * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
 
 * Feb 17, 2022 (v1.14.3)
   * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when
 
 The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
 
-| old import | new import | Documentation
-|--------------------|-----------------------------------------|--------------------|
-| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc)
-| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc)
-| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc)
-| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc)
+Typical speed is about 2x of the standard library packages.
+
+| old import | new import | Documentation |
+|------------------|---------------------------------------|-------------------------------------------------------------------------|
+| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
+| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
+| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
+| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
 
 * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
 
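The table in the hunk above is the whole migration story for most users: only the import path changes. A minimal sketch (not part of this diff) using the gzip replacement with the standard-library-compatible API:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	// Drop-in replacement: was "compress/gzip".
	gzip "github.com/klauspost/compress/gzip"
)

func main() {
	var buf bytes.Buffer

	// Compress with the same API as the standard library.
	zw := gzip.NewWriter(&buf)
	if _, err := zw.Write([]byte("hello, compression")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Decompress it again.
	zr, err := gzip.NewReader(&buf)
	if err != nil {
		panic(err)
	}
	out, err := io.ReadAll(zr)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```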
@@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle.
 Compression is almost always worse than the fastest compression level
 and each write will allocate (a little) memory.
 
-# Performance Update 2018
-
-It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
-
-The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
-
-The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
-
-The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
-
-
-## Overall differences.
-
-There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
-
-The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
-
-This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
-
-There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
-
-## Web Content
-
-This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
-
-Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
-
-Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
-
-## Object files
-
-This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
-
-The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
-
-The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
-
-## Highly Compressible File
-
-This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
-
-It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
-
-So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
-
-## Medium-High Compressible
-
-This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
-
-We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
-
-## Medium Compressible
-
-I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
-
-The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
-
-
-## Un-compressible Content
-
-This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
-
-
-## Huffman only compression
-
-This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
-
-This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
-
-Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
-
-The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
-
-The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
-
-For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
-
-This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
-
 # Other packages
 
vendor/github.com/klauspost/compress/flate/fast_encoder.go (63 changes; generated, vendored)
@@ -6,8 +6,10 @@
 package flate
 
 import (
 	"encoding/binary"
 	"fmt"
 	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 type fastEnc interface {
@@ -58,11 +60,11 @@ const (
 )
 
 func load3232(b []byte, i int32) uint32 {
-	return binary.LittleEndian.Uint32(b[i:])
+	return le.Load32(b, i)
 }
 
 func load6432(b []byte, i int32) uint64 {
-	return binary.LittleEndian.Uint64(b[i:])
+	return le.Load64(b, i)
 }
 
 type tableEntry struct {
@@ -134,8 +136,8 @@ func hashLen(u uint64, length, mls uint8) uint32 {
 // matchlen will return the match length between offsets and t in src.
 // The maximum length returned is maxMatchLength - 4.
 // It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
-	if debugDecode {
+func (e *fastGen) matchlen(s, t int, src []byte) int32 {
+	if debugDeflate {
 		if t >= s {
 			panic(fmt.Sprint("t >=s:", t, s))
 		}
@@ -149,18 +151,34 @@ func (e *fastGen) matchlen(s, t int32, src []byte) int32 {
 			panic(fmt.Sprint(s, "-", t, "(", s-t, ") > maxMatchLength (", maxMatchOffset, ")"))
 		}
 	}
-	s1 := int(s) + maxMatchLength - 4
-	if s1 > len(src) {
-		s1 = len(src)
+	s1 := min(s+maxMatchLength-4, len(src))
+	left := s1 - s
+	n := int32(0)
+	for left >= 8 {
+		diff := le.Load64(src, s) ^ le.Load64(src, t)
+		if diff != 0 {
+			return n + int32(bits.TrailingZeros64(diff)>>3)
+		}
+		s += 8
+		t += 8
+		n += 8
+		left -= 8
 	}
 
-	// Extend the match to be as long as possible.
-	return int32(matchLen(src[s:s1], src[t:]))
+	a := src[s:s1]
+	b := src[t:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
 }
 
 // matchlenLong will return the match length between offsets and t in src.
 // It is assumed that s > t, that t >=0 and s < len(src).
-func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
+func (e *fastGen) matchlenLong(s, t int, src []byte) int32 {
 	if debugDeflate {
 		if t >= s {
 			panic(fmt.Sprint("t >=s:", t, s))
@@ -176,7 +194,28 @@ func (e *fastGen) matchlenLong(s, t int32, src []byte) int32 {
 		}
 	}
-	// Extend the match to be as long as possible.
-	return int32(matchLen(src[s:], src[t:]))
+	left := len(src) - s
+	n := int32(0)
+	for left >= 8 {
+		diff := le.Load64(src, s) ^ le.Load64(src, t)
+		if diff != 0 {
+			return n + int32(bits.TrailingZeros64(diff)>>3)
+		}
+		s += 8
+		t += 8
+		n += 8
+		left -= 8
+	}
+
+	a := src[s:]
+	b := src[t:]
+	for i := range a {
+		if a[i] != b[i] {
+			break
+		}
+		n++
+	}
+	return n
 }
 
 // Reset the encoding table.
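The rewritten loops above replace the assembly `matchLen` with pure Go that compares eight bytes per iteration: XOR two little-endian 64-bit loads, and when they differ, `bits.TrailingZeros64(diff) >> 3` converts the position of the first mismatching bit into a byte count. A standalone sketch of the same technique using only the standard library (illustrative names, not the vendored code):

```go
package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

// commonPrefixLen returns the length of the common prefix of a and b,
// comparing 8 bytes per iteration. It requires len(a) <= len(b).
func commonPrefixLen(a, b []byte) int {
	n := 0
	for len(a)-n >= 8 {
		// XOR of two equal words is zero; because the loads are
		// little-endian, the first differing byte is the lowest
		// non-zero byte of diff.
		diff := binary.LittleEndian.Uint64(a[n:]) ^ binary.LittleEndian.Uint64(b[n:])
		if diff != 0 {
			return n + bits.TrailingZeros64(diff)>>3
		}
		n += 8
	}
	// Fewer than 8 bytes left: finish byte by byte.
	for n < len(a) && a[n] == b[n] {
		n++
	}
	return n
}

func main() {
	// "compressed" and "compressible" share the 8-byte prefix "compress".
	fmt.Println(commonPrefixLen([]byte("compressed"), []byte("compressible"))) // 8
}
```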
vendor/github.com/klauspost/compress/flate/huffman_bit_writer.go (19 changes; generated, vendored)
@@ -5,10 +5,11 @@
 package flate
 
 import (
-	"encoding/binary"
 	"fmt"
 	"io"
 	"math"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 const (
@@ -438,7 +439,7 @@ func (w *huffmanBitWriter) writeOutBits() {
 	n := w.nbytes
 
 	// We over-write, but faster...
-	binary.LittleEndian.PutUint64(w.bytes[n:], bits)
+	le.Store64(w.bytes[n:], bits)
 	n += 6
 
 	if n >= bufferFlushSize {
@@ -854,7 +855,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		bits |= c.code64() << (nbits & 63)
 		nbits += c.len()
 		if nbits >= 48 {
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
 			bits >>= 48
 			nbits -= 48
@@ -882,7 +883,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		bits |= c.code64() << (nbits & 63)
 		nbits += c.len()
 		if nbits >= 48 {
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
 			bits >>= 48
 			nbits -= 48
@@ -905,7 +906,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		bits |= uint64(extraLength) << (nbits & 63)
 		nbits += extraLengthBits
 		if nbits >= 48 {
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
 			bits >>= 48
 			nbits -= 48
@@ -931,7 +932,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		bits |= c.code64() << (nbits & 63)
 		nbits += c.len()
 		if nbits >= 48 {
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
 			bits >>= 48
 			nbits -= 48
@@ -953,7 +954,7 @@ func (w *huffmanBitWriter) writeTokens(tokens []token, leCodes, oeCodes []hcode)
 		bits |= uint64((offset-(offsetComb>>8))&matchOffsetOnlyMask) << (nbits & 63)
 		nbits += uint8(offsetComb)
 		if nbits >= 48 {
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
 			bits >>= 48
 			nbits -= 48
@@ -1107,7 +1108,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 		// We must have at least 48 bits free.
 		if nbits >= 8 {
 			n := nbits >> 3
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			bits >>= (n * 8) & 63
 			nbits -= n * 8
 			nbytes += n
@@ -1136,7 +1137,7 @@ func (w *huffmanBitWriter) writeBlockHuff(eof bool, input []byte, sync bool) {
 	// Remaining...
 	for _, t := range input {
 		if nbits >= 48 {
-			binary.LittleEndian.PutUint64(w.bytes[nbytes:], bits)
+			le.Store64(w.bytes[nbytes:], bits)
 			//*(*uint64)(unsafe.Pointer(&w.bytes[nbytes])) = bits
 			bits >>= 48
 			nbits -= 48
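Every call site in this file follows the same pattern: codes accumulate in a 64-bit container, and once at least 48 bits are pending, one full 8-byte little-endian store flushes them while only 6 bytes are consumed — the deliberate 2-byte over-write is cheaper than a length-exact copy and is safe because the output buffer keeps slack at the end. A standalone sketch of that flush pattern (hypothetical type, not the vendored writer):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

// bitSink accumulates bit codes and flushes 6 bytes at a time
// using a full 8-byte store. The buffer must keep at least 8
// spare bytes so the over-write is always in bounds.
type bitSink struct {
	bits   uint64 // pending bits, LSB first
	nbits  uint8  // number of pending bits (always < 48 between writes)
	out    []byte
	nbytes int
}

func (s *bitSink) write(code uint64, width uint8) {
	s.bits |= code << (s.nbits & 63)
	s.nbits += width
	if s.nbits >= 48 {
		// Store all 8 bytes, but only consume 6 (48 bits).
		binary.LittleEndian.PutUint64(s.out[s.nbytes:], s.bits)
		s.bits >>= 48
		s.nbits -= 48
		s.nbytes += 6
	}
}

func main() {
	s := &bitSink{out: make([]byte, 64)}
	for i := 0; i < 20; i++ {
		s.write(uint64(i), 7) // twenty 7-bit codes = 140 bits
	}
	fmt.Println(s.nbytes, "bytes flushed,", s.nbits, "bits pending")
}
```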
vendor/github.com/klauspost/compress/flate/level1.go (48 changes; generated, vendored)
@@ -1,9 +1,9 @@
 package flate
 
 import (
-	"encoding/binary"
 	"fmt"
-	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // fastGen maintains the table for matches,
@@ -77,6 +77,7 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 
 		nextS := s
 		var candidate tableEntry
+		var t int32
 		for {
 			nextHash := hashLen(cv, tableBits, hashBytes)
 			candidate = e.table[nextHash]
@@ -88,9 +89,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 			now := load6432(src, nextS)
 			e.table[nextHash] = tableEntry{offset: s + e.cur}
 			nextHash = hashLen(now, tableBits, hashBytes)
-
-			offset := s - (candidate.offset - e.cur)
-			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+			t = candidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
 				break
 			}
@@ -103,8 +103,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 			now >>= 8
 			e.table[nextHash] = tableEntry{offset: s + e.cur}
 
-			offset = s - (candidate.offset - e.cur)
-			if offset < maxMatchOffset && uint32(cv) == load3232(src, candidate.offset-e.cur) {
+			t = candidate.offset - e.cur
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				e.table[nextHash] = tableEntry{offset: nextS + e.cur}
 				break
 			}
@@ -120,36 +120,10 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 		// literal bytes prior to s.
 
 		// Extend the 4-byte match as long as possible.
-		t := candidate.offset - e.cur
-		var l = int32(4)
-		if false {
-			l = e.matchlenLong(s+4, t+4, src) + 4
-		} else {
-			// inlined:
-			a := src[s+4:]
-			b := src[t+4:]
-			for len(a) >= 8 {
-				if diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b); diff != 0 {
-					l += int32(bits.TrailingZeros64(diff) >> 3)
-					break
-				}
-				l += 8
-				a = a[8:]
-				b = b[8:]
-			}
-			if len(a) < 8 {
-				b = b[:len(a)]
-				for i := range a {
-					if a[i] != b[i] {
-						break
-					}
-					l++
-				}
-			}
-		}
+		l := e.matchlenLong(int(s+4), int(t+4), src) + 4
 
 		// Extend backwards
-		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
+		for t > 0 && s > nextEmit && le.Load8(src, t-1) == le.Load8(src, s-1) {
 			s--
 			t--
 			l++
@@ -221,8 +195,8 @@ func (e *fastEncL1) Encode(dst *tokens, src []byte) {
 			candidate = e.table[currHash]
 			e.table[currHash] = tableEntry{offset: o + 2}
 
-			offset := s - (candidate.offset - e.cur)
-			if offset > maxMatchOffset || uint32(x) != load3232(src, candidate.offset-e.cur) {
+			t = candidate.offset - e.cur
+			if s-t > maxMatchOffset || uint32(x) != load3232(src, t) {
 				cv = x >> 8
 				s++
 				break
vendor/github.com/klauspost/compress/flate/level2.go (2 changes; generated, vendored)
@@ -126,7 +126,7 @@ func (e *fastEncL2) Encode(dst *tokens, src []byte) {
 
 		// Extend the 4-byte match as long as possible.
 		t := candidate.offset - e.cur
-		l := e.matchlenLong(s+4, t+4, src) + 4
+		l := e.matchlenLong(int(s+4), int(t+4), src) + 4
 
 		// Extend backwards
 		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
vendor/github.com/klauspost/compress/flate/level3.go (2 changes; generated, vendored)
@@ -135,7 +135,7 @@ func (e *fastEncL3) Encode(dst *tokens, src []byte) {
 		// Extend the 4-byte match as long as possible.
 		//
 		t := candidate.offset - e.cur
-		l := e.matchlenLong(s+4, t+4, src) + 4
+		l := e.matchlenLong(int(s+4), int(t+4), src) + 4
 
 		// Extend backwards
 		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
vendor/github.com/klauspost/compress/flate/level4.go (10 changes; generated, vendored)
@@ -98,19 +98,19 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
 			e.bTable[nextHashL] = entry
 
 			t = lCandidate.offset - e.cur
-			if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.offset-e.cur) {
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				// We got a long match. Use that.
 				break
 			}
 
 			t = sCandidate.offset - e.cur
-			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				// Found a 4 match...
 				lCandidate = e.bTable[hash7(next, tableBits)]
 
 				// If the next long is a candidate, check if we should use that instead...
-				lOff := nextS - (lCandidate.offset - e.cur)
-				if lOff < maxMatchOffset && load3232(src, lCandidate.offset-e.cur) == uint32(next) {
+				lOff := lCandidate.offset - e.cur
+				if nextS-lOff < maxMatchOffset && load3232(src, lOff) == uint32(next) {
 					l1, l2 := matchLen(src[s+4:], src[t+4:]), matchLen(src[nextS+4:], src[nextS-lOff+4:])
 					if l2 > l1 {
 						s = nextS
@@ -127,7 +127,7 @@ func (e *fastEncL4) Encode(dst *tokens, src []byte) {
 		// them as literal bytes.
 
 		// Extend the 4-byte match as long as possible.
-		l := e.matchlenLong(s+4, t+4, src) + 4
+		l := e.matchlenLong(int(s+4), int(t+4), src) + 4
 
 		// Extend backwards
 		for t > 0 && s > nextEmit && src[t-1] == src[s-1] {
vendor/github.com/klauspost/compress/flate/level5.go (40 changes; generated, vendored)
@@ -111,16 +111,16 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 
 			t = lCandidate.Cur.offset - e.cur
 			if s-t < maxMatchOffset {
-				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+				if uint32(cv) == load3232(src, t) {
 					// Store the next match
 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
 					eLong := &e.bTable[nextHashL]
 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
 
 					t2 := lCandidate.Prev.offset - e.cur
-					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
-						l = e.matchlen(s+4, t+4, src) + 4
-						ml1 := e.matchlen(s+4, t2+4, src) + 4
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(int(s+4), int(t+4), src) + 4
+						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
 						if ml1 > l {
 							t = t2
 							l = ml1
@@ -130,7 +130,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 					break
 				}
 				t = lCandidate.Prev.offset - e.cur
-				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 					// Store the next match
 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
 					eLong := &e.bTable[nextHashL]
@@ -140,9 +140,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 			}
 
 			t = sCandidate.offset - e.cur
-			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				// Found a 4 match...
-				l = e.matchlen(s+4, t+4, src) + 4
+				l = e.matchlen(int(s+4), int(t+4), src) + 4
 				lCandidate = e.bTable[nextHashL]
 				// Store the next match
 
@@ -153,8 +153,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 				// If the next long is a candidate, use that...
 				t2 := lCandidate.Cur.offset - e.cur
 				if nextS-t2 < maxMatchOffset {
-					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
-						ml := e.matchlen(nextS+4, t2+4, src) + 4
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
 						if ml > l {
 							t = t2
 							s = nextS
@@ -164,8 +164,8 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 					}
 					// If the previous long is a candidate, use that...
 					t2 = lCandidate.Prev.offset - e.cur
-					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
-						ml := e.matchlen(nextS+4, t2+4, src) + 4
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
 						if ml > l {
 							t = t2
 							s = nextS
@@ -185,9 +185,9 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 
 		if l == 0 {
 			// Extend the 4-byte match as long as possible.
-			l = e.matchlenLong(s+4, t+4, src) + 4
+			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
 		} else if l == maxMatchLength {
-			l += e.matchlenLong(s+l, t+l, src)
+			l += e.matchlenLong(int(s+l), int(t+l), src)
 		}
 
 		// Try to locate a better match by checking the end of best match...
@@ -203,7 +203,7 @@ func (e *fastEncL5) Encode(dst *tokens, src []byte) {
 			s2 := s + skipBeginning
 			off := s2 - t2
 			if t2 >= 0 && off < maxMatchOffset && off > 0 {
-				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+				if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
 					t = t2
 					l = l2
 					s = s2
@@ -423,14 +423,14 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
 
 			t = lCandidate.Cur.offset - e.cur
 			if s-t < maxMatchOffset {
-				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+				if uint32(cv) == load3232(src, t) {
 					// Store the next match
 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
 					eLong := &e.bTable[nextHashL]
 					eLong.Cur, eLong.Prev = tableEntry{offset: nextS + e.cur}, eLong.Cur
 
 					t2 := lCandidate.Prev.offset - e.cur
-					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
 						l = e.matchlen(s+4, t+4, src) + 4
 						ml1 := e.matchlen(s+4, t2+4, src) + 4
 						if ml1 > l {
@@ -442,7 +442,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
 					break
 				}
 				t = lCandidate.Prev.offset - e.cur
-				if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+				if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 					// Store the next match
 					e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
 					eLong := &e.bTable[nextHashL]
@@ -452,7 +452,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
 			}
 
 			t = sCandidate.offset - e.cur
-			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				// Found a 4 match...
 				l = e.matchlen(s+4, t+4, src) + 4
 				lCandidate = e.bTable[nextHashL]
@@ -465,7 +465,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
 				// If the next long is a candidate, use that...
 				t2 := lCandidate.Cur.offset - e.cur
 				if nextS-t2 < maxMatchOffset {
-					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
+					if load3232(src, t2) == uint32(next) {
 						ml := e.matchlen(nextS+4, t2+4, src) + 4
 						if ml > l {
 							t = t2
@@ -476,7 +476,7 @@ func (e *fastEncL5Window) Encode(dst *tokens, src []byte) {
 					}
 					// If the previous long is a candidate, use that...
 					t2 = lCandidate.Prev.offset - e.cur
-					if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
+					if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
 						ml := e.matchlen(nextS+4, t2+4, src) + 4
 						if ml > l {
 							t = t2
vendor/github.com/klauspost/compress/flate/level6.go (32 changes; generated, vendored)
@@ -113,7 +113,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 
 			t = lCandidate.Cur.offset - e.cur
 			if s-t < maxMatchOffset {
-				if uint32(cv) == load3232(src, lCandidate.Cur.offset-e.cur) {
+				if uint32(cv) == load3232(src, t) {
 					// Long candidate matches at least 4 bytes.
 
 					// Store the next match
@@ -123,9 +123,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 
 					// Check the previous long candidate as well.
 					t2 := lCandidate.Prev.offset - e.cur
-					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
-						l = e.matchlen(s+4, t+4, src) + 4
-						ml1 := e.matchlen(s+4, t2+4, src) + 4
+					if s-t2 < maxMatchOffset && uint32(cv) == load3232(src, t2) {
+						l = e.matchlen(int(s+4), int(t+4), src) + 4
+						ml1 := e.matchlen(int(s+4), int(t2+4), src) + 4
 						if ml1 > l {
 							t = t2
 							l = ml1
@@ -136,7 +136,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 			}
 			// Current value did not match, but check if previous long value does.
 			t = lCandidate.Prev.offset - e.cur
-			if s-t < maxMatchOffset && uint32(cv) == load3232(src, lCandidate.Prev.offset-e.cur) {
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				// Store the next match
 				e.table[nextHashS] = tableEntry{offset: nextS + e.cur}
 				eLong := &e.bTable[nextHashL]
@@ -146,9 +146,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 			}
 
 			t = sCandidate.offset - e.cur
-			if s-t < maxMatchOffset && uint32(cv) == load3232(src, sCandidate.offset-e.cur) {
+			if s-t < maxMatchOffset && uint32(cv) == load3232(src, t) {
 				// Found a 4 match...
-				l = e.matchlen(s+4, t+4, src) + 4
+				l = e.matchlen(int(s+4), int(t+4), src) + 4
 
 				// Look up next long candidate (at nextS)
 				lCandidate = e.bTable[nextHashL]
@@ -162,7 +162,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 					const repOff = 1
 					t2 := s - repeat + repOff
 					if load3232(src, t2) == uint32(cv>>(8*repOff)) {
-						ml := e.matchlen(s+4+repOff, t2+4, src) + 4
+						ml := e.matchlen(int(s+4+repOff), int(t2+4), src) + 4
 						if ml > l {
 							t = t2
 							l = ml
@@ -175,8 +175,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 				// If the next long is a candidate, use that...
 				t2 = lCandidate.Cur.offset - e.cur
 				if nextS-t2 < maxMatchOffset {
-					if load3232(src, lCandidate.Cur.offset-e.cur) == uint32(next) {
-						ml := e.matchlen(nextS+4, t2+4, src) + 4
+					if load3232(src, t2) == uint32(next) {
+						ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
 						if ml > l {
 							t = t2
 							s = nextS
@@ -186,8 +186,8 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 				}
 				// If the previous long is a candidate, use that...
 				t2 = lCandidate.Prev.offset - e.cur
-				if nextS-t2 < maxMatchOffset && load3232(src, lCandidate.Prev.offset-e.cur) == uint32(next) {
-					ml := e.matchlen(nextS+4, t2+4, src) + 4
+				if nextS-t2 < maxMatchOffset && load3232(src, t2) == uint32(next) {
+					ml := e.matchlen(int(nextS+4), int(t2+4), src) + 4
 					if ml > l {
 						t = t2
 						s = nextS
@@ -207,9 +207,9 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 
 		// Extend the 4-byte match as long as possible.
 		if l == 0 {
-			l = e.matchlenLong(s+4, t+4, src) + 4
+			l = e.matchlenLong(int(s+4), int(t+4), src) + 4
 		} else if l == maxMatchLength {
-			l += e.matchlenLong(s+l, t+l, src)
+			l += e.matchlenLong(int(s+l), int(t+l), src)
 		}
 
 		// Try to locate a better match by checking the end-of-match...
@@ -227,7 +227,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 			off := s2 - t2
 			if off < maxMatchOffset {
 				if off > 0 && t2 >= 0 {
-					if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+					if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
 						t = t2
 						l = l2
 						s = s2
@@ -237,7 +237,7 @@ func (e *fastEncL6) Encode(dst *tokens, src []byte) {
 			t2 = eLong.Prev.offset - e.cur - l + skipBeginning
 			off := s2 - t2
 			if off > 0 && off < maxMatchOffset && t2 >= 0 {
-				if l2 := e.matchlenLong(s2, t2, src); l2 > l {
+				if l2 := e.matchlenLong(int(s2), int(t2), src); l2 > l {
 					t = t2
 					l = l2
 					s = s2
vendor/github.com/klauspost/compress/flate/matchlen_amd64.go (16 changes; generated, vendored; file deleted)
@@ -1,16 +0,0 @@
-//go:build amd64 && !appengine && !noasm && gc
-// +build amd64,!appengine,!noasm,gc
-
-// Copyright 2019+ Klaus Post. All rights reserved.
-// License information can be found in the LICENSE file.
-
-package flate
-
-// matchLen returns how many bytes match in a and b
-//
-// It assumes that:
-//
-//	len(a) <= len(b) and len(a) > 0
-//
-//go:noescape
-func matchLen(a []byte, b []byte) int
vendor/github.com/klauspost/compress/flate/matchlen_amd64.s (66 changes; generated, vendored; file deleted)
@@ -1,66 +0,0 @@
-// Copied from S2 implementation.
-
-//go:build !appengine && !noasm && gc && !noasm
-
-#include "textflag.h"
-
-// func matchLen(a []byte, b []byte) int
-TEXT ·matchLen(SB), NOSPLIT, $0-56
-	MOVQ a_base+0(FP), AX
-	MOVQ b_base+24(FP), CX
-	MOVQ a_len+8(FP), DX
-
-	// matchLen
-	XORL SI, SI
-	CMPL DX, $0x08
-	JB   matchlen_match4_standalone
-
-matchlen_loopback_standalone:
-	MOVQ (AX)(SI*1), BX
-	XORQ (CX)(SI*1), BX
-	JZ   matchlen_loop_standalone
-
-#ifdef GOAMD64_v3
-	TZCNTQ BX, BX
-#else
-	BSFQ BX, BX
-#endif
-	SHRL $0x03, BX
-	LEAL (SI)(BX*1), SI
-	JMP  gen_match_len_end
-
-matchlen_loop_standalone:
-	LEAL -8(DX), DX
-	LEAL 8(SI), SI
-	CMPL DX, $0x08
-	JAE  matchlen_loopback_standalone
-
-matchlen_match4_standalone:
-	CMPL DX, $0x04
-	JB   matchlen_match2_standalone
-	MOVL (AX)(SI*1), BX
-	CMPL (CX)(SI*1), BX
-	JNE  matchlen_match2_standalone
-	LEAL -4(DX), DX
-	LEAL 4(SI), SI
-
-matchlen_match2_standalone:
-	CMPL DX, $0x02
-	JB   matchlen_match1_standalone
-	MOVW (AX)(SI*1), BX
-	CMPW (CX)(SI*1), BX
-	JNE  matchlen_match1_standalone
-	LEAL -2(DX), DX
-	LEAL 2(SI), SI
-
-matchlen_match1_standalone:
-	CMPL DX, $0x01
-	JB   gen_match_len_end
-	MOVB (AX)(SI*1), BL
-	CMPB (CX)(SI*1), BL
-	JNE  gen_match_len_end
-	INCL SI
-
-gen_match_len_end:
-	MOVQ SI, ret+48(FP)
-	RET
vendor/github.com/klauspost/compress/flate/matchlen_generic.go (15 changes; generated, vendored)
@@ -1,27 +1,29 @@
 //go:build !amd64 || appengine || !gc || noasm
 // +build !amd64 appengine !gc noasm
 
 // Copyright 2019+ Klaus Post. All rights reserved.
 // License information can be found in the LICENSE file.
 
 package flate
 
 import (
-	"encoding/binary"
 	"math/bits"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 // matchLen returns the maximum common prefix length of a and b.
 // a must be the shortest of the two.
 func matchLen(a, b []byte) (n int) {
-	for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
-		diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
+	left := len(a)
+	for left >= 8 {
+		diff := le.Load64(a, n) ^ le.Load64(b, n)
 		if diff != 0 {
 			return n + bits.TrailingZeros64(diff)>>3
 		}
 		n += 8
+		left -= 8
 	}
 
+	a = a[n:]
+	b = b[n:]
 	for i := range a {
 		if a[i] != b[i] {
 			break
@@ -29,5 +31,4 @@ func matchLen(a, b []byte) (n int) {
 		n++
 	}
 	return n
-
 }
vendor/github.com/klauspost/compress/flate/stateless.go (13 changes; generated, vendored)
@@ -4,6 +4,8 @@ import (
 	"io"
 	"math"
 	"sync"
+
+	"github.com/klauspost/compress/internal/le"
 )
 
 const (
@@ -152,18 +154,11 @@ func hashSL(u uint32) uint32 {
 }
 
 func load3216(b []byte, i int16) uint32 {
-	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-	b = b[i:]
-	b = b[:4]
-	return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+	return le.Load32(b, i)
 }
 
 func load6416(b []byte, i int16) uint64 {
-	// Help the compiler eliminate bounds checks on the read so it can be done in a single read.
-	b = b[i:]
-	b = b[:8]
-	return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 |
-		uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56
+	return le.Load64(b, i)
 }
 
 func statelessEnc(dst *tokens, src []byte, startAt int16) {
vendor/github.com/klauspost/compress/huff0/bitreader.go (25 changes; generated, vendored)
@ -6,10 +6,11 @@
package huff0

import (
"encoding/binary"
"errors"
"fmt"
"io"

"github.com/klauspost/compress/internal/le"
)

// bitReader reads a bitstream in reverse.
@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
return nil
}

// peekBitsFast requires that at least one bit is requested every time.
// peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56)
@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
}

// 2 bounds checks.
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
if b.bitsRead < 32 {
return
}
if b.off > 4 {
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
if b.off >= 4 {
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32
b.off -= 4
@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
return
}

// 2 bounds checks.
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {

// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0
b.off -= 8
}
@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
return
}
if b.off > 4 {
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
low := le.Load32(b.in, b.off-4)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32
b.off -= 4
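One behavioral detail worth noting above: fill() now takes the 32-bit fast path when b.off >= 4 instead of b.off > 4, so a buffer with exactly four bytes left is consumed in a single load rather than byte by byte. A trivial standalone sketch of the boundary (hypothetical reader state):

package main

import "fmt"

func main() {
	off := 4 // exactly four bytes remaining
	fmt.Println(off > 4)  // false: the old condition fell through to the slow byte loop
	fmt.Println(off >= 4) // true: the new condition reads the final word in one load
}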
5
vendor/github.com/klauspost/compress/internal/le/le.go
generated
vendored
Normal file
@ -0,0 +1,5 @@
package le

type Indexer interface {
int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}
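The new internal le package centers on this Indexer constraint: one generic union over the integer types, so the Load/Store helpers accept whatever index type a call site already has (int16 in flate, int32 in zstd) without conversions. A minimal sketch of how such a constraint is consumed; note the package is internal to the module, so the import below only works from inside klauspost/compress:

package main

import (
	"fmt"

	"github.com/klauspost/compress/internal/le"
)

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7, 8}
	// Any type from the Indexer union works as the index.
	fmt.Printf("%#x\n", le.Load32(b, int16(0))) // 0x4030201
	fmt.Printf("%#x\n", le.Load64(b, uint(0)))  // 0x807060504030201
}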
42
vendor/github.com/klauspost/compress/internal/le/unsafe_disabled.go
generated
vendored
Normal file
@ -0,0 +1,42 @@
//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine

package le

import (
"encoding/binary"
)

// Load8 will load from b at index i.
func Load8[I Indexer](b []byte, i I) byte {
return b[i]
}

// Load16 will load from b at index i.
func Load16[I Indexer](b []byte, i I) uint16 {
return binary.LittleEndian.Uint16(b[i:])
}

// Load32 will load from b at index i.
func Load32[I Indexer](b []byte, i I) uint32 {
return binary.LittleEndian.Uint32(b[i:])
}

// Load64 will load from b at index i.
func Load64[I Indexer](b []byte, i I) uint64 {
return binary.LittleEndian.Uint64(b[i:])
}

// Store16 will store v at b.
func Store16(b []byte, v uint16) {
binary.LittleEndian.PutUint16(b, v)
}

// Store32 will store v at b.
func Store32(b []byte, v uint32) {
binary.LittleEndian.PutUint32(b, v)
}

// Store64 will store v at b.
func Store64(b []byte, v uint64) {
binary.LittleEndian.PutUint64(b, v)
}
55
vendor/github.com/klauspost/compress/internal/le/unsafe_enabled.go
generated
vendored
Normal file
@ -0,0 +1,55 @@
// We enable 64 bit LE platforms:

//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine

package le

import (
"unsafe"
)

// Load8 will load from b at index i.
func Load8[I Indexer](b []byte, i I) byte {
//return binary.LittleEndian.Uint16(b[i:])
//return *(*uint16)(unsafe.Pointer(&b[i]))
return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Load16 will load from b at index i.
func Load16[I Indexer](b []byte, i I) uint16 {
//return binary.LittleEndian.Uint16(b[i:])
//return *(*uint16)(unsafe.Pointer(&b[i]))
return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Load32 will load from b at index i.
func Load32[I Indexer](b []byte, i I) uint32 {
//return binary.LittleEndian.Uint32(b[i:])
//return *(*uint32)(unsafe.Pointer(&b[i]))
return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Load64 will load from b at index i.
func Load64[I Indexer](b []byte, i I) uint64 {
//return binary.LittleEndian.Uint64(b[i:])
//return *(*uint64)(unsafe.Pointer(&b[i]))
return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Store16 will store v at b.
func Store16(b []byte, v uint16) {
//binary.LittleEndian.PutUint16(b, v)
*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
}

// Store32 will store v at b.
func Store32(b []byte, v uint32) {
//binary.LittleEndian.PutUint32(b, v)
*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
}

// Store64 will store v at b.
func Store64(b []byte, v uint64) {
//binary.LittleEndian.PutUint64(b, v)
*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
}
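On the listed 64-bit little-endian platforms these loads compile to a single unaligned move; everywhere else the encoding/binary fallback in unsafe_disabled.go applies, and the two flavors must return identical values. A standalone property check under that assumption (not part of the vendored package):

package main

import (
	"encoding/binary"
	"fmt"
	"unsafe"
)

// load64 mirrors the unsafe fast path: pointer arithmetic from the slice base.
func load64(b []byte, i int) uint64 {
	return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

func main() {
	b := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9}
	for i := 0; i <= 1; i++ {
		fast := load64(b, i)
		safe := binary.LittleEndian.Uint64(b[i:])
		fmt.Println(fast == safe) // true on amd64/arm64/ppc64le/riscv64
	}
}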
2
vendor/github.com/klauspost/compress/s2/README.md
generated
vendored
@ -79,7 +79,7 @@ This will take ownership of the buffer until the stream is closed.
func EncodeStream(src []byte, dst io.Writer) error {
enc := s2.NewWriter(dst)
// The encoder owns the buffer until Flush or Close is called.
err := enc.EncodeBuffer(buf)
err := enc.EncodeBuffer(src)
if err != nil {
enc.Close()
return err
26
vendor/github.com/klauspost/compress/s2/decode_other.go
generated
vendored
@ -11,6 +11,8 @@ package s2
import (
"fmt"
"strconv"

"github.com/klauspost/compress/internal/le"
)

// decode writes the decoding of src to dst. It assumes that the varint-encoded
@ -38,21 +40,18 @@ func s2Decode(dst, src []byte) int {
case x < 60:
s++
case x == 60:
x = uint32(src[s+1])
s += 2
x = uint32(src[s-1])
case x == 61:
in := src[s : s+3]
x = uint32(in[1]) | uint32(in[2])<<8
x = uint32(le.Load16(src, s+1))
s += 3
case x == 62:
in := src[s : s+4]
// Load as 32 bit and shift down.
x = uint32(in[0]) | uint32(in[1])<<8 | uint32(in[2])<<16 | uint32(in[3])<<24
x = le.Load32(src, s)
x >>= 8
s += 4
case x == 63:
in := src[s : s+5]
x = uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24
x = le.Load32(src, s+1)
s += 5
}
length = int(x) + 1
@ -85,8 +84,7 @@ func s2Decode(dst, src []byte) int {
length = int(src[s]) + 4
s += 1
case 6:
in := src[s : s+2]
length = int(uint32(in[0])|(uint32(in[1])<<8)) + (1 << 8)
length = int(le.Load16(src, s)) + 1<<8
s += 2
case 7:
in := src[s : s+3]
@ -99,15 +97,13 @@ func s2Decode(dst, src []byte) int {
}
length += 4
case tagCopy2:
in := src[s : s+3]
offset = int(uint32(in[1]) | uint32(in[2])<<8)
length = 1 + int(in[0])>>2
offset = int(le.Load16(src, s+1))
length = 1 + int(src[s])>>2
s += 3

case tagCopy4:
in := src[s : s+5]
offset = int(uint32(in[1]) | uint32(in[2])<<8 | uint32(in[3])<<16 | uint32(in[4])<<24)
length = 1 + int(in[0])>>2
offset = int(le.Load32(src, s+1))
length = 1 + int(src[s])>>2
s += 5
}

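The rewritten cases read one small word and shift, instead of assembling bytes by hand, which removes a bounds check per branch. A standalone sketch of the x == 62 path (load 32 bits including the tag byte, then shift the tag out); the tag value here is illustrative:

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	src := []byte{0xf8, 0x11, 0x22, 0x33} // tag byte followed by a 3-byte literal length
	s := 0
	x := binary.LittleEndian.Uint32(src[s:]) >> 8 // drop the tag, keep 3 length bytes
	fmt.Printf("%#x\n", x)                        // 0x332211
}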
422
vendor/github.com/klauspost/compress/s2/encode_all.go
generated
vendored
@ -10,14 +10,16 @@ import (
"encoding/binary"
"fmt"
"math/bits"

"github.com/klauspost/compress/internal/le"
)

func load32(b []byte, i int) uint32 {
return binary.LittleEndian.Uint32(b[i:])
return le.Load32(b, i)
}

func load64(b []byte, i int) uint64 {
return binary.LittleEndian.Uint64(b[i:])
return le.Load64(b, i)
}

// hash6 returns the hash of the lowest 6 bytes of u to fit in a hash table with h bits.
@ -44,7 +46,12 @@ func encodeGo(dst, src []byte) []byte {
d += emitLiteral(dst[d:], src)
return dst[:d]
}
n := encodeBlockGo(dst[d:], src)
var n int
if len(src) < 64<<10 {
n = encodeBlockGo64K(dst[d:], src)
} else {
n = encodeBlockGo(dst[d:], src)
}
if n > 0 {
d += n
return dst[:d]
@ -70,7 +77,6 @@ func encodeBlockGo(dst, src []byte) (d int) {

debug = false
)

var table [maxTableSize]uint32

// sLimit is when to stop looking for offset/length copies. The inputMargin
@ -277,13 +283,228 @@ emitRemainder:
return d
}

// encodeBlockGo64K is a specialized version for compressing blocks <= 64KB
func encodeBlockGo64K(dst, src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 14
maxTableSize = 1 << tableBits

debug = false
)

var table [maxTableSize]uint16

// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin

// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5

// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0

// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)

// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1

for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>5 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint16(s)
table[hash1] = uint16(s + 1)
hash2 := hash6(cv>>16, tableBits)

// Check repeat at offset checkRep.
const checkRep = 1
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}

// Bail if we exceed the maximum size.
if d+(base-nextEmit) > dstLimit {
return 0
}

d += emitLiteral(dst[d:], src[nextEmit:base])

// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}
if nextEmit > 0 {
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], repeat, s-base)
} else {
// First match, cannot be repeat.
d += emitCopy(dst[d:], repeat, s-base)
}
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
cv = load64(src, s)
continue
}

if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint16(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint16(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}

cv = load64(src, nextS)
s = nextS
}

// Extend backwards.
// The top bytes will be rechecked to get the full match.
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}

// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}

// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.

d += emitLiteral(dst[d:], src[nextEmit:s])

// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate

// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}

d += emitCopy(dst[d:], repeat, s-base)
if debug {
// Validate match.
if s <= candidate {
panic("s <= candidate")
}
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}

nextEmit = s
if s >= sLimit {
goto emitRemainder
}

if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint16(s - 2)
table[currHash] = uint16(s)
if debug && s == candidate {
panic("s == candidate")
}
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}

emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

func encodeBlockSnappyGo(dst, src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 14
maxTableSize = 1 << tableBits
)

var table [maxTableSize]uint32

// sLimit is when to stop looking for offset/length copies. The inputMargin
@ -467,6 +688,197 @@ emitRemainder:
return d
}

// encodeBlockSnappyGo64K is a special version of encodeBlockSnappyGo for sizes <64KB
func encodeBlockSnappyGo64K(dst, src []byte) (d int) {
// Initialize the hash table.
const (
tableBits = 14
maxTableSize = 1 << tableBits
)

var table [maxTableSize]uint16

// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin

// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 5

// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0

// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)

// We search for a repeat at -1, but don't output repeats when nextEmit == 0
repeat := 1

for {
candidate := 0
for {
// Next src position to check
nextS := s + (s-nextEmit)>>5 + 4
if nextS > sLimit {
goto emitRemainder
}
hash0 := hash6(cv, tableBits)
hash1 := hash6(cv>>8, tableBits)
candidate = int(table[hash0])
candidate2 := int(table[hash1])
table[hash0] = uint16(s)
table[hash1] = uint16(s + 1)
hash2 := hash6(cv>>16, tableBits)

// Check repeat at offset checkRep.
const checkRep = 1
if uint32(cv>>(checkRep*8)) == load32(src, s-repeat+checkRep) {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
// Bail if we exceed the maximum size.
if d+(base-nextEmit) > dstLimit {
return 0
}

d += emitLiteral(dst[d:], src[nextEmit:base])

// Extend forward
candidate := s - repeat + 4 + checkRep
s += 4 + checkRep
for s <= sLimit {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}

d += emitCopyNoRepeat(dst[d:], repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}

cv = load64(src, s)
continue
}

if uint32(cv) == load32(src, candidate) {
break
}
candidate = int(table[hash2])
if uint32(cv>>8) == load32(src, candidate2) {
table[hash2] = uint16(s + 2)
candidate = candidate2
s++
break
}
table[hash2] = uint16(s + 2)
if uint32(cv>>16) == load32(src, candidate) {
s += 2
break
}

cv = load64(src, nextS)
s = nextS
}

// Extend backwards
for candidate > 0 && s > nextEmit && src[candidate-1] == src[s-1] {
candidate--
s--
}

// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}

// A 4-byte match has been found. We'll later see if more than 4 bytes
// match. But, prior to the match, src[nextEmit:s] are unmatched. Emit
// them as literal bytes.

d += emitLiteral(dst[d:], src[nextEmit:s])

// Call emitCopy, and then see if another emitCopy could be our next
// move. Repeat until we find no match for the input immediately after
// what was consumed by the last emitCopy call.
//
// If we exit this loop normally then we need to call emitLiteral next,
// though we don't yet know how big the literal will be. We handle that
// by proceeding to the next iteration of the main loop. We also can
// exit this loop via goto if we get close to exhausting the input.
for {
// Invariant: we have a 4-byte match at s, and no need to emit any
// literal bytes prior to s.
base := s
repeat = base - candidate

// Extend the 4-byte match as long as possible.
s += 4
candidate += 4
for s <= len(src)-8 {
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}

d += emitCopyNoRepeat(dst[d:], repeat, s-base)
if false {
// Validate match.
a := src[base:s]
b := src[base-repeat : base-repeat+(s-base)]
if !bytes.Equal(a, b) {
panic("mismatch")
}
}

nextEmit = s
if s >= sLimit {
goto emitRemainder
}

if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}
// Check for an immediate match, otherwise start search at s+1
x := load64(src, s-2)
m2Hash := hash6(x, tableBits)
currHash := hash6(x>>16, tableBits)
candidate = int(table[currHash])
table[m2Hash] = uint16(s - 2)
table[currHash] = uint16(s)
if uint32(x>>16) != load32(src, candidate) {
cv = load64(src, s+1)
s++
break
}
}
}

emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

// encodeBlockGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
416
vendor/github.com/klauspost/compress/s2/encode_better.go
generated
vendored
@ -348,12 +348,7 @@ func encodeBlockBetterSnappyGo(dst, src []byte) (d int) {
nextS := 0
for {
// Next src position to check
nextS = (s-nextEmit)>>7 + 1
if nextS > maxSkip {
nextS = s + maxSkip
} else {
nextS += s
}
nextS = min(s+(s-nextEmit)>>7+1, s+maxSkip)

if nextS > sLimit {
goto emitRemainder
@ -483,6 +478,415 @@ emitRemainder:
return d
}

func encodeBlockBetterGo64K(dst, src []byte) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}
// Initialize the hash tables.
// Use smaller tables for smaller blocks
const (
// Long hash matches.
lTableBits = 16
maxLTableSize = 1 << lTableBits

// Short hash matches.
sTableBits = 13
maxSTableSize = 1 << sTableBits
)

var lTable [maxLTableSize]uint16
var sTable [maxSTableSize]uint16

// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 6

// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0

// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)

// We initialize repeat to 0, so we never match on first attempt
repeat := 0

for {
candidateL := 0
nextS := 0
for {
// Next src position to check
nextS = s + (s-nextEmit)>>6 + 1
if nextS > sLimit {
goto emitRemainder
}
hashL := hash7(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL = int(lTable[hashL])
candidateS := int(sTable[hashS])
lTable[hashL] = uint16(s)
sTable[hashS] = uint16(s)

valLong := load64(src, candidateL)
valShort := load64(src, candidateS)

// If long matches at least 8 bytes, use that.
if cv == valLong {
break
}
if cv == valShort {
candidateL = candidateS
break
}

// Check repeat at offset checkRep.
const checkRep = 1
// Minimum length of a repeat. Tested with various values.
// While 4-5 offers improvements in some, 6 reduces
// regressions significantly.
const wantRepeatBytes = 6
const repeatMask = ((1 << (wantRepeatBytes * 8)) - 1) << (8 * checkRep)
if false && repeat > 0 && cv&repeatMask == load64(src, s-repeat)&repeatMask {
base := s + checkRep
// Extend back
for i := base - repeat; base > nextEmit && i > 0 && src[i-1] == src[base-1]; {
i--
base--
}
d += emitLiteral(dst[d:], src[nextEmit:base])

// Extend forward
candidate := s - repeat + wantRepeatBytes + checkRep
s += wantRepeatBytes + checkRep
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidate] {
s++
candidate++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidate); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidate += 8
}
// same as `add := emitCopy(dst[d:], repeat, s-base)` but skips storing offset.
d += emitRepeat(dst[d:], repeat, s-base)
nextEmit = s
if s >= sLimit {
goto emitRemainder
}
// Index in-between
index0 := base + 1
index1 := s - 2

for index0 < index1 {
cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint16(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1)

lTable[hash7(cv1, lTableBits)] = uint16(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1)
index0 += 2
index1 -= 2
}

cv = load64(src, s)
continue
}

// Long likely matches 7, so take that.
if uint32(cv) == uint32(valLong) {
break
}

// Check our short candidate
if uint32(cv) == uint32(valShort) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint16(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
break
}
// Use our short candidate.
candidateL = candidateS
break
}

cv = load64(src, nextS)
s = nextS
}

// Extend backwards
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
candidateL--
s--
}

// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}

base := s
offset := base - candidateL

// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidateL] {
s++
candidateL++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}

d += emitLiteral(dst[d:], src[nextEmit:base])
if repeat == offset {
d += emitRepeat(dst[d:], offset, s-base)
} else {
d += emitCopy(dst[d:], offset, s-base)
repeat = offset
}

nextEmit = s
if s >= sLimit {
goto emitRemainder
}

if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}

// Index short & long
index0 := base + 1
index1 := s - 2

cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint16(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1)

// lTable could be postponed, but very minor difference.
lTable[hash7(cv1, lTableBits)] = uint16(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)

// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0)
lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2)
index0 += 2
index2 += 2
}
}

emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

// encodeBlockBetterSnappyGo encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
//
// It also assumes that:
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlockBetterSnappyGo64K(dst, src []byte) (d int) {
// sLimit is when to stop looking for offset/length copies. The inputMargin
// lets us use a fast path for emitLiteral in the main loop, while we are
// looking for copies.
sLimit := len(src) - inputMargin
if len(src) < minNonLiteralBlockSize {
return 0
}

// Initialize the hash tables.
// Use smaller tables for smaller blocks
const (
// Long hash matches.
lTableBits = 15
maxLTableSize = 1 << lTableBits

// Short hash matches.
sTableBits = 13
maxSTableSize = 1 << sTableBits
)

var lTable [maxLTableSize]uint16
var sTable [maxSTableSize]uint16

// Bail if we can't compress to at least this.
dstLimit := len(src) - len(src)>>5 - 6

// nextEmit is where in src the next emitLiteral should start from.
nextEmit := 0

// The encoded form must start with a literal, as there are no previous
// bytes to copy, so we start looking for hash matches at s == 1.
s := 1
cv := load64(src, s)

const maxSkip = 100

for {
candidateL := 0
nextS := 0
for {
// Next src position to check
nextS = min(s+(s-nextEmit)>>6+1, s+maxSkip)

if nextS > sLimit {
goto emitRemainder
}
hashL := hash7(cv, lTableBits)
hashS := hash4(cv, sTableBits)
candidateL = int(lTable[hashL])
candidateS := int(sTable[hashS])
lTable[hashL] = uint16(s)
sTable[hashS] = uint16(s)

if uint32(cv) == load32(src, candidateL) {
break
}

// Check our short candidate
if uint32(cv) == load32(src, candidateS) {
// Try a long candidate at s+1
hashL = hash7(cv>>8, lTableBits)
candidateL = int(lTable[hashL])
lTable[hashL] = uint16(s + 1)
if uint32(cv>>8) == load32(src, candidateL) {
s++
break
}
// Use our short candidate.
candidateL = candidateS
break
}

cv = load64(src, nextS)
s = nextS
}

// Extend backwards
for candidateL > 0 && s > nextEmit && src[candidateL-1] == src[s-1] {
candidateL--
s--
}

// Bail if we exceed the maximum size.
if d+(s-nextEmit) > dstLimit {
return 0
}

base := s
offset := base - candidateL

// Extend the 4-byte match as long as possible.
s += 4
candidateL += 4
for s < len(src) {
if len(src)-s < 8 {
if src[s] == src[candidateL] {
s++
candidateL++
continue
}
break
}
if diff := load64(src, s) ^ load64(src, candidateL); diff != 0 {
s += bits.TrailingZeros64(diff) >> 3
break
}
s += 8
candidateL += 8
}

d += emitLiteral(dst[d:], src[nextEmit:base])
d += emitCopyNoRepeat(dst[d:], offset, s-base)

nextEmit = s
if s >= sLimit {
goto emitRemainder
}

if d > dstLimit {
// Do we have space for more, if not bail.
return 0
}

// Index short & long
index0 := base + 1
index1 := s - 2

cv0 := load64(src, index0)
cv1 := load64(src, index1)
lTable[hash7(cv0, lTableBits)] = uint16(index0)
sTable[hash4(cv0>>8, sTableBits)] = uint16(index0 + 1)

lTable[hash7(cv1, lTableBits)] = uint16(index1)
sTable[hash4(cv1>>8, sTableBits)] = uint16(index1 + 1)
index0 += 1
index1 -= 1
cv = load64(src, s)

// Index large values sparsely in between.
// We do two starting from different offsets for speed.
index2 := (index0 + index1 + 1) >> 1
for index2 < index1 {
lTable[hash7(load64(src, index0), lTableBits)] = uint16(index0)
lTable[hash7(load64(src, index2), lTableBits)] = uint16(index2)
index0 += 2
index2 += 2
}
}

emitRemainder:
if nextEmit < len(src) {
// Bail if we exceed the maximum size.
if d+len(src)-nextEmit > dstLimit {
return 0
}
d += emitLiteral(dst[d:], src[nextEmit:])
}
return d
}

// encodeBlockBetterDict encodes a non-empty src to a guaranteed-large-enough dst. It
// assumes that the varint-encoded length of the decompressed bytes has already
// been written.
12
vendor/github.com/klauspost/compress/s2/encode_go.go
generated
vendored
@ -21,6 +21,9 @@ func encodeBlock(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
return 0
}
if len(src) <= 64<<10 {
return encodeBlockGo64K(dst, src)
}
return encodeBlockGo(dst, src)
}

@ -32,6 +35,9 @@ func encodeBlock(dst, src []byte) (d int) {
//
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetter(dst, src []byte) (d int) {
if len(src) <= 64<<10 {
return encodeBlockBetterGo64K(dst, src)
}
return encodeBlockBetterGo(dst, src)
}

@ -43,6 +49,9 @@ func encodeBlockBetter(dst, src []byte) (d int) {
//
// len(dst) >= MaxEncodedLen(len(src))
func encodeBlockBetterSnappy(dst, src []byte) (d int) {
if len(src) <= 64<<10 {
return encodeBlockBetterSnappyGo64K(dst, src)
}
return encodeBlockBetterSnappyGo(dst, src)
}

@ -57,6 +66,9 @@ func encodeBlockSnappy(dst, src []byte) (d int) {
if len(src) < minNonLiteralBlockSize {
return 0
}
if len(src) <= 64<<10 {
return encodeBlockSnappyGo64K(dst, src)
}
return encodeBlockSnappyGo(dst, src)
}
3
vendor/github.com/klauspost/compress/s2sx.mod
generated
vendored
@ -1,4 +1,3 @@
module github.com/klauspost/compress

go 1.19

go 1.22
2
vendor/github.com/klauspost/compress/zstd/README.md
generated
vendored
@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee

This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.

This package is pure Go and without use of "unsafe".
This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.

The `zstd` package is provided as open source software using a Go standard license.
37
vendor/github.com/klauspost/compress/zstd/bitreader.go
generated
vendored
@ -5,11 +5,12 @@
package zstd

import (
"encoding/binary"
"errors"
"fmt"
"io"
"math/bits"

"github.com/klauspost/compress/internal/le"
)

// bitReader reads a bitstream in reverse.
@ -18,6 +19,7 @@ import (
type bitReader struct {
in []byte
value uint64 // Maybe use [16]byte, but shifting is awkward.
cursor int // offset where next read should end
bitsRead uint8
}

@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
b.cursor = len(in)
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}
v := b.in[len(b.in)-4:]
b.in = b.in[:len(b.in)-4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.cursor -= 4
b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
}

// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
v := b.in[len(b.in)-8:]
b.in = b.in[:len(b.in)-8]
b.value = binary.LittleEndian.Uint64(v)
b.cursor -= 8
b.value = le.Load64(b.in, b.cursor)
b.bitsRead = 0
}

@ -87,25 +87,23 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if len(b.in) >= 4 {
v := b.in[len(b.in)-4:]
b.in = b.in[:len(b.in)-4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
if b.cursor >= 4 {
b.cursor -= 4
b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
b.bitsRead -= 32
return
}

b.bitsRead -= uint8(8 * len(b.in))
for len(b.in) > 0 {
b.value = (b.value << 8) | uint64(b.in[len(b.in)-1])
b.in = b.in[:len(b.in)-1]
b.bitsRead -= uint8(8 * b.cursor)
for b.cursor > 0 {
b.cursor -= 1
b.value = (b.value << 8) | uint64(b.in[b.cursor])
}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
return len(b.in) == 0 && b.bitsRead >= 64
return b.cursor == 0 && b.bitsRead >= 64
}

// overread returns true if more bits have been requested than is on the stream.
@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {

// remain returns the number of bits remaining.
func (b *bitReader) remain() uint {
return 8*uint(len(b.in)) + 64 - uint(b.bitsRead)
return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
b.cursor = 0
if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
}
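Rather than shrinking b.in after every refill, the reader now keeps the slice intact and walks a cursor toward zero, so a refill becomes a subtraction plus one le.Load32/Load64 at a fixed base. A simplified standalone sketch of the idea (assumed semantics, not the vendored type):

package main

import (
	"encoding/binary"
	"fmt"
)

type reader struct {
	in     []byte
	cursor int // offset where the next read ends; walks toward 0
}

func (r *reader) read32() uint32 {
	r.cursor -= 4
	return binary.LittleEndian.Uint32(r.in[r.cursor:]) // slice header untouched
}

func main() {
	r := &reader{in: []byte{1, 2, 3, 4, 5, 6, 7, 8}, cursor: 8}
	fmt.Printf("%#x %#x\n", r.read32(), r.read32()) // 0x8070605 0x4030201
}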
19
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
@ -5,14 +5,10 @@
package zstd

import (
"bytes"
"encoding/binary"
"errors"
"fmt"
"hash/crc32"
"io"
"os"
"path/filepath"
"sync"

"github.com/klauspost/compress/huff0"
@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err)
return err
}
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}

return nil
}
27
vendor/github.com/klauspost/compress/zstd/blockenc.go
generated
vendored
@ -9,6 +9,7 @@ import (
"fmt"
"math"
"math/bits"
"slices"

"github.com/klauspost/compress/huff0"
)
@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
// All 0
return 0
}
maxCount := func(a []uint32) int {
var max uint32
for _, v := range a {
if v > max {
max = v
}
}
return int(max)
}
cnt := maxCount(hist[:maxSym])
cnt := int(slices.Max(hist[:maxSym]))
if cnt == len(data) {
// RLE
return 0
@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
}
}
}
maxCount := func(a []uint32) int {
var max uint32
for _, v := range a {
if v > max {
max = v
}
}
return int(max)
}
if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
}
@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
}

b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1]))
b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1]))
b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1]))
b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
}
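Both hand-rolled maxCount closures give way to the generic slices.Max from the standard library (Go 1.21+). A minimal equivalence check; note slices.Max panics on an empty slice, which these histogram slices never are:

package main

import (
	"fmt"
	"slices"
)

func main() {
	hist := []uint32{3, 9, 1, 7}
	var max uint32
	for _, v := range hist { // the old closure, inlined
		if v > max {
			max = v
		}
	}
	fmt.Println(int(max) == int(slices.Max(hist))) // true
}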
3
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
}

// Read bytes from the decompressed stream into p.
// Returns the number of bytes written and any error that occurred.
// Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) {
var n int
@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = nil
if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil
frame.history.decoders.br.cursor = 0
}
d.decoders <- block
}()
2
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(err)
}
if t < 0 {
err := fmt.Sprintf("s (%d) < 0", s)
err := fmt.Sprintf("t (%d) < 0", t)
panic(err)
}
if s-t > e.maxMatchOff {
11
vendor/github.com/klauspost/compress/zstd/matchlen_generic.go
generated
vendored
@ -7,20 +7,25 @@
package zstd

import (
"encoding/binary"
"math/bits"

"github.com/klauspost/compress/internal/le"
)

// matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two.
func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] {
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
left := len(a)
for left >= 8 {
diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3
}
n += 8
left -= 8
}
a = a[n:]
b = b[n:]

for i := range a {
if a[i] != b[i] {
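matchLen compares eight bytes per iteration; when the 64-bit XOR is non-zero, the index of the first differing byte is the trailing zero bit count divided by eight, since the loads are little-endian. A standalone sketch of that single step:

package main

import (
	"encoding/binary"
	"fmt"
	"math/bits"
)

func main() {
	a := []byte("abcdefgh")
	b := []byte("abcXefgh")
	x := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b)
	fmt.Println(bits.TrailingZeros64(x) >> 3) // 3: first mismatch at index 3
}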
2
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF
}
var ll, mo, ml int
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
64
vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s
generated
vendored
@ -7,9 +7,9 @@
TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)

// Return success
MOVQ $0x00000000, ret+24(FP)
@ -335,9 +335,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX)
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)

// Return success
MOVQ $0x00000000, ret+24(FP)
@ -634,9 +634,9 @@ error_overread:
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)

// Return success
MOVQ $0x00000000, ret+24(FP)
@ -920,9 +920,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX)
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)

// Return success
MOVQ $0x00000000, ret+24(FP)
@ -1787,9 +1787,9 @@ empty_seqs:
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -2281,8 +2281,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)

// Update the context
MOVQ ctx+16(FP), AX
@ -2349,9 +2349,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -2801,8 +2801,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)

// Update the context
MOVQ ctx+16(FP), AX
@ -2869,9 +2869,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), CX
MOVQ 24(CX), DX
MOVBQZX 32(CX), BX
MOVBQZX 40(CX), BX
MOVQ (CX), AX
MOVQ 8(CX), SI
MOVQ 32(CX), SI
ADDQ SI, AX
MOVQ AX, (SP)
MOVQ ctx+16(FP), AX
@ -3465,8 +3465,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), AX
MOVQ DX, 24(AX)
MOVB BL, 32(AX)
MOVQ SI, 8(AX)
MOVB BL, 40(AX)
MOVQ SI, 32(AX)

// Update the context
MOVQ ctx+16(FP), AX
@ -3533,9 +3533,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), BX
MOVQ 24(BX), AX
MOVBQZX 32(BX), DX
MOVBQZX 40(BX), DX
MOVQ (BX), CX
MOVQ 8(BX), BX
MOVQ 32(BX), BX
ADDQ BX, CX
MOVQ CX, (SP)
MOVQ ctx+16(FP), CX
@ -4087,8 +4087,8 @@ handle_loop:
loop_finished:
MOVQ br+8(FP), CX
MOVQ AX, 24(CX)
MOVB DL, 32(CX)
MOVQ BX, 8(CX)
MOVB DL, 40(CX)
MOVQ BX, 32(CX)

// Update the context
MOVQ ctx+16(FP), AX
2
vendor/github.com/klauspost/compress/zstd/seqdec_generic.go
generated
vendored
@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
for i := range seqs {
var ll, mo, ml int
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) {
if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)
2
vendor/github.com/klauspost/compress/zstd/seqenc.go
generated
vendored
@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{
func llCode(litLength uint32) uint8 {
const llDeltaCode = 19
if litLength <= 63 {
// Compiler insists on bounds check (Go 1.12)
return llCodeTable[litLength&63]
}
return uint8(highBit(litLength)) + llDeltaCode
@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{
func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36
if mlBase <= 127 {
// Compiler insists on bounds check (Go 1.12)
return mlCodeTable[mlBase&127]
}
return uint8(highBit(mlBase)) + mlDeltaCode
4
vendor/github.com/klauspost/compress/zstd/snappy.go
generated
vendored
@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {

n, r.err = w.Write(r.block.output)
if r.err != nil {
return written, err
return written, r.err
}
written += int64(n)
continue
@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
}
n, r.err = w.Write(r.block.output)
if r.err != nil {
return written, err
return written, r.err
}
written += int64(n)
continue
7
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
@ -5,10 +5,11 @@ package zstd

import (
"bytes"
"encoding/binary"
"errors"
"log"
"math"

"github.com/klauspost/compress/internal/le"
)

// enable debug printing
@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) {
}

func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:])
return le.Load32(b, i)
}

func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:])
return le.Load64(b, i)
}

type byter interface {
37
vendor/github.com/nats-io/jwt/v2/account_claims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2018-2023 The NATS Authors
* Copyright 2018-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -133,7 +133,7 @@ func (o *OperatorLimits) Validate(vr *ValidationResults) {
}
}

// Mapping for publishes
// WeightedMapping for publishes
type WeightedMapping struct {
Subject Subject `json:"subject"`
Weight uint8 `json:"weight,omitempty"`
@ -177,13 +177,13 @@ func (a *Account) AddMapping(sub Subject, to ...WeightedMapping) {
a.Mappings[sub] = to
}

// Enable external authorization for account users.
// ExternalAuthorization enables external authorization for account users.
// AuthUsers are those users specified to bypass the authorization callout and should be used for the authorization service itself.
// AllowedAccounts specifies which accounts, if any, that the authorization service can bind an authorized user to.
// The authorization response, a user JWT, will still need to be signed by the correct account.
// If optional XKey is specified, that is the public xkey (x25519) and the server will encrypt the request such that only the
// holder of the private key can decrypt. The auth service can also optionally encrypt the response back to the server using it's
// publick xkey which will be in the authorization request.
// public xkey which will be in the authorization request.
type ExternalAuthorization struct {
AuthUsers StringList `json:"auth_users,omitempty"`
AllowedAccounts StringList `json:"allowed_accounts,omitempty"`
@ -194,12 +194,12 @@ func (ac *ExternalAuthorization) IsEnabled() bool {
return len(ac.AuthUsers) > 0
}

// Helper function to determine if external authorization is enabled.
// HasExternalAuthorization helper function to determine if external authorization is enabled.
func (a *Account) HasExternalAuthorization() bool {
return a.Authorization.IsEnabled()
}

// Helper function to setup external authorization.
// EnableExternalAuthorization helper function to setup external authorization.
func (a *Account) EnableExternalAuthorization(users ...string) {
a.Authorization.AuthUsers.Add(users...)
}
@ -230,6 +230,20 @@ func (ac *ExternalAuthorization) Validate(vr *ValidationResults) {
}
}

const (
ClusterTrafficSystem = "system"
ClusterTrafficOwner = "owner"
)

type ClusterTraffic string

func (ct ClusterTraffic) Valid() error {
if ct == "" || ct == ClusterTrafficSystem || ct == ClusterTrafficOwner {
return nil
}
return fmt.Errorf("unknown cluster traffic option: %q", ct)
}

// Account holds account specific claims data
type Account struct {
Imports Imports `json:"imports,omitempty"`
@ -241,6 +255,7 @@ type Account struct {
Mappings Mapping `json:"mappings,omitempty"`
Authorization ExternalAuthorization `json:"authorization,omitempty"`
Trace *MsgTrace `json:"trace,omitempty"`
ClusterTraffic ClusterTraffic `json:"cluster_traffic,omitempty"`
Info
GenericFields
}
@ -308,6 +323,10 @@ func (a *Account) Validate(acct *AccountClaims, vr *ValidationResults) {
}
a.SigningKeys.Validate(vr)
a.Info.Validate(vr)

if err := a.ClusterTraffic.Valid(); err != nil {
vr.AddError(err.Error())
}
}

// AccountClaims defines the body of an account JWT
@ -338,13 +357,17 @@ func NewAccountClaims(subject string) *AccountClaims {

// Encode converts account claims into a JWT string
func (a *AccountClaims) Encode(pair nkeys.KeyPair) (string, error) {
return a.EncodeWithSigner(pair, nil)
}

func (a *AccountClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
if !nkeys.IsValidPublicAccountKey(a.Subject) {
return "", errors.New("expected subject to be account public key")
}
sort.Sort(a.Exports)
sort.Sort(a.Imports)
a.Type = AccountClaim
return a.ClaimsData.encode(pair, a)
return a.ClaimsData.encode(pair, a, fn)
}

// DecodeAccountClaims decodes account claims from a JWT string
8
vendor/github.com/nats-io/jwt/v2/activation_claims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2018 The NATS Authors
* Copyright 2018-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -72,11 +72,15 @@ func NewActivationClaims(subject string) *ActivationClaims {

// Encode turns an activation claim into a JWT string
func (a *ActivationClaims) Encode(pair nkeys.KeyPair) (string, error) {
return a.EncodeWithSigner(pair, nil)
}

func (a *ActivationClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
if !nkeys.IsValidPublicAccountKey(a.ClaimsData.Subject) {
return "", errors.New("expected subject to be an account")
}
a.Type = ActivationClaim
return a.ClaimsData.encode(pair, a)
return a.ClaimsData.encode(pair, a, fn)
}

// DecodeActivationClaims tries to create an activation claim from a JWT string
16
vendor/github.com/nats-io/jwt/v2/authorization_claims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2022 The NATS Authors
* Copyright 2022-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -113,8 +113,12 @@ func (ac *AuthorizationRequestClaims) Validate(vr *ValidationResults) {

// Encode tries to turn the auth request claims into a JWT string.
func (ac *AuthorizationRequestClaims) Encode(pair nkeys.KeyPair) (string, error) {
return ac.EncodeWithSigner(pair, nil)
}

func (ac *AuthorizationRequestClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
ac.Type = AuthorizationRequestClaim
return ac.ClaimsData.encode(pair, ac)
return ac.ClaimsData.encode(pair, ac, fn)
}

// DecodeAuthorizationRequestClaims tries to parse an auth request claims from a JWT string
@ -242,6 +246,10 @@ func (ar *AuthorizationResponseClaims) Validate(vr *ValidationResults) {

// Encode tries to turn the auth request claims into a JWT string.
func (ar *AuthorizationResponseClaims) Encode(pair nkeys.KeyPair) (string, error) {
ar.Type = AuthorizationResponseClaim
return ar.ClaimsData.encode(pair, ar)
return ar.EncodeWithSigner(pair, nil)
}

func (ar *AuthorizationResponseClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
ar.Type = AuthorizationResponseClaim
return ar.ClaimsData.encode(pair, ar, fn)
}
32
vendor/github.com/nats-io/jwt/v2/claims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2018-2022 The NATS Authors
* Copyright 2018-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -68,10 +68,16 @@ func IsGenericClaimType(s string) bool {
}
}

// SignFn is used in an external sign environment. The function should be
// able to locate the private key for the specified pub key specified and sign the
// specified data returning the signature as generated.
type SignFn func(pub string, data []byte) ([]byte, error)

// Claims is a JWT claims
type Claims interface {
Claims() *ClaimsData
Encode(kp nkeys.KeyPair) (string, error)
EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error)
ExpectedPrefixes() []nkeys.PrefixByte
Payload() interface{}
String() string
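
The new SignFn/EncodeWithSigner pair lets signing happen outside the process (e.g. an HSM or KMS). A minimal sketch in which the external signer is simulated by a local key pair; the setup is hypothetical, while the SignFn signature and EncodeWithSigner come from this diff:

okp, _ := nkeys.CreateOperator() // stands in for a key held by an external signer
opub, _ := okp.PublicKey()

signer := jwt.SignFn(func(pub string, data []byte) ([]byte, error) {
	if pub != opub {
		return nil, fmt.Errorf("unknown signing key %q", pub)
	}
	return okp.Sign(data) // a real SignFn would call out to an HSM/KMS here
})

oc := jwt.NewOperatorClaims(opub)
token, err := oc.EncodeWithSigner(okp, signer)
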
@ -121,7 +127,7 @@ func serialize(v interface{}) (string, error) {
return encodeToString(j), nil
}

func (c *ClaimsData) doEncode(header *Header, kp nkeys.KeyPair, claim Claims) (string, error) {
func (c *ClaimsData) doEncode(header *Header, kp nkeys.KeyPair, claim Claims, fn SignFn) (string, error) {
if header == nil {
return "", errors.New("header is required")
}
@ -200,9 +206,21 @@ func (c *ClaimsData) doEncode(header *Header, kp nkeys.KeyPair, claim Claims) (s
if header.Algorithm == AlgorithmNkeyOld {
return "", errors.New(AlgorithmNkeyOld + " not supported to write jwtV2")
} else if header.Algorithm == AlgorithmNkey {
sig, err := kp.Sign([]byte(toSign))
if err != nil {
return "", err
var sig []byte
if fn != nil {
pk, err := kp.PublicKey()
if err != nil {
return "", err
}
sig, err = fn(pk, []byte(toSign))
if err != nil {
return "", err
}
} else {
sig, err = kp.Sign([]byte(toSign))
if err != nil {
return "", err
}
}
eSig = encodeToString(sig)
} else {
@ -224,8 +242,8 @@ func (c *ClaimsData) hash() (string, error) {

// Encode encodes a claim into a JWT token. The claim is signed with the
// provided nkey's private key
func (c *ClaimsData) encode(kp nkeys.KeyPair, payload Claims) (string, error) {
return c.doEncode(&Header{TokenTypeJwt, AlgorithmNkey}, kp, payload)
func (c *ClaimsData) encode(kp nkeys.KeyPair, payload Claims, fn SignFn) (string, error) {
return c.doEncode(&Header{TokenTypeJwt, AlgorithmNkey}, kp, payload, fn)
}

// Returns a JSON representation of the claim
4
vendor/github.com/nats-io/jwt/v2/exports.go
generated
vendored
@ -273,7 +273,7 @@ func isContainedIn(kind ExportType, subjects []Subject, vr *ValidationResults) {
}

// Validate calls validate on all of the exports
func (e *Exports) Validate(vr *ValidationResults) error {
func (e *Exports) Validate(vr *ValidationResults) {
var serviceSubjects []Subject
var streamSubjects []Subject

@ -292,8 +292,6 @@ func (e *Exports) Validate(vr *ValidationResults) error {

isContainedIn(Service, serviceSubjects, vr)
isContainedIn(Stream, streamSubjects, vr)

return nil
}

// HasExportContainingSubject checks if the export list has an export with the provided subject
8
vendor/github.com/nats-io/jwt/v2/genericlaims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2018-2020 The NATS Authors
* Copyright 2018-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -107,7 +107,11 @@ func (gc *GenericClaims) Payload() interface{} {

// Encode takes a generic claims and creates a JWT string
func (gc *GenericClaims) Encode(pair nkeys.KeyPair) (string, error) {
return gc.ClaimsData.encode(pair, gc)
return gc.ClaimsData.encode(pair, gc, nil)
}

func (gc *GenericClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
return gc.ClaimsData.encode(pair, gc, fn)
}

// Validate checks the generic part of the claims data
8
vendor/github.com/nats-io/jwt/v2/operator_claims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2018 The NATS Authors
* Copyright 2018-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -191,6 +191,10 @@ func (oc *OperatorClaims) DidSign(op Claims) bool {

// Encode the claims into a JWT string
func (oc *OperatorClaims) Encode(pair nkeys.KeyPair) (string, error) {
return oc.EncodeWithSigner(pair, nil)
}

func (oc *OperatorClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
if !nkeys.IsValidPublicOperatorKey(oc.Subject) {
return "", errors.New("expected subject to be an operator public key")
}
@ -199,7 +203,7 @@ func (oc *OperatorClaims) Encode(pair nkeys.KeyPair) (string, error) {
return "", err
}
oc.Type = OperatorClaim
return oc.ClaimsData.encode(pair, oc)
return oc.ClaimsData.encode(pair, oc, fn)
}

func (oc *OperatorClaims) ClaimType() ClaimType {
2
vendor/github.com/nats-io/jwt/v2/types.go
generated
vendored
@ -309,7 +309,7 @@ func (l *Limits) Validate(vr *ValidationResults) {
}
}

if l.Times != nil && len(l.Times) > 0 {
if len(l.Times) > 0 {
for _, t := range l.Times {
t.Validate(vr)
}
8
vendor/github.com/nats-io/jwt/v2/user_claims.go
generated
vendored
@ -1,5 +1,5 @@
/*
* Copyright 2018-2019 The NATS Authors
* Copyright 2018-2024 The NATS Authors
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
@ -92,11 +92,15 @@ func (u *UserClaims) HasEmptyPermissions() bool {

// Encode tries to turn the user claims into a JWT string
func (u *UserClaims) Encode(pair nkeys.KeyPair) (string, error) {
return u.EncodeWithSigner(pair, nil)
}

func (u *UserClaims) EncodeWithSigner(pair nkeys.KeyPair, fn SignFn) (string, error) {
if !nkeys.IsValidPublicUserKey(u.Subject) {
return "", errors.New("expected subject to be user public key")
}
u.Type = UserClaim
return u.ClaimsData.encode(pair, u)
return u.ClaimsData.encode(pair, u, fn)
}

// DecodeUserClaims tries to parse a user claims from a JWT string
2
vendor/github.com/nats-io/nats-server/v2/conf/fuzz.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2020 The NATS Authors
// Copyright 2020-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2
vendor/github.com/nats-io/nats-server/v2/conf/parse.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2013-2018 The NATS Authors
// Copyright 2013-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2
vendor/github.com/nats-io/nats-server/v2/logger/log.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2012-2019 The NATS Authors
// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2
vendor/github.com/nats-io/nats-server/v2/logger/syslog.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2012-2019 The NATS Authors
// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2
vendor/github.com/nats-io/nats-server/v2/logger/syslog_windows.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2012-2018 The NATS Authors
// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
13
vendor/github.com/nats-io/nats-server/v2/server/accounts.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2018-2023 The NATS Authors
// Copyright 2018-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -858,9 +858,14 @@ func (a *Account) Interest(subject string) int {
func (a *Account) addClient(c *client) int {
a.mu.Lock()
n := len(a.clients)
if a.clients != nil {
a.clients[c] = struct{}{}

// Could come here earlier than the account is registered with the server.
// Make sure we can still track clients.
if a.clients == nil {
a.clients = make(map[*client]struct{})
}
a.clients[c] = struct{}{}

// If we did not add it, we are done
if n == len(a.clients) {
a.mu.Unlock()
@ -2021,7 +2026,7 @@ func (a *Account) addServiceImportSub(si *serviceImport) error {
a.mu.Unlock()

cb := func(sub *subscription, c *client, acc *Account, subject, reply string, msg []byte) {
c.processServiceImport(si, acc, msg)
c.pa.delivered = c.processServiceImport(si, acc, msg)
}
sub, err := c.processSubEx([]byte(subject), nil, []byte(sid), cb, true, true, false)
if err != nil {
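
The addClient hunk above is the standard lazy-initialization pattern for a map guarded by a mutex, so a client can be tracked even before the account is registered. A generic sketch of the same pattern (types and names are illustrative, not server code):

import "sync"

type registry struct {
	mu      sync.Mutex
	members map[string]struct{}
}

func (r *registry) add(id string) bool {
	r.mu.Lock()
	defer r.mu.Unlock()
	n := len(r.members)
	if r.members == nil { // may run before the registry is initialized
		r.members = make(map[string]struct{})
	}
	r.members[id] = struct{}{}
	return len(r.members) != n // false if id was already present
}
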
2
vendor/github.com/nats-io/nats-server/v2/server/auth_callout.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2022-2023 The NATS Authors
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2
vendor/github.com/nats-io/nats-server/v2/server/avl/seqset.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2023 The NATS Authors
// Copyright 2023-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
8
vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2022-2023 The NATS Authors
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -46,11 +46,13 @@ type MatchByType int
const (
matchByIssuer MatchByType = iota + 1
matchBySubject
matchByThumbprint
)

var MatchByMap = map[string]MatchByType{
"issuer": matchByIssuer,
"subject": matchBySubject,
"issuer": matchByIssuer,
"subject": matchBySubject,
"thumbprint": matchByThumbprint,
}

var Usage = `
3
vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_other.go
generated
vendored
@ -26,8 +26,7 @@ var _ = MATCHBYEMPTY
// otherKey implements crypto.Signer and crypto.Decrypter to satisfy linter on platforms that don't implement certstore
type otherKey struct{}

func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, config *tls.Config) error {
_, _, _, _ = certStore, certMatchBy, certMatch, config
func TLSConfig(_ StoreType, _ MatchByType, _ string, _ []string, _ bool, _ *tls.Config) error {
return ErrOSNotCompatCertStore
}

215
vendor/github.com/nats-io/nats-server/v2/server/certstore/certstore_windows.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2022-2023 The NATS Authors
// Copyright 2022-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -41,26 +41,26 @@ import (

const (
// wincrypt.h constants
winAcquireCached = 0x1 // CRYPT_ACQUIRE_CACHE_FLAG
winAcquireSilent = 0x40 // CRYPT_ACQUIRE_SILENT_FLAG
winAcquireOnlyNCryptKey = 0x40000 // CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG
winEncodingX509ASN = 1 // X509_ASN_ENCODING
winEncodingPKCS7 = 65536 // PKCS_7_ASN_ENCODING
winCertStoreProvSystem = 10 // CERT_STORE_PROV_SYSTEM
winCertStoreCurrentUser = uint32(winCertStoreCurrentUserID << winCompareShift) // CERT_SYSTEM_STORE_CURRENT_USER
winCertStoreLocalMachine = uint32(winCertStoreLocalMachineID << winCompareShift) // CERT_SYSTEM_STORE_LOCAL_MACHINE
winCertStoreCurrentUserID = 1 // CERT_SYSTEM_STORE_CURRENT_USER_ID
winCertStoreLocalMachineID = 2 // CERT_SYSTEM_STORE_LOCAL_MACHINE_ID
winInfoIssuerFlag = 4 // CERT_INFO_ISSUER_FLAG
winInfoSubjectFlag = 7 // CERT_INFO_SUBJECT_FLAG
winCompareNameStrW = 8 // CERT_COMPARE_NAME_STR_A
winCompareShift = 16 // CERT_COMPARE_SHIFT
winAcquireCached = windows.CRYPT_ACQUIRE_CACHE_FLAG
winAcquireSilent = windows.CRYPT_ACQUIRE_SILENT_FLAG
winAcquireOnlyNCryptKey = windows.CRYPT_ACQUIRE_ONLY_NCRYPT_KEY_FLAG
winEncodingX509ASN = windows.X509_ASN_ENCODING
winEncodingPKCS7 = windows.PKCS_7_ASN_ENCODING
winCertStoreProvSystem = windows.CERT_STORE_PROV_SYSTEM
winCertStoreCurrentUser = windows.CERT_SYSTEM_STORE_CURRENT_USER
winCertStoreLocalMachine = windows.CERT_SYSTEM_STORE_LOCAL_MACHINE
winCertStoreReadOnly = windows.CERT_STORE_READONLY_FLAG
winInfoIssuerFlag = windows.CERT_INFO_ISSUER_FLAG
winInfoSubjectFlag = windows.CERT_INFO_SUBJECT_FLAG
winCompareNameStrW = windows.CERT_COMPARE_NAME_STR_W
winCompareShift = windows.CERT_COMPARE_SHIFT

// Reference https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore
winFindIssuerStr = winCompareNameStrW<<winCompareShift | winInfoIssuerFlag // CERT_FIND_ISSUER_STR_W
winFindSubjectStr = winCompareNameStrW<<winCompareShift | winInfoSubjectFlag // CERT_FIND_SUBJECT_STR_W
winFindIssuerStr = windows.CERT_FIND_ISSUER_STR_W
winFindSubjectStr = windows.CERT_FIND_SUBJECT_STR_W
winFindHashStr = windows.CERT_FIND_HASH_STR

winNcryptKeySpec = 0xFFFFFFFF // CERT_NCRYPT_KEY_SPEC
winNcryptKeySpec = windows.CERT_NCRYPT_KEY_SPEC

winBCryptPadPKCS1 uintptr = 0x2
winBCryptPadPSS uintptr = 0x8 // Modern TLS 1.2+
@ -76,7 +76,7 @@ const (
winECK3Magic = 0x334B4345 // "ECK3" BCRYPT_ECDH_PUBLIC_P384_MAGIC
winECK5Magic = 0x354B4345 // "ECK5" BCRYPT_ECDH_PUBLIC_P521_MAGIC

winCryptENotFound = 0x80092004 // CRYPT_E_NOT_FOUND
winCryptENotFound = windows.CRYPT_E_NOT_FOUND

providerMSSoftware = "Microsoft Software Key Storage Provider"
)
@ -111,14 +111,24 @@ var (
crypto.SHA512: winWide("SHA512"), // BCRYPT_SHA512_ALGORITHM
}

// MY is well-known system store on Windows that holds personal certificates
winMyStore = winWide("MY")
// MY is well-known system store on Windows that holds personal certificates. Read
// More about the CA locations here:
// https://learn.microsoft.com/en-us/dotnet/framework/configure-apps/file-schema/wcf/certificate-of-clientcertificate-element?redirectedfrom=MSDN
// https://superuser.com/questions/217719/what-are-the-windows-system-certificate-stores
// https://docs.microsoft.com/en-us/windows/win32/seccrypto/certificate-stores
// https://learn.microsoft.com/en-us/windows/win32/seccrypto/system-store-locations
// https://stackoverflow.com/questions/63286085/which-x509-storename-refers-to-the-certificates-stored-beneath-trusted-root-cert#:~:text=4-,StoreName.,is%20%22Intermediate%20Certification%20Authorities%22.
winMyStore = winWide("MY")
winIntermediateCAStore = winWide("CA")
winRootStore = winWide("Root")
winAuthRootStore = winWide("AuthRoot")

// These DLLs must be available on all Windows hosts
winCrypt32 = windows.NewLazySystemDLL("crypt32.dll")
winNCrypt = windows.NewLazySystemDLL("ncrypt.dll")

winCertFindCertificateInStore = winCrypt32.NewProc("CertFindCertificateInStore")
winCertVerifyTimeValidity = winCrypt32.NewProc("CertVerifyTimeValidity")
winCryptAcquireCertificatePrivateKey = winCrypt32.NewProc("CryptAcquireCertificatePrivateKey")
winNCryptExportKey = winNCrypt.NewProc("NCryptExportKey")
winNCryptOpenStorageProvider = winNCrypt.NewProc("NCryptOpenStorageProvider")
@ -156,9 +166,40 @@ type winPSSPaddingInfo struct {
cbSalt uint32
}

// TLSConfig fulfills the same function as reading cert and key pair from pem files but
// sources the Windows certificate store instead
func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, config *tls.Config) error {
// createCACertsPool generates a CertPool from the Windows certificate store,
// adding all matching certificates from the caCertsMatch array to the pool.
// All matching certificates (vs first) are added to the pool based on a user
// request. If no certificates are found an error is returned.
func createCACertsPool(cs *winCertStore, storeType uint32, caCertsMatch []string, skipInvalid bool) (*x509.CertPool, error) {
var errs []error
caPool := x509.NewCertPool()
for _, s := range caCertsMatch {
lfs, err := cs.caCertsBySubjectMatch(s, storeType, skipInvalid)
if err != nil {
errs = append(errs, err)
} else {
for _, lf := range lfs {
caPool.AddCert(lf)
}
}
}
// If every lookup failed return the errors.
if len(errs) == len(caCertsMatch) {
return nil, fmt.Errorf("unable to match any CA certificate: %v", errs)
}
return caPool, nil
}

// TLSConfig fulfills the same function as reading cert and key pair from
// pem files but sources the Windows certificate store instead. The
// certMatchBy and certMatch fields search the "MY" certificate location
// for the first certificate that matches the certMatch field. The
// caCertsMatch field is used to search the Trusted Root, Third Party Root,
// and Intermediate Certificate Authority locations for certificates with
// Subjects matching the provided strings. If a match is found, the
// certificate is added to the pool that is used to verify the certificate
// chain.
func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, caCertsMatch []string, skipInvalid bool, config *tls.Config) error {
var (
leaf *x509.Certificate
leafCtx *windows.CertContext
@ -185,9 +226,11 @@ func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, c

// certByIssuer or certBySubject
if certMatchBy == matchBySubject || certMatchBy == MATCHBYEMPTY {
leaf, leafCtx, err = cs.certBySubject(certMatch, scope)
leaf, leafCtx, err = cs.certBySubject(certMatch, scope, skipInvalid)
} else if certMatchBy == matchByIssuer {
leaf, leafCtx, err = cs.certByIssuer(certMatch, scope)
leaf, leafCtx, err = cs.certByIssuer(certMatch, scope, skipInvalid)
} else if certMatchBy == matchByThumbprint {
leaf, leafCtx, err = cs.certByThumbprint(certMatch, scope, skipInvalid)
} else {
return ErrBadMatchByType
}
@ -205,6 +248,14 @@ func TLSConfig(certStore StoreType, certMatchBy MatchByType, certMatch string, c
if pk == nil {
return ErrNoPrivateKeyStoreRef
}
// Look for CA Certificates
if len(caCertsMatch) != 0 {
caPool, err := createCACertsPool(cs, scope, caCertsMatch, skipInvalid)
if err != nil {
return err
}
config.ClientCAs = caPool
}
} else {
return ErrBadCertStore
}
@ -278,7 +329,7 @@ func winFindCert(store windows.Handle, enc, findFlags, findType uint32, para *ui
)
if h == 0 {
// Actual error, or simply not found?
if errno, ok := err.(syscall.Errno); ok && errno == winCryptENotFound {
if errno, ok := err.(syscall.Errno); ok && errno == syscall.Errno(winCryptENotFound) {
return nil, ErrFailedCertSearch
}
return nil, ErrFailedCertSearch
@ -287,6 +338,16 @@ func winFindCert(store windows.Handle, enc, findFlags, findType uint32, para *ui
return (*windows.CertContext)(unsafe.Pointer(h)), nil
}

// winVerifyCertValid wraps the CertVerifyTimeValidity and simply returns true if the certificate is valid
func winVerifyCertValid(timeToVerify *windows.Filetime, certInfo *windows.CertInfo) bool {
// this function does not document returning errors / setting lasterror
r, _, _ := winCertVerifyTimeValidity.Call(
uintptr(unsafe.Pointer(timeToVerify)),
uintptr(unsafe.Pointer(certInfo)),
)
return r == 0
}

// winCertStore is a store implementation for the Windows Certificate Store
type winCertStore struct {
Prov uintptr
@ -326,21 +387,70 @@ func winCertContextToX509(ctx *windows.CertContext) (*x509.Certificate, error) {
// CertContext pointer returned allows subsequent key operations like Sign. Caller specifies
// current user's personal certs or local machine's personal certs using storeType.
// See CERT_FIND_ISSUER_STR description at https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore
func (w *winCertStore) certByIssuer(issuer string, storeType uint32) (*x509.Certificate, *windows.CertContext, error) {
return w.certSearch(winFindIssuerStr, issuer, winMyStore, storeType)
func (w *winCertStore) certByIssuer(issuer string, storeType uint32, skipInvalid bool) (*x509.Certificate, *windows.CertContext, error) {
return w.certSearch(winFindIssuerStr, issuer, winMyStore, storeType, skipInvalid)
}

// certBySubject matches and returns the first certificate found by passed subject field.
// CertContext pointer returned allows subsequent key operations like Sign. Caller specifies
// current user's personal certs or local machine's personal certs using storeType.
// See CERT_FIND_SUBJECT_STR description at https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore
func (w *winCertStore) certBySubject(subject string, storeType uint32) (*x509.Certificate, *windows.CertContext, error) {
return w.certSearch(winFindSubjectStr, subject, winMyStore, storeType)
func (w *winCertStore) certBySubject(subject string, storeType uint32, skipInvalid bool) (*x509.Certificate, *windows.CertContext, error) {
return w.certSearch(winFindSubjectStr, subject, winMyStore, storeType, skipInvalid)
}

// certByThumbprint matches and returns the first certificate found by passed SHA1 thumbprint.
// CertContext pointer returned allows subsequent key operations like Sign. Caller specifies
// current user's personal certs or local machine's personal certs using storeType.
// See CERT_FIND_SUBJECT_STR description at https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore
func (w *winCertStore) certByThumbprint(hash string, storeType uint32, skipInvalid bool) (*x509.Certificate, *windows.CertContext, error) {
return w.certSearch(winFindHashStr, hash, winMyStore, storeType, skipInvalid)
}

// caCertsBySubjectMatch matches and returns all matching certificates of the subject field.
//
// The following locations are searched:
// 1) Root (Trusted Root Certification Authorities)
// 2) AuthRoot (Third-Party Root Certification Authorities)
// 3) CA (Intermediate Certification Authorities)
//
// Caller specifies current user's personal certs or local machine's personal certs using storeType.
// See CERT_FIND_SUBJECT_STR description at https://learn.microsoft.com/en-us/windows/win32/api/wincrypt/nf-wincrypt-certfindcertificateinstore
func (w *winCertStore) caCertsBySubjectMatch(subject string, storeType uint32, skipInvalid bool) ([]*x509.Certificate, error) {
var (
leaf *x509.Certificate
searchLocations = [3]*uint16{winRootStore, winAuthRootStore, winIntermediateCAStore}
rv []*x509.Certificate
)
// surprisingly, an empty string returns a result. We'll treat this as an error.
if subject == "" {
return nil, ErrBadCaCertMatchField
}
for _, sr := range searchLocations {
var err error
if leaf, _, err = w.certSearch(winFindSubjectStr, subject, sr, storeType, skipInvalid); err == nil {
rv = append(rv, leaf)
} else {
// Ignore the failed search from a single location. Errors we catch include
// ErrFailedX509Extract (resulting from a malformed certificate) and errors
// around invalid attributes, unsupported algorithms, etc. These are corner
// cases as certificates with these errors shouldn't have been allowed
// to be added to the store in the first place.
if err != ErrFailedCertSearch {
return nil, err
}
}
}
// Not found anywhere
if len(rv) == 0 {
return nil, ErrFailedCertSearch
}
return rv, nil
}

// certSearch is a helper function to lookup certificates based on search type and match value.
// store is used to specify which store to perform the lookup in (system or user).
func (w *winCertStore) certSearch(searchType uint32, matchValue string, searchRoot *uint16, store uint32) (*x509.Certificate, *windows.CertContext, error) {
func (w *winCertStore) certSearch(searchType uint32, matchValue string, searchRoot *uint16, store uint32, skipInvalid bool) (*x509.Certificate, *windows.CertContext, error) {
// store handle to "MY" store
h, err := w.storeHandle(store, searchRoot)
if err != nil {
@ -357,23 +467,32 @@ func (w *winCertStore) certSearch(searchType uint32, matchValue string, searchRo

// pass 0 as the third parameter because it is not used
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa376064(v=vs.85).aspx
nc, err := winFindCert(h, winEncodingX509ASN|winEncodingPKCS7, 0, searchType, i, prev)
if err != nil {
return nil, nil, err
}
if nc != nil {
// certificate found
prev = nc

// Extract the DER-encoded certificate from the cert context
xc, err := winCertContextToX509(nc)
if err == nil {
cert = xc
} else {
return nil, nil, ErrFailedX509Extract
for {
nc, err := winFindCert(h, winEncodingX509ASN|winEncodingPKCS7, 0, searchType, i, prev)
if err != nil {
return nil, nil, err
}
if nc != nil {
// certificate found
prev = nc

var now *windows.Filetime
if skipInvalid && !winVerifyCertValid(now, nc.CertInfo) {
continue
}

// Extract the DER-encoded certificate from the cert context
xc, err := winCertContextToX509(nc)
if err == nil {
cert = xc
break
} else {
return nil, nil, ErrFailedX509Extract
}
} else {
return nil, nil, ErrFailedCertSearch
}
} else {
return nil, nil, ErrFailedCertSearch
}

if cert == nil {
@ -396,7 +515,7 @@ func winNewStoreHandle(provider uint32, store *uint16) (*winStoreHandle, error)
winCertStoreProvSystem,
0,
0,
provider,
provider|winCertStoreReadOnly,
uintptr(unsafe.Pointer(store)))
if err != nil {
return nil, ErrBadCryptoStoreProvider
6
vendor/github.com/nats-io/nats-server/v2/server/certstore/errors.go
generated
vendored
@ -68,6 +68,12 @@ var (
// ErrBadCertMatchField represents malformed cert_match option
ErrBadCertMatchField = errors.New("expected 'cert_match' to be a valid non-empty string")

// ErrBadCaCertMatchField represents malformed ca_certs_match option
ErrBadCaCertMatchField = errors.New("expected 'ca_certs_match' to be a valid non-empty string array")

// ErrBadCertMatchSkipInvalidField represents malformed cert_match_skip_invalid option
ErrBadCertMatchSkipInvalidField = errors.New("expected 'cert_match_skip_invalid' to be a boolean")

// ErrOSNotCompatCertStore represents cert_store passed that exists but is not valid on current OS
ErrOSNotCompatCertStore = errors.New("cert_store not compatible with current operating system")
)

2
vendor/github.com/nats-io/nats-server/v2/server/ciphersuites.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2016-2018 The NATS Authors
// Copyright 2016-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
230
vendor/github.com/nats-io/nats-server/v2/server/client.go
generated
vendored
@ -1,4 +1,4 @@
// Copyright 2012-2023 The NATS Authors
// Copyright 2012-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@ -113,8 +113,9 @@ const (
maxNoRTTPingBeforeFirstPong = 2 * time.Second

// For stalling fast producers
stallClientMinDuration = 100 * time.Millisecond
stallClientMaxDuration = time.Second
stallClientMinDuration = 2 * time.Millisecond
stallClientMaxDuration = 5 * time.Millisecond
stallTotalAllowed = 10 * time.Millisecond
)

var readLoopReportThreshold = readLoopReport
@ -261,6 +262,9 @@ type client struct {
last time.Time
lastIn time.Time

repliesSincePrune uint16
lastReplyPrune time.Time

headers bool

rtt time.Duration
@ -420,6 +424,7 @@ const (
pruneSize = 32
routeTargetInit = 8
replyPermLimit = 4096
replyPruneTime = time.Second
)

// Represent read cache booleans with a bitmask
@ -458,6 +463,9 @@ type readCache struct {

// Capture the time we started processing our readLoop.
start time.Time

// Total time stalled so far for readLoop processing.
tst time.Duration
}

// set the flag (would be equivalent to set the boolean to true)
@ -1410,6 +1418,11 @@ func (c *client) readLoop(pre []byte) {
}
return
}
// Clear total stalled time here.
if c.in.tst >= stallClientMaxDuration {
c.rateLimitFormatWarnf("Producer was stalled for a total of %v", c.in.tst.Round(time.Millisecond))
}
c.in.tst = 0
}

// If we are a ROUTER/LEAF and have processed an INFO, it is possible that
@ -1636,8 +1649,10 @@ func (c *client) flushOutbound() bool {
}
consumed := len(wnb)

// Actual write to the socket.
nc.SetWriteDeadline(start.Add(wdl))
// Actual write to the socket. The deadline applies to each batch
// rather than the total write, such that the configured deadline
// can be tuned to a known maximum quantity (64MB).
nc.SetWriteDeadline(time.Now().Add(wdl))
wn, err = wnb.WriteTo(nc)
nc.SetWriteDeadline(time.Time{})
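
The SetWriteDeadline change above moves from one deadline for the whole flush to a fresh deadline per batch, so the configured write deadline bounds a known maximum batch size. The same net.Conn pattern in isolation (a generic sketch, not server code):

func writeBatches(nc net.Conn, batches [][]byte, wdl time.Duration) error {
	for _, b := range batches {
		// Deadline covers this batch only, so a tuned wdl maps to a
		// known maximum batch size rather than the total payload.
		nc.SetWriteDeadline(time.Now().Add(wdl))
		_, err := nc.Write(b)
		nc.SetWriteDeadline(time.Time{}) // clear the deadline
		if err != nil {
			return err
		}
	}
	return nil
}
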
@ -1724,7 +1739,7 @@ func (c *client) flushOutbound() bool {

// Check if we have a stalled gate and if so and we are recovering release
// any stalled producers. Only kind==CLIENT will stall.
if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/2) {
if c.out.stc != nil && (n == attempted || c.out.pb < c.out.mp/4*3) {
close(c.out.stc)
c.out.stc = nil
}
@ -2286,7 +2301,8 @@ func (c *client) queueOutbound(data []byte) {
// Check here if we should create a stall channel if we are falling behind.
// We do this here since if we wait for consumer's writeLoop it could be
// too late with large number of fan in producers.
if c.out.pb > c.out.mp/2 && c.out.stc == nil {
// If the outbound connection is > 75% of maximum pending allowed, create a stall gate.
if c.out.pb > c.out.mp/4*3 && c.out.stc == nil {
c.out.stc = make(chan struct{})
}
}
@ -3331,31 +3347,37 @@ func (c *client) msgHeader(subj, reply []byte, sub *subscription) []byte {
}

func (c *client) stalledWait(producer *client) {
// Check to see if we have exceeded our total wait time per readLoop invocation.
if producer.in.tst > stallTotalAllowed {
return
}

// Grab stall channel which the slow consumer will close when caught up.
stall := c.out.stc
ttl := stallDuration(c.out.pb, c.out.mp)

// Calculate stall time.
ttl := stallClientMinDuration
if c.out.pb >= c.out.mp {
ttl = stallClientMaxDuration
}

c.mu.Unlock()
defer c.mu.Lock()

// Now check if we are close to total allowed.
if producer.in.tst+ttl > stallTotalAllowed {
ttl = stallTotalAllowed - producer.in.tst
}
delay := time.NewTimer(ttl)
defer delay.Stop()

start := time.Now()
select {
case <-stall:
case <-delay.C:
producer.Debugf("Timed out of fast producer stall (%v)", ttl)
}
}

func stallDuration(pb, mp int64) time.Duration {
ttl := stallClientMinDuration
if pb >= mp {
ttl = stallClientMaxDuration
} else if hmp := mp / 2; pb > hmp {
bsz := hmp / 10
additional := int64(ttl) * ((pb - hmp) / bsz)
ttl += time.Duration(additional)
}
return ttl
producer.in.tst += time.Since(start)
}
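
Taken together, the new stall constants and the rewritten stalledWait bound how long a fast producer can be paused: 2ms per wait, 5ms when the consumer is at or over its max pending, and at most 10ms total per readLoop invocation. The clamping amounts to this sketch (the constants mirror the diff above; the helper name is illustrative):

const (
	stallClientMinDuration = 2 * time.Millisecond
	stallClientMaxDuration = 5 * time.Millisecond
	stallTotalAllowed      = 10 * time.Millisecond
)

// ttl for one wait, given consumer pending pb, max pending mp, and the
// producer's stall time already accumulated this readLoop (spent).
func stallTTL(pb, mp int64, spent time.Duration) time.Duration {
	ttl := stallClientMinDuration
	if pb >= mp { // consumer at/over its max pending: wait longer
		ttl = stallClientMaxDuration
	}
	if spent+ttl > stallTotalAllowed { // cap the total per readLoop
		ttl = stallTotalAllowed - spent
	}
	return ttl
}
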

// Used to treat maps as efficient set
@ -3447,10 +3469,15 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su
msgSize -= int64(LEN_CR_LF)
}

// No atomic needed since accessed under client lock.
// Monitor is reading those also under client's lock.
client.outMsgs++
client.outBytes += msgSize
// We do not update the outbound stats if we are doing trace only since
// this message will not be sent out.
// Also do not update on internal callbacks.
if sub.icb == nil {
// No atomic needed since accessed under client lock.
// Monitor is reading those also under client's lock.
client.outMsgs++
client.outBytes += msgSize
}

// Check for internal subscriptions.
if sub.icb != nil && !c.noIcb {
@ -3461,23 +3488,35 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su
}
client.mu.Unlock()

// For service imports, track if we delivered.
didDeliver := true

// Internal account clients are for service imports and need the '\r\n'.
start := time.Now()
if client.kind == ACCOUNT {
sub.icb(sub, c, acc, string(subject), string(reply), msg)
// If we are a service import check to make sure we delivered the message somewhere.
if sub.si {
didDeliver = c.pa.delivered
}
} else {
sub.icb(sub, c, acc, string(subject), string(reply), msg[:msgSize])
}
if dur := time.Since(start); dur >= readLoopReportThreshold {
srv.Warnf("Internal subscription on %q took too long: %v", subject, dur)
}
return true

return didDeliver
}

// If we are a client and we detect that the consumer we are
// sending to is in a stalled state, go ahead and wait here
// with a limit.
if c.kind == CLIENT && client.out.stc != nil {
if srv.getOpts().NoFastProducerStall {
client.mu.Unlock()
return false
}
client.stalledWait(c)
}

@ -3526,9 +3565,11 @@ func (c *client) deliverMsg(prodIsMQTT bool, sub *subscription, acc *Account, su

// If we are tracking dynamic publish permissions that track reply subjects,
// do that accounting here. We only look at client.replies which will be non-nil.
if client.replies != nil && len(reply) > 0 {
// Only reply subject permissions if the client is not already allowed to publish to the reply subject.
if client.replies != nil && len(reply) > 0 && !client.pubAllowedFullCheck(string(reply), true, true) {
client.replies[string(reply)] = &resp{time.Now(), 0}
if len(client.replies) > replyPermLimit {
client.repliesSincePrune++
if client.repliesSincePrune > replyPermLimit || time.Since(client.lastReplyPrune) > replyPruneTime {
client.pruneReplyPerms()
}
}
@ -3652,6 +3693,9 @@ func (c *client) pruneReplyPerms() {
delete(c.replies, k)
}
}

c.repliesSincePrune = 0
c.lastReplyPrune = now
}

// pruneDenyCache will prune the deny cache via randomly
@ -3720,7 +3764,7 @@ func (c *client) pubAllowedFullCheck(subject string, fullCheck, hasLock bool) bo
allowed = np == 0
}

// If we are currently not allowed but we are tracking reply subjects
// If we are tracking reply subjects
// dynamically, check to see if we are allowed here but avoid pcache.
// We need to acquire the lock though.
if !allowed && fullCheck && c.perms.resp != nil {
@ -3950,7 +3994,7 @@ func (c *client) processInboundClientMsg(msg []byte) (bool, bool) {
reply = append(reply, '@')
reply = append(reply, c.pa.deliver...)
}
didDeliver = c.sendMsgToGateways(acc, msg, c.pa.subject, reply, qnames) || didDeliver
didDeliver = c.sendMsgToGateways(acc, msg, c.pa.subject, reply, qnames, false) || didDeliver
}

// Check to see if we did not deliver to anyone and the client has a reply subject set
@ -3997,7 +4041,7 @@ func (c *client) handleGWReplyMap(msg []byte) bool {
reply = append(reply, '@')
reply = append(reply, c.pa.deliver...)
}
c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil)
c.sendMsgToGateways(c.acc, msg, c.pa.subject, reply, nil, false)
}
return true
}
@ -4120,9 +4164,20 @@ func (c *client) setHeader(key, value string, msg []byte) []byte {
return bb.Bytes()
}

// Will return the value for the header denoted by key or nil if it does not exists.
// This function ignores errors and tries to achieve speed and no additional allocations.
// Will return a copy of the value for the header denoted by key or nil if it does not exist.
// If you know that it is safe to refer to the underlying hdr slice for the period that the
// return value is used, then sliceHeader() will be faster.
func getHeader(key string, hdr []byte) []byte {
v := sliceHeader(key, hdr)
if v == nil {
return nil
}
return append(make([]byte, 0, len(v)), v...)
}

// Will return the sliced value for the header denoted by key or nil if it does not exists.
// This function ignores errors and tries to achieve speed and no additional allocations.
func sliceHeader(key string, hdr []byte) []byte {
if len(hdr) == 0 {
return nil
}
@ -4147,15 +4202,14 @@ func getHeader(key string, hdr []byte) []byte {
index++
}
// Collect together the rest of the value until we hit a CRLF.
var value []byte
start := index
for index < hdrLen {
if hdr[index] == '\r' && index < hdrLen-1 && hdr[index+1] == '\n' {
break
}
value = append(value, hdr[index])
index++
}
return value
return hdr[start:index:index]
}
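
The getHeader/sliceHeader split above separates an allocating copy from a zero-copy view, and hdr[start:index:index] is a full slice expression that caps the result's capacity so a later append cannot overwrite the header buffer. A small illustration (offsets are for this example string only):

hdr := []byte("NATS/1.0\r\nFoo: bar\r\n\r\n")
v := hdr[15:18:18] // "bar", len 3, cap 3 (capacity capped at index 18)
v = append(v, '!') // reallocates; hdr is left untouched
cp := append(make([]byte, 0, 3), hdr[15:18]...) // what getHeader does: a safe copy
_ = cp
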
|
||||
// For bytes.HasPrefix below.
|
||||
@ -4166,17 +4220,17 @@ var (
|
||||
|
||||
// processServiceImport is an internal callback when a subscription matches an imported service
|
||||
// from another account. This includes response mappings as well.
|
||||
func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) {
|
||||
func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byte) bool {
|
||||
// If we are a GW and this is not a direct serviceImport ignore.
|
||||
isResponse := si.isRespServiceImport()
|
||||
if (c.kind == GATEWAY || c.kind == ROUTER) && !isResponse {
|
||||
return
|
||||
return false
|
||||
}
|
||||
// Detect cycles and ignore (return) when we detect one.
|
||||
if len(c.pa.psi) > 0 {
|
||||
for i := len(c.pa.psi) - 1; i >= 0; i-- {
|
||||
if psi := c.pa.psi[i]; psi.se == si.se {
|
||||
return
|
||||
return false
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -4197,7 +4251,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt
|
||||
// response service imports and rrMap entries which all will need to simply expire.
|
||||
// TODO(dlc) - Come up with something better.
|
||||
if shouldReturn || (checkJS && si.se != nil && si.se.acc == c.srv.SystemAccount()) {
|
||||
return
|
||||
return false
|
||||
}
|
||||
|
||||
var nrr []byte
|
||||
@ -4269,7 +4323,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt
|
||||
var ci *ClientInfo
|
||||
if hadPrevSi && c.pa.hdr >= 0 {
|
||||
var cis ClientInfo
|
||||
if err := json.Unmarshal(getHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil {
|
||||
if err := json.Unmarshal(sliceHeader(ClientInfoHdr, msg[:c.pa.hdr]), &cis); err == nil {
|
||||
ci = &cis
|
||||
ci.Service = acc.Name
|
||||
// Check if we are moving into a share details account from a non-shared
|
||||
@ -4278,7 +4332,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt
|
||||
c.addServerAndClusterInfo(ci)
|
||||
}
|
||||
}
|
||||
} else if c.kind != LEAF || c.pa.hdr < 0 || len(getHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 {
|
||||
} else if c.kind != LEAF || c.pa.hdr < 0 || len(sliceHeader(ClientInfoHdr, msg[:c.pa.hdr])) == 0 {
|
||||
ci = c.getClientInfo(share)
|
||||
// If we did not share but the imports destination is the system account add in the server and cluster info.
|
||||
if !share && isSysImport {
|
||||
@ -4336,7 +4390,7 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt
|
||||
flags |= pmrCollectQueueNames
|
||||
var queues [][]byte
|
||||
didDeliver, queues = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags)
|
||||
didDeliver = c.sendMsgToGateways(siAcc, msg, []byte(to), nrr, queues) || didDeliver
|
||||
didDeliver = c.sendMsgToGateways(siAcc, msg, []byte(to), nrr, queues, false) || didDeliver
|
||||
} else {
|
||||
didDeliver, _ = c.processMsgResults(siAcc, rr, msg, c.pa.deliver, []byte(to), nrr, flags)
|
||||
}
|
||||
@ -4345,6 +4399,10 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt
|
||||
c.in.rts = orts
|
||||
c.pa = pacopy
|
||||
|
||||
// Before we undo didDeliver based on tracing and last mile, mark in the c.pa which informs us of no responders status.
|
||||
// If we override due to tracing and traceOnly we do not want to send back a no responders.
|
||||
c.pa.delivered = didDeliver
|
||||
|
||||
// Determine if we should remove this service import. This is for response service imports.
|
||||
// We will remove if we did not deliver, or if we are a response service import and we are
|
||||
// a singleton, or we have an EOF message.
|
||||
@ -4374,6 +4432,8 @@ func (c *client) processServiceImport(si *serviceImport, acc *Account, msg []byt
|
||||
siAcc.removeRespServiceImport(rsi, reason)
|
||||
}
|
||||
}
|
||||
|
||||
return didDeliver
|
||||
}
|
||||
|
||||
func (c *client) addSubToRouteTargets(sub *subscription) {
|
||||
@ -4570,6 +4630,21 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver,
|
||||
// Declared here because of goto.
|
||||
var queues [][]byte
|
||||
|
||||
var leafOrigin string
|
||||
switch c.kind {
|
||||
case ROUTER:
|
||||
if len(c.pa.origin) > 0 {
|
||||
// Picture a message sent from a leafnode to a server that then routes
|
||||
// this message: CluserA -leaf-> HUB1 -route-> HUB2
|
||||
// Here we are in HUB2, so c.kind is a ROUTER, but the message will
|
||||
// contain a c.pa.origin set to "ClusterA" to indicate that this message
|
||||
// originated from that leafnode cluster.
|
||||
leafOrigin = bytesToString(c.pa.origin)
}
case LEAF:
leafOrigin = c.remoteCluster()
}

// For all routes/leaf/gateway connections, we may still want to send messages to
// leaf nodes or routes even if there are no queue filters since we collect
// them above and do not process inline like normal clients.
@@ -4608,12 +4683,24 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver,
ql := _ql[:0]
for i := 0; i < len(qsubs); i++ {
sub = qsubs[i]
if sub.client.kind == LEAF || sub.client.kind == ROUTER {
// If we have assigned an rsub already, replace if the destination is a LEAF
// since we want to favor that compared to a ROUTER. We could make sure that
// we override only if previous was a ROUTE and not a LEAF, but we don't have to.
if rsub == nil || sub.client.kind == LEAF {
if dst := sub.client.kind; dst == LEAF || dst == ROUTER {
// If the destination is a LEAF, we first need to make sure
// that we would not pick one that was the origin of this
// message.
if dst == LEAF && leafOrigin != _EMPTY_ && leafOrigin == sub.client.remoteCluster() {
continue
}
// If we have assigned a ROUTER rsub already, replace if
// the destination is a LEAF since we want to favor that.
if rsub == nil || (rsub.client.kind == ROUTER && dst == LEAF) {
rsub = sub
} else if dst == LEAF {
// We already have a LEAF and this is another one.
// Flip a coin to see if we swap it or not.
// See https://github.com/nats-io/nats-server/issues/6040
if fastrand.Uint32()%2 == 1 {
rsub = sub
}
}
} else {
ql = append(ql, sub)
@@ -4629,6 +4716,8 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver,
}

// Find a subscription that is able to deliver this message starting at a random index.
// Note that if the message came from a ROUTER, we will only have CLIENT or LEAF
// queue subs here, otherwise we can have all types.
for i := 0; i < lqs; i++ {
if sindex+i < lqs {
sub = qsubs[sindex+i]
@@ -4649,20 +4738,38 @@ func (c *client) processMsgResults(acc *Account, r *SublistResult, msg, deliver,
// Here we just care about a client or leaf and skipping a leaf and preferring locals.
if dst := sub.client.kind; dst == ROUTER || dst == LEAF {
if (src == LEAF || src == CLIENT) && dst == LEAF {
// If we come from a LEAF and are about to pick a LEAF connection,
// make sure this is not the same leaf cluster.
if src == LEAF && leafOrigin != _EMPTY_ && leafOrigin == sub.client.remoteCluster() {
continue
}
// Remember that leaf in case we don't find any other candidate.
// We already start randomly in lqs slice, so we don't need
// to do a random swap if we already have an rsub like we do
// when src == ROUTER above.
if rsub == nil {
rsub = sub
}
continue
} else {
// We would be picking a route, but if we had remembered a "hub" leaf,
// then pick that one instead of the route.
if rsub != nil && rsub.client.kind == LEAF && rsub.client.isHubLeafNode() {
break
// We want to favor qsubs in our own cluster. If the routed
// qsub has an origin, it means that is on behalf of a leaf.
// We need to treat it differently.
if len(sub.origin) > 0 {
// If we already have an rsub, nothing to do. Also, do
// not pick a routed qsub for a LEAF origin cluster
// that is the same than where the message comes from.
if rsub == nil && (leafOrigin == _EMPTY_ || leafOrigin != bytesToString(sub.origin)) {
rsub = sub
}
continue
}
// This is a qsub that is local on the remote server (or
// we are connected to an older server and we don't know).
// Pick this one and be done.
rsub = sub
break
}
break
}

// Assume delivery subject is normal subject to this point.
@@ -4749,18 +4856,11 @@ sendToRoutesOrLeafs:
// If so make sure we do not send it back to the same cluster for a different
// leafnode. Cluster wide no echo.
if dc.kind == LEAF {
// Check two scenarios. One is inbound from a route (c.pa.origin)
if c.kind == ROUTER && len(c.pa.origin) > 0 {
if bytesToString(c.pa.origin) == dc.remoteCluster() {
continue
}
}
// The other is leaf to leaf.
if c.kind == LEAF {
src, dest := c.remoteCluster(), dc.remoteCluster()
if src != _EMPTY_ && src == dest {
continue
}
// Check two scenarios. One is inbound from a route (c.pa.origin),
// and the other is leaf to leaf. In both case, leafOrigin is the one
// to use for the comparison.
if leafOrigin != _EMPTY_ && leafOrigin == dc.remoteCluster() {
continue
}

// We need to check if this is a request that has a stamped client information header.
@@ -4797,7 +4897,7 @@ func (c *client) checkLeafClientInfoHeader(msg []byte) (dmsg []byte, setHdr bool
if c.pa.hdr < 0 || len(msg) < c.pa.hdr {
return msg, false
}
cir := getHeader(ClientInfoHdr, msg[:c.pa.hdr])
cir := sliceHeader(ClientInfoHdr, msg[:c.pa.hdr])
if len(cir) == 0 {
return msg, false
}
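Aside: the coin flip in the hunk above (see issue 6040) is the standard trick for choosing fairly between candidates that are discovered one at a time. A minimal, standalone Go sketch of the general form, a streaming choice with probability 1/(i+1), follows; pickUniform is a hypothetical helper for illustration, not server code:

package main

import (
	"fmt"
	"math/rand"
)

// pickUniform returns one element chosen uniformly from candidates by
// streaming over them once: the i-th candidate (0-based) replaces the
// current pick with probability 1/(i+1).
func pickUniform[T any](candidates []T) (T, bool) {
	var pick T
	if len(candidates) == 0 {
		return pick, false
	}
	for i, c := range candidates {
		if rand.Intn(i+1) == 0 { // probability 1/(i+1)
			pick = c
		}
	}
	return pick, true
}

func main() {
	leafs := []string{"leaf-A", "leaf-B", "leaf-C"}
	if p, ok := pickUniform(leafs); ok {
		fmt.Println("selected:", p)
	}
}

With exactly two candidates this reduces to the 50/50 swap used in the diff; the server only ever compares the incumbent against one new LEAF at a time, so the simple coin flip suffices there.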
7 vendor/github.com/nats-io/nats-server/v2/server/const.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2012-2023 The NATS Authors
// Copyright 2012-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -55,7 +55,7 @@ func init() {

const (
// VERSION is the current version for the server.
VERSION = "2.10.22"
VERSION = "2.10.27"

// PROTO is the currently supported protocol.
// 0 was the original
@@ -171,6 +171,9 @@ const (
// MAX_HPUB_ARGS Maximum possible number of arguments from HPUB proto.
MAX_HPUB_ARGS = 4

// MAX_RSUB_ARGS Maximum possible number of arguments from a RS+/LS+ proto.
MAX_RSUB_ARGS = 6

// DEFAULT_MAX_CLOSED_CLIENTS is the maximum number of closed connections we hold onto.
DEFAULT_MAX_CLOSED_CLIENTS = 10000

482 vendor/github.com/nats-io/nats-server/v2/server/consumer.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2019-2024 The NATS Authors
// Copyright 2019-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -60,7 +60,6 @@ type ConsumerInfo struct {
}

type ConsumerConfig struct {
// Durable is deprecated. All consumers should have names, picked by clients.
Durable string `json:"durable_name,omitempty"`
Name string `json:"name,omitempty"`
Description string `json:"description,omitempty"`
@@ -345,6 +344,7 @@ type consumer struct {
outq *jsOutQ
pending map[uint64]*Pending
ptmr *time.Timer
ptmrEnd time.Time
rdq []uint64
rdqi avl.SequenceSet
rdc map[uint64]uint64
@@ -394,7 +394,7 @@ type consumer struct {
ackMsgs *ipQueue[*jsAckMsg]

// for stream signaling when multiple filters are set.
sigSubs []*subscription
sigSubs []string
}

// A single subject filter.
@@ -504,7 +504,7 @@ func checkConsumerCfg(
}

// Check if we have a BackOff defined that MaxDeliver is within range etc.
if lbo := len(config.BackOff); lbo > 0 && config.MaxDeliver != -1 && config.MaxDeliver <= lbo {
if lbo := len(config.BackOff); lbo > 0 && config.MaxDeliver != -1 && lbo > config.MaxDeliver {
return NewJSConsumerMaxDeliverBackoffError()
}

@@ -950,7 +950,7 @@ func (mset *stream) addConsumerWithAssignment(config *ConsumerConfig, oname stri
// If we have multiple filter subjects, create a sublist which we will use
// in calling store.LoadNextMsgMulti.
if len(o.cfg.FilterSubjects) > 0 {
o.filters = NewSublistWithCache()
o.filters = NewSublistNoCache()
for _, filter := range o.cfg.FilterSubjects {
o.filters.Insert(&subscription{subject: []byte(filter)})
}
@@ -1349,7 +1349,7 @@ func (o *consumer) setLeader(isLeader bool) {
stopAndClearTimer(&o.dtmr)

// Make sure to clear out any re-deliver queues
stopAndClearTimer(&o.ptmr)
o.stopAndClearPtmr()
o.rdq = nil
o.rdqi.Empty()
o.pending = nil
@@ -1372,6 +1372,8 @@ func (o *consumer) setLeader(isLeader bool) {
// If we were the leader make sure to drain queued up acks.
if wasLeader {
o.ackMsgs.drain()
// Reset amount of acks that need to be processed.
atomic.StoreInt64(&o.awl, 0)
// Also remove any pending replies since we should not be the one to respond at this point.
o.replies = nil
}
@@ -1414,8 +1416,23 @@ func (o *consumer) unsubscribe(sub *subscription) {

// We need to make sure we protect access to the outq.
// Do all advisory sends here.
func (o *consumer) sendAdvisory(subj string, msg []byte) {
o.outq.sendMsg(subj, msg)
func (o *consumer) sendAdvisory(subject string, e any) {
if o.acc == nil {
return
}

// If there is no one listening for this advisory then save ourselves the effort
// and don't bother encoding the JSON or sending it.
if sl := o.acc.sl; (sl != nil && !sl.HasInterest(subject)) && !o.srv.hasGatewayInterest(o.acc.Name, subject) {
return
}

j, err := json.Marshal(e)
if err != nil {
return
}

o.outq.sendMsg(subject, j)
}

func (o *consumer) sendDeleteAdvisoryLocked() {
@@ -1431,13 +1448,8 @@ func (o *consumer) sendDeleteAdvisoryLocked() {
Domain: o.srv.getOpts().JetStreamDomain,
}

j, err := json.Marshal(e)
if err != nil {
return
}

subj := JSAdvisoryConsumerDeletedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
o.sendAdvisory(subj, e)
}

func (o *consumer) sendCreateAdvisory() {
@@ -1456,13 +1468,8 @@ func (o *consumer) sendCreateAdvisory() {
Domain: o.srv.getOpts().JetStreamDomain,
}

j, err := json.Marshal(e)
if err != nil {
return
}

subj := JSAdvisoryConsumerCreatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
o.sendAdvisory(subj, e)
}

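Aside: the reworked sendAdvisory above moves json.Marshal behind an interest check, so advisories nobody listens to cost nothing to encode. A rough standalone sketch of that pattern, where hasInterest and publishAdvisory are hypothetical stand-ins rather than server APIs:

package main

import (
	"encoding/json"
	"fmt"
)

// hasInterest stands in for a subject-interest lookup (the server consults
// its sublist and gateway state); here it is just a set of subjects that
// somebody subscribed to.
func hasInterest(interest map[string]bool, subject string) bool {
	return interest[subject]
}

// publishAdvisory encodes and "sends" an event only when there is a
// listener, so the JSON marshal cost is skipped entirely otherwise.
func publishAdvisory(interest map[string]bool, subject string, event any) {
	if !hasInterest(interest, subject) {
		return // nobody is listening: no marshal, no send
	}
	payload, err := json.Marshal(event)
	if err != nil {
		return
	}
	fmt.Printf("send %s: %s\n", subject, payload)
}

func main() {
	interest := map[string]bool{"adv.consumer.created": true}
	publishAdvisory(interest, "adv.consumer.created", map[string]string{"stream": "S", "consumer": "C"})
	publishAdvisory(interest, "adv.consumer.deleted", map[string]string{"stream": "S"}) // dropped early
}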
// Created returns created time.
@@ -1562,7 +1569,25 @@ func (o *consumer) updateDeliveryInterest(localInterest bool) bool {
return false
}

const (
defaultConsumerNotActiveStartInterval = 30 * time.Second
defaultConsumerNotActiveMaxInterval = 5 * time.Minute
)

var (
consumerNotActiveStartInterval = defaultConsumerNotActiveStartInterval
consumerNotActiveMaxInterval = defaultConsumerNotActiveMaxInterval
)

// deleteNotActive must only be called from time.AfterFunc or in its own
// goroutine, as it can block on clean-up.
func (o *consumer) deleteNotActive() {
// Take a copy of these when the goroutine starts, mostly it avoids a
// race condition with tests that modify these consts, such as
// TestJetStreamClusterGhostEphemeralsAfterRestart.
cnaMax := consumerNotActiveMaxInterval
cnaStart := consumerNotActiveStartInterval

o.mu.Lock()
if o.mset == nil {
o.mu.Unlock()
@@ -1602,7 +1627,24 @@ func (o *consumer) deleteNotActive() {

s, js := o.mset.srv, o.srv.js.Load()
acc, stream, name, isDirect := o.acc.Name, o.stream, o.name, o.cfg.Direct
var qch, cqch chan struct{}
if o.srv != nil {
qch = o.srv.quitCh
}
o.mu.Unlock()
if js != nil {
cqch = js.clusterQuitC()
}

// Useful for pprof.
setGoRoutineLabels(pprofLabels{
"account": acc,
"stream": stream,
"consumer": name,
})

// We will delete locally regardless.
defer o.delete()

// If we are clustered, check if we still have this consumer assigned.
// If we do forward a proposal to delete ourselves to the metacontroller leader.
@@ -1626,42 +1668,40 @@ func (o *consumer) deleteNotActive() {
if ca != nil && cc != nil {
// Check to make sure we went away.
// Don't think this needs to be a monitored go routine.
go func() {
const (
startInterval = 30 * time.Second
maxInterval = 5 * time.Minute
)
jitter := time.Duration(rand.Int63n(int64(startInterval)))
interval := startInterval + jitter
ticker := time.NewTicker(interval)
defer ticker.Stop()
for range ticker.C {
js.mu.RLock()
if js.shuttingDown {
js.mu.RUnlock()
return
}
nca := js.consumerAssignment(acc, stream, name)
js.mu.RUnlock()
// Make sure this is not a new consumer with the same name.
if nca != nil && nca == ca {
s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name)
meta.ForwardProposal(removeEntry)
if interval < maxInterval {
interval *= 2
ticker.Reset(interval)
}
continue
}
// We saw that consumer has been removed, all done.
jitter := time.Duration(rand.Int63n(int64(cnaStart)))
interval := cnaStart + jitter
ticker := time.NewTicker(interval)
defer ticker.Stop()
for {
select {
case <-ticker.C:
case <-qch:
return
case <-cqch:
return
}
}()
js.mu.RLock()
if js.shuttingDown {
js.mu.RUnlock()
return
}
nca := js.consumerAssignment(acc, stream, name)
js.mu.RUnlock()
// Make sure this is not a new consumer with the same name.
if nca != nil && nca == ca {
s.Warnf("Consumer assignment for '%s > %s > %s' not cleaned up, retrying", acc, stream, name)
meta.ForwardProposal(removeEntry)
if interval < cnaMax {
interval *= 2
ticker.Reset(interval)
}
continue
}
// We saw that consumer has been removed, all done.
return
}
}
}

// We will delete here regardless.
o.delete()
}

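Aside: the retry loop that deleteNotActive now runs inline uses a jittered, exponentially growing ticker that also honors quit channels. A condensed sketch of that shape, where retryWithBackoff is a hypothetical helper written for illustration:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

// retryWithBackoff runs check on a jittered ticker, doubling the interval
// after each failed attempt up to max, and stops on success or when quit
// closes.
func retryWithBackoff(start, max time.Duration, check func() bool, quit <-chan struct{}) {
	// Jitter spreads retries out so many goroutines do not fire in lockstep.
	interval := start + time.Duration(rand.Int63n(int64(start)))
	ticker := time.NewTicker(interval)
	defer ticker.Stop()
	for {
		select {
		case <-ticker.C:
		case <-quit:
			return
		}
		if check() {
			return
		}
		if interval < max {
			interval *= 2
			ticker.Reset(interval)
		}
	}
}

func main() {
	quit := make(chan struct{})
	attempts := 0
	retryWithBackoff(10*time.Millisecond, 80*time.Millisecond, func() bool {
		attempts++
		fmt.Println("attempt", attempts)
		return attempts >= 3
	}, quit)
}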
func (o *consumer) watchGWinterest() {
@@ -1709,11 +1749,16 @@ func (o *consumer) hasMaxDeliveries(seq uint64) bool {
if o.maxp > 0 && len(o.pending) >= o.maxp {
o.signalNewMessages()
}
// Cleanup our tracking.
delete(o.pending, seq)
if o.rdc != nil {
delete(o.rdc, seq)
// Make sure to remove from pending.
if p, ok := o.pending[seq]; ok && p != nil {
delete(o.pending, seq)
o.updateDelivered(p.Sequence, seq, dc, p.Timestamp)
}
// Ensure redelivered state is set, if not already.
if o.rdc == nil {
o.rdc = make(map[uint64]uint64)
}
o.rdc[seq] = dc
return true
}
return false
@@ -1739,7 +1784,7 @@ func (o *consumer) forceExpirePending() {
p.Timestamp += off
}
}
o.ptmr.Reset(o.ackWait(0))
o.resetPtmr(o.ackWait(0))
}
o.signalNewMessages()
}
@@ -1824,9 +1869,6 @@ func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error {
if cfg.FlowControl != ncfg.FlowControl {
return errors.New("flow control can not be updated")
}
if cfg.MaxWaiting != ncfg.MaxWaiting {
return errors.New("max waiting can not be updated")
}

// Deliver Subject is conditional on if its bound.
if cfg.DeliverSubject != ncfg.DeliverSubject {
@@ -1841,8 +1883,12 @@ func (acc *Account) checkNewConsumerConfig(cfg, ncfg *ConsumerConfig) error {
}
}

if cfg.MaxWaiting != ncfg.MaxWaiting {
return errors.New("max waiting can not be updated")
}

// Check if BackOff is defined, MaxDeliver is within range.
if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver != -1 && ncfg.MaxDeliver <= lbo {
if lbo := len(ncfg.BackOff); lbo > 0 && ncfg.MaxDeliver != -1 && lbo > ncfg.MaxDeliver {
return NewJSConsumerMaxDeliverBackoffError()
}

@@ -1882,7 +1928,7 @@ func (o *consumer) updateConfig(cfg *ConsumerConfig) error {
// AckWait
if cfg.AckWait != o.cfg.AckWait {
if o.ptmr != nil {
o.ptmr.Reset(100 * time.Millisecond)
o.resetPtmr(100 * time.Millisecond)
}
}
// Rate Limit
@@ -1940,7 +1986,7 @@ func (o *consumer) updateConfig(cfg *ConsumerConfig) error {
if len(o.subjf) == 1 {
o.filters = nil
} else {
o.filters = NewSublistWithCache()
o.filters = NewSublistNoCache()
for _, filter := range o.subjf {
o.filters.Insert(&subscription{subject: []byte(filter.subject)})
}
@@ -2205,9 +2251,7 @@ func (o *consumer) updateDelivered(dseq, sseq, dc uint64, ts int64) {
n += binary.PutUvarint(b[n:], dc)
n += binary.PutVarint(b[n:], ts)
o.propose(b[:n])
}
if o.store != nil {
// Update local state always.
} else if o.store != nil {
o.store.UpdateDelivered(dseq, sseq, dc, ts)
}
// Update activity.
@@ -2377,12 +2421,7 @@ func (o *consumer) processNak(sseq, dseq, dc uint64, nak []byte) {
Domain: o.srv.getOpts().JetStreamDomain,
}

j, err := json.Marshal(e)
if err != nil {
return
}

o.sendAdvisory(o.nakEventT, j)
o.sendAdvisory(o.nakEventT, e)

// Check to see if we have delays attached.
if len(nak) > len(AckNak) {
@@ -2413,7 +2452,7 @@ func (o *consumer) processNak(sseq, dseq, dc uint64, nak []byte) {
if o.ptmr != nil {
// Want checkPending to run and figure out the next timer ttl.
// TODO(dlc) - We could optimize this maybe a bit more and track when we expect the timer to fire.
o.ptmr.Reset(10 * time.Millisecond)
o.resetPtmr(10 * time.Millisecond)
}
}
// Nothing else for use to do now so return.
@@ -2457,15 +2496,8 @@ func (o *consumer) processTerm(sseq, dseq, dc uint64, reason, reply string) bool
Domain: o.srv.getOpts().JetStreamDomain,
}

j, err := json.Marshal(e)
if err != nil {
// We had an error during the marshal, so we can't send the advisory,
// but we still need to tell the caller that the ack was processed.
return ackedInPlace
}

subj := JSAdvisoryConsumerMsgTerminatedPre + "." + o.stream + "." + o.name
o.sendAdvisory(subj, j)
o.sendAdvisory(subj, e)
return ackedInPlace
}

@@ -2547,11 +2579,7 @@ func (o *consumer) applyState(state *ConsumerState) {
if o.cfg.AckWait < delay {
delay = o.ackWait(0)
}
if o.ptmr == nil {
o.ptmr = time.AfterFunc(delay, o.checkPending)
} else {
o.ptmr.Reset(delay)
}
o.resetPtmr(delay)
}
}

@@ -2666,23 +2694,20 @@ func (o *consumer) infoWithSnapAndReply(snap bool, reply string) *ConsumerInfo {
TimeStamp: time.Now().UTC(),
}

// If we are replicated and we are not the leader or we are filtered, we need to pull certain data from our store.
isLeader := o.isLeader()
if rg != nil && rg.node != nil && o.store != nil && (!isLeader || o.isFiltered()) {
// If we are replicated, we need to pull certain data from our store.
if rg != nil && rg.node != nil && o.store != nil {
state, err := o.store.BorrowState()
if err != nil {
o.mu.Unlock()
return nil
}
if !isLeader {
info.Delivered.Consumer, info.Delivered.Stream = state.Delivered.Consumer, state.Delivered.Stream
info.AckFloor.Consumer, info.AckFloor.Stream = state.AckFloor.Consumer, state.AckFloor.Stream
// If we are the leader we could have o.sseq that is skipped ahead.
// To maintain consistency in reporting (e.g. jsz) we always take the state for our delivered/ackfloor stream sequence.
info.Delivered.Consumer, info.Delivered.Stream = state.Delivered.Consumer, state.Delivered.Stream
info.AckFloor.Consumer, info.AckFloor.Stream = state.AckFloor.Consumer, state.AckFloor.Stream
if !o.isLeader() {
info.NumAckPending = len(state.Pending)
info.NumRedelivered = len(state.Redelivered)
} else {
// Since we are filtered and we are the leader we could have o.sseq that is skipped ahead.
// To maintain consistency in reporting (e.g. jsz) we take the state for our delivered stream sequence.
info.Delivered.Stream = state.Delivered.Stream
}
}

@@ -2767,12 +2792,7 @@ func (o *consumer) sampleAck(sseq, dseq, dc uint64) {
Domain: o.srv.getOpts().JetStreamDomain,
}

j, err := json.Marshal(e)
if err != nil {
return
}

o.sendAdvisory(o.ackEventT, j)
o.sendAdvisory(o.ackEventT, e)
}

// Process an ACK.
@@ -2786,14 +2806,29 @@ func (o *consumer) processAckMsg(sseq, dseq, dc uint64, reply string, doSample b
return false
}

mset := o.mset
if mset == nil || mset.closed.Load() {
o.mu.Unlock()
return false
}

// Check if this ack is above the current pointer to our next to deliver.
// This could happen on a cooperative takeover with high speed deliveries.
if sseq >= o.sseq {
o.sseq = sseq + 1
}

mset := o.mset
if mset == nil || mset.closed.Load() {
// Let's make sure this is valid.
// This is only received on the consumer leader, so should never be higher
// than the last stream sequence. But could happen if we've just become
// consumer leader, and we are not up-to-date on the stream yet.
var ss StreamState
mset.store.FastState(&ss)
if sseq > ss.LastSeq {
o.srv.Warnf("JetStream consumer '%s > %s > %s' ACK sequence %d past last stream sequence of %d",
o.acc.Name, o.stream, o.name, sseq, ss.LastSeq)
// FIXME(dlc) - For 2.11 onwards should we return an error here to the caller?
}
// Even though another leader must have delivered a message with this sequence, we must not adjust
// the current pointer. This could otherwise result in a stuck consumer, where messages below this
// sequence can't be redelivered, and we'll have incorrect pending state and ack floors.
o.mu.Unlock()
return false
}
@@ -2841,7 +2876,8 @@ func (o *consumer) processAckMsg(sseq, dseq, dc uint64, reply string, doSample b
// no-op
if dseq <= o.adflr || sseq <= o.asflr {
o.mu.Unlock()
return ackInPlace
// Return true to let caller respond back to the client.
return true
}
if o.maxp > 0 && len(o.pending) >= o.maxp {
needSignal = true
@@ -2957,6 +2993,7 @@ func (o *consumer) needAck(sseq uint64, subj string) bool {
var needAck bool
var asflr, osseq uint64
var pending map[uint64]*Pending
var rdc map[uint64]uint64

o.mu.RLock()
defer o.mu.RUnlock()
@@ -2981,7 +3018,7 @@ func (o *consumer) needAck(sseq uint64, subj string) bool {
}
if o.isLeader() {
asflr, osseq = o.asflr, o.sseq
pending = o.pending
pending, rdc = o.pending, o.rdc
} else {
if o.store == nil {
return false
@@ -2992,7 +3029,7 @@ func (o *consumer) needAck(sseq uint64, subj string) bool {
return sseq > o.asflr && !o.isFiltered()
}
// If loading state as here, the osseq is +1.
asflr, osseq, pending = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending
asflr, osseq, pending, rdc = state.AckFloor.Stream, state.Delivered.Stream+1, state.Pending, state.Redelivered
}

switch o.cfg.AckPolicy {
@@ -3008,9 +3045,23 @@ func (o *consumer) needAck(sseq uint64, subj string) bool {
}
}

// Finally check if redelivery of this message is tracked.
// If the message is not pending, it should be preserved if it reached max delivery.
if !needAck {
_, needAck = rdc[sseq]
}

return needAck
}

// Used in nextReqFromMsg, since the json.Unmarshal causes the request
// struct to escape to the heap always. This should reduce GC pressure.
var jsGetNextPool = sync.Pool{
New: func() any {
return &JSApiConsumerGetNextRequest{}
},
}

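Aside: jsGetNextPool above recycles request structs so that json.Unmarshal does not force a fresh heap allocation per request. A self-contained sketch of the get, zero, put pattern; the request type here is a made-up stand-in, not a server type:

package main

import (
	"encoding/json"
	"fmt"
	"sync"
)

// getNextRequest is a stand-in for a frequently decoded request type.
type getNextRequest struct {
	Batch  int  `json:"batch"`
	NoWait bool `json:"no_wait"`
}

var reqPool = sync.Pool{
	New: func() any { return new(getNextRequest) },
}

// decode borrows a struct from the pool; the caller returns it via done.
// Zeroing before Put avoids leaking one request's fields into the next.
func decode(data []byte) (req *getNextRequest, done func(), err error) {
	req = reqPool.Get().(*getNextRequest)
	done = func() {
		*req = getNextRequest{}
		reqPool.Put(req)
	}
	if err = json.Unmarshal(data, req); err != nil {
		done()
		return nil, nil, err
	}
	return req, done, nil
}

func main() {
	req, done, err := decode([]byte(`{"batch":10,"no_wait":true}`))
	if err != nil {
		panic(err)
	}
	fmt.Println(req.Batch, req.NoWait)
	done()
}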
// Helper for the next message requests.
func nextReqFromMsg(msg []byte) (time.Time, int, int, bool, time.Duration, time.Time, error) {
req := bytes.TrimSpace(msg)
@@ -3020,7 +3071,11 @@ func nextReqFromMsg(msg []byte) (time.Time, int, int, bool, time.Duration, time.
return time.Time{}, 1, 0, false, 0, time.Time{}, nil

case req[0] == '{':
var cr JSApiConsumerGetNextRequest
cr := jsGetNextPool.Get().(*JSApiConsumerGetNextRequest)
defer func() {
*cr = JSApiConsumerGetNextRequest{}
jsGetNextPool.Put(cr)
}()
if err := json.Unmarshal(req, &cr); err != nil {
return time.Time{}, -1, 0, false, 0, time.Time{}, err
}
@@ -3420,6 +3475,7 @@ func (o *consumer) processNextMsgRequest(reply string, msg []byte) {

if err := o.waiting.add(wr); err != nil {
sendErr(409, "Exceeded MaxWaiting")
wr.recycle()
return
}
o.signalNewMessages()
@@ -3453,7 +3509,10 @@ func (o *consumer) deliveryCount(seq uint64) uint64 {
if o.rdc == nil {
return 1
}
return o.rdc[seq]
if dc := o.rdc[seq]; dc >= 1 {
return dc
}
return 1
}

// Increase the delivery count for this message.
@@ -3492,12 +3551,7 @@ func (o *consumer) notifyDeliveryExceeded(sseq, dc uint64) {
Domain: o.srv.getOpts().JetStreamDomain,
}

j, err := json.Marshal(e)
if err != nil {
return
}

o.sendAdvisory(o.deliveryExcEventT, j)
o.sendAdvisory(o.deliveryExcEventT, e)
}

// Check if the candidate subject matches a filter if its present.
@@ -3573,17 +3627,23 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, error) {
}
continue
}
if seq > 0 {
pmsg := getJSPubMsgFromPool()
sm, err := o.mset.store.LoadMsg(seq, &pmsg.StoreMsg)
if sm == nil || err != nil {
pmsg.returnToPool()
pmsg, dc = nil, 0
// Adjust back deliver count.
o.decDeliveryCount(seq)
}
return pmsg, dc, err
pmsg := getJSPubMsgFromPool()
sm, err := o.mset.store.LoadMsg(seq, &pmsg.StoreMsg)
if sm == nil || err != nil {
pmsg.returnToPool()
pmsg, dc = nil, 0
// Adjust back deliver count.
o.decDeliveryCount(seq)
}
// Message was scheduled for redelivery but was removed in the meantime.
if err == ErrStoreMsgNotFound || err == errDeletedMsg {
// This is a race condition where the message is still in o.pending and
// scheduled for redelivery, but it has been removed from the stream.
// o.processTerm is called in a goroutine so could run after we get here.
// That will correct the pending state and delivery/ack floors, so just skip here.
continue
}
return pmsg, dc, err
}
}

@@ -3625,7 +3685,7 @@ func (o *consumer) getNextMsg() (*jsPubMsg, uint64, error) {
// Check if we are multi-filtered or not.
if filters != nil {
sm, sseq, err = store.LoadNextMsgMulti(filters, fseq, &pmsg.StoreMsg)
} else if subjf != nil { // Means single filtered subject since o.filters means > 1.
} else if len(subjf) > 0 { // Means single filtered subject since o.filters means > 1.
filter, wc := subjf[0].subject, subjf[0].hasWildcard
sm, sseq, err = store.LoadNextMsg(filter, wc, fseq, &pmsg.StoreMsg)
} else {
@@ -3766,10 +3826,7 @@ func (o *consumer) checkAckFloor() {
// Check if this message was pending.
o.mu.RLock()
p, isPending := o.pending[seq]
var rdc uint64 = 1
if o.rdc != nil {
rdc = o.rdc[seq]
}
rdc := o.deliveryCount(seq)
o.mu.RUnlock()
// If it was pending for us, get rid of it.
if isPending {
@@ -3787,10 +3844,7 @@ func (o *consumer) checkAckFloor() {
if p != nil {
dseq = p.Sequence
}
var rdc uint64 = 1
if o.rdc != nil {
rdc = o.rdc[seq]
}
rdc := o.deliveryCount(seq)
toTerm = append(toTerm, seq, dseq, rdc)
}
}
@@ -3817,7 +3871,7 @@ func (o *consumer) checkAckFloor() {
// We will set it explicitly to 1 behind our current lowest in pending, or if
// pending is empty, to our current delivered -1.
const minOffThreshold = 50
if o.asflr < ss.FirstSeq-minOffThreshold {
if ss.FirstSeq >= minOffThreshold && o.asflr < ss.FirstSeq-minOffThreshold {
var psseq, pdseq uint64
for seq, p := range o.pending {
if psseq == 0 || seq < psseq {
@@ -4270,37 +4324,15 @@ func (o *consumer) calculateNumPending() (npc, npf uint64) {
}

isLastPerSubject := o.cfg.DeliverPolicy == DeliverLastPerSubject
filters, subjf := o.filters, o.subjf

// Deliver Last Per Subject calculates num pending differently.
if isLastPerSubject {
// Consumer without filters.
if o.subjf == nil {
return o.mset.store.NumPending(o.sseq, _EMPTY_, isLastPerSubject)
}
// Consumer with filters.
for _, filter := range o.subjf {
lnpc, lnpf := o.mset.store.NumPending(o.sseq, filter.subject, isLastPerSubject)
npc += lnpc
if lnpf > npf {
npf = lnpf // Always last
}
}
return npc, npf
if filters != nil {
return o.mset.store.NumPendingMulti(o.sseq, filters, isLastPerSubject)
} else if len(subjf) > 0 {
filter := subjf[0].subject
return o.mset.store.NumPending(o.sseq, filter, isLastPerSubject)
}
// Every other Delivery Policy is handled here.
// Consumer without filters.
if o.subjf == nil {
return o.mset.store.NumPending(o.sseq, _EMPTY_, false)
}
// Consumer with filters.
for _, filter := range o.subjf {
lnpc, lnpf := o.mset.store.NumPending(o.sseq, filter.subject, false)
npc += lnpc
if lnpf > npf {
npf = lnpf // Always last
}
}
return npc, npf
return o.mset.store.NumPending(o.sseq, _EMPTY_, isLastPerSubject)
}

func convertToHeadersOnly(pmsg *jsPubMsg) {
@@ -4465,9 +4497,24 @@ func (o *consumer) trackPending(sseq, dseq uint64) {
if o.pending == nil {
o.pending = make(map[uint64]*Pending)
}
if o.ptmr == nil {
o.ptmr = time.AfterFunc(o.ackWait(0), o.checkPending)

// We could have a backoff that set a timer higher than what we need for this message.
// In that case, reset to lowest backoff required for a message redelivery.
minDelay := o.ackWait(0)
if l := len(o.cfg.BackOff); l > 0 {
bi := int(o.rdc[sseq])
if bi < 0 {
bi = 0
} else if bi >= l {
bi = l - 1
}
minDelay = o.ackWait(o.cfg.BackOff[bi])
}
minDeadline := time.Now().Add(minDelay)
if o.ptmr == nil || o.ptmrEnd.After(minDeadline) {
o.resetPtmr(minDelay)
}

if p, ok := o.pending[sseq]; ok {
// Update timestamp but keep original consumer delivery sequence.
// So do not update p.Sequence.
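Aside between hunks: trackPending above indexes the configured BackOff list by a message's delivery count and clamps the index to the list bounds. The lookup in isolation, with backoffDelay as a hypothetical helper:

package main

import (
	"fmt"
	"time"
)

// backoffDelay picks the redelivery delay for a message: the delivery
// count indexes into the backoff list, clamped to the last entry, and an
// empty list falls back to the consumer's ack wait.
func backoffDelay(backoff []time.Duration, deliveries int, ackWait time.Duration) time.Duration {
	if len(backoff) == 0 {
		return ackWait
	}
	bi := deliveries
	if bi < 0 {
		bi = 0
	} else if bi >= len(backoff) {
		bi = len(backoff) - 1 // stay on the longest delay
	}
	return backoff[bi]
}

func main() {
	bo := []time.Duration{time.Second, 5 * time.Second, 30 * time.Second}
	for d := 0; d < 5; d++ {
		fmt.Println(d, backoffDelay(bo, d, 30*time.Second))
	}
}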
@@ -4590,24 +4637,21 @@ func (o *consumer) removeFromRedeliverQueue(seq uint64) bool {

// Checks the pending messages.
func (o *consumer) checkPending() {
o.mu.RLock()
o.mu.Lock()
defer o.mu.Unlock()

mset := o.mset
// On stop, mset and timer will be nil.
if o.closed || mset == nil || o.ptmr == nil {
stopAndClearTimer(&o.ptmr)
o.mu.RUnlock()
o.stopAndClearPtmr()
return
}
o.mu.RUnlock()

var shouldUpdateState bool
var state StreamState
mset.store.FastState(&state)
fseq := state.FirstSeq

o.mu.Lock()
defer o.mu.Unlock()

now := time.Now().UnixNano()
ttl := int64(o.cfg.AckWait)
next := int64(o.ackWait(0))
@@ -4623,11 +4667,7 @@ func (o *consumer) checkPending() {
check := len(o.pending) > 1024
for seq, p := range o.pending {
if check && atomic.LoadInt64(&o.awl) > 0 {
if o.ptmr == nil {
o.ptmr = time.AfterFunc(100*time.Millisecond, o.checkPending)
} else {
o.ptmr.Reset(100 * time.Millisecond)
}
o.resetPtmr(100 * time.Millisecond)
return
}
// Check if these are no longer valid.
@@ -4694,15 +4734,10 @@ func (o *consumer) checkPending() {
}

if len(o.pending) > 0 {
delay := time.Duration(next)
if o.ptmr == nil {
o.ptmr = time.AfterFunc(delay, o.checkPending)
} else {
o.ptmr.Reset(o.ackWait(delay))
}
o.resetPtmr(time.Duration(next))
} else {
// Make sure to stop timer and clear out any re delivery queues
stopAndClearTimer(&o.ptmr)
o.stopAndClearPtmr()
o.rdq = nil
o.rdqi.Empty()
o.pending = nil
@@ -4890,7 +4925,7 @@ func (o *consumer) selectStartingSeqNo() {
for _, filter := range o.subjf {
// Use first sequence since this is more optimized atm.
ss := o.mset.store.FilteredState(state.FirstSeq, filter.subject)
if ss.First > o.sseq && ss.First < nseq {
if ss.First >= o.sseq && ss.First < nseq {
nseq = ss.First
}
}
@@ -5188,7 +5223,7 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
o.client = nil
sysc := o.sysc
o.sysc = nil
stopAndClearTimer(&o.ptmr)
o.stopAndClearPtmr()
stopAndClearTimer(&o.dtmr)
stopAndClearTimer(&o.gwdtmr)
delivery := o.cfg.DeliverSubject
@@ -5242,12 +5277,6 @@ func (o *consumer) stopWithFlags(dflag, sdflag, doSignal, advisory bool) error {
if dflag {
n.Delete()
} else {
// Try to install snapshot on clean exit
if o.store != nil && (o.retention != LimitsPolicy || n.NeedSnapshot()) {
if snap, err := o.store.EncodedState(); err == nil {
n.InstallSnapshot(snap)
}
}
n.Stop()
}
}
@@ -5329,12 +5358,14 @@ func (o *consumer) cleanupNoInterestMessages(mset *stream, ignoreInterest bool)
return
}

mset.mu.RUnlock()
mset.mu.Lock()
for seq := start; seq <= stop; seq++ {
if mset.noInterest(seq, co) {
rmseqs = append(rmseqs, seq)
}
}
mset.mu.RUnlock()
mset.mu.Unlock()

// These can be removed.
for _, seq := range rmseqs {
@@ -5379,6 +5410,7 @@ func (o *consumer) requestNextMsgSubject() string {

func (o *consumer) decStreamPending(sseq uint64, subj string) {
o.mu.Lock()

// Update our cached num pending only if we think deliverMsg has not done so.
if sseq >= o.sseq && o.isFilteredMatch(subj) {
o.npc--
@@ -5386,10 +5418,8 @@ func (o *consumer) decStreamPending(sseq uint64, subj string) {

// Check if this message was pending.
p, wasPending := o.pending[sseq]
var rdc uint64 = 1
if o.rdc != nil {
rdc = o.rdc[sseq]
}
rdc := o.deliveryCount(sseq)

o.mu.Unlock()

// If it was pending process it like an ack.
@@ -5409,7 +5439,7 @@ func (o *consumer) account() *Account {

// Creates a sublist for consumer.
// All subjects share the same callback.
func (o *consumer) signalSubs() []*subscription {
func (o *consumer) signalSubs() []string {
o.mu.Lock()
defer o.mu.Unlock()

@@ -5417,15 +5447,15 @@ func (o *consumer) signalSubs() []*subscription {
return o.sigSubs
}

subs := []*subscription{}
if o.subjf == nil {
subs = append(subs, &subscription{subject: []byte(fwcs), icb: o.processStreamSignal})
if len(o.subjf) == 0 {
subs := []string{fwcs}
o.sigSubs = subs
return subs
}

subs := make([]string, 0, len(o.subjf))
for _, filter := range o.subjf {
subs = append(subs, &subscription{subject: []byte(filter.subject), icb: o.processStreamSignal})
subs = append(subs, filter.subject)
}
o.sigSubs = subs
return subs
@@ -5435,7 +5465,7 @@ func (o *consumer) signalSubs() []*subscription {
// We know that this subject matches us by how the parent handles registering us with the signaling sublist,
// but we must check if we are leader.
// We do need the sequence of the message however and we use the msg as the encoded seq.
func (o *consumer) processStreamSignal(_ *subscription, _ *client, _ *Account, subject, _ string, seqb []byte) {
func (o *consumer) processStreamSignal(seq uint64) {
// We can get called here now when not leader, so bail fast
// and without acquiring any locks.
if !o.leader.Load() {
@@ -5446,10 +5476,6 @@ func (o *consumer) processStreamSignal(_ *subscription, _ *client, _ *Account, s
if o.mset == nil {
return
}

var le = binary.LittleEndian
seq := le.Uint64(seqb)

if seq > o.npf {
o.npc++
}
@@ -5524,6 +5550,7 @@ func (o *consumer) isMonitorRunning() bool {

// If we detect that our ackfloor is higher than the stream's last sequence, return this error.
var errAckFloorHigherThanLastSeq = errors.New("consumer ack floor is higher than streams last sequence")
var errAckFloorInvalid = errors.New("consumer ack floor is invalid")

// If we are a consumer of an interest or workqueue policy stream, process that state and make sure consistent.
func (o *consumer) checkStateForInterestStream(ss *StreamState) error {
@@ -5553,7 +5580,7 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error {
asflr := state.AckFloor.Stream
// Protect ourselves against rolling backwards.
if asflr&(1<<63) != 0 {
return nil
return errAckFloorInvalid
}

// Check if the underlying stream's last sequence is less than our floor.
@@ -5572,6 +5599,7 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error {
fseq = chkfloor
}

var retryAsflr uint64
for seq = fseq; asflr > 0 && seq <= asflr; seq++ {
if filters != nil {
_, nseq, err = store.LoadNextMsgMulti(filters, seq, &smv)
@@ -5584,14 +5612,24 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error {
}
// Only ack though if no error and seq <= ack floor.
if err == nil && seq <= asflr {
mset.ackMsg(o, seq)
didRemove := mset.ackMsg(o, seq)
// Removing the message could fail. For example if we're behind on stream applies.
// Overwrite retry floor (only the first time) to allow us to check next time if the removal was successful.
if didRemove && retryAsflr == 0 {
retryAsflr = seq
}
}
}
// If retry floor was not overwritten, set to ack floor+1, we don't need to account for any retries below it.
if retryAsflr == 0 {
retryAsflr = asflr + 1
}

o.mu.Lock()
// Update our check floor.
if seq > o.chkflr {
o.chkflr = seq
// Check floor must never be greater than ack floor+1, otherwise subsequent calls to this function would skip work.
if retryAsflr > o.chkflr {
o.chkflr = retryAsflr
}
// See if we need to process this update if our parent stream is not a limits policy stream.
state, _ = o.store.State()
@@ -5610,3 +5648,17 @@ func (o *consumer) checkStateForInterestStream(ss *StreamState) error {
}
return nil
}

func (o *consumer) resetPtmr(delay time.Duration) {
if o.ptmr == nil {
o.ptmr = time.AfterFunc(delay, o.checkPending)
} else {
o.ptmr.Reset(delay)
}
o.ptmrEnd = time.Now().Add(delay)
}

func (o *consumer) stopAndClearPtmr() {
stopAndClearTimer(&o.ptmr)
o.ptmrEnd = time.Time{}
}
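Aside: the new resetPtmr/stopAndClearPtmr pair centralizes the "create with AfterFunc the first time, Reset thereafter" dance and records the scheduled deadline so callers can tell whether an earlier fire time is needed. A generic sketch of that pattern under the same assumptions; deadlineTimer is an illustrative type, not server code:

package main

import (
	"fmt"
	"sync"
	"time"
)

// deadlineTimer wraps a single reusable time.Timer and remembers when it
// is due, like the ptmr/ptmrEnd pair above.
type deadlineTimer struct {
	mu   sync.Mutex
	t    *time.Timer
	end  time.Time
	fire func()
}

func (d *deadlineTimer) resetLocked(delay time.Duration) {
	if d.t == nil {
		d.t = time.AfterFunc(delay, d.fire)
	} else {
		d.t.Reset(delay)
	}
	d.end = time.Now().Add(delay)
}

func (d *deadlineTimer) reset(delay time.Duration) {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.resetLocked(delay)
}

// resetIfSooner only rearms the timer when the requested deadline is
// earlier than the one already scheduled.
func (d *deadlineTimer) resetIfSooner(delay time.Duration) {
	d.mu.Lock()
	defer d.mu.Unlock()
	if d.t == nil || d.end.After(time.Now().Add(delay)) {
		d.resetLocked(delay)
	}
}

func main() {
	done := make(chan struct{})
	d := &deadlineTimer{fire: func() { close(done) }}
	d.reset(time.Second)
	d.resetIfSooner(10 * time.Millisecond) // pulls the deadline in
	<-done
	fmt.Println("fired")
}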
2 vendor/github.com/nats-io/nats-server/v2/server/dirstore.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2012-2021 The NATS Authors
// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2 vendor/github.com/nats-io/nats-server/v2/server/disk_avail_wasm.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2022 The NATS Authors
// Copyright 2022-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2 vendor/github.com/nats-io/nats-server/v2/server/disk_avail_windows.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2020 The NATS Authors
// Copyright 2020-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2 vendor/github.com/nats-io/nats-server/v2/server/errors.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2012-2021 The NATS Authors
// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
134 vendor/github.com/nats-io/nats-server/v2/server/events.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2018-2023 The NATS Authors
// Copyright 2018-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -315,6 +315,37 @@ type ClientInfo struct {
Nonce string `json:"nonce,omitempty"`
}

// forAssignmentSnap returns the minimum amount of ClientInfo we need for assignment snapshots.
func (ci *ClientInfo) forAssignmentSnap() *ClientInfo {
return &ClientInfo{
Account: ci.Account,
Service: ci.Service,
Cluster: ci.Cluster,
}
}

// forProposal returns the minimum amount of ClientInfo we need for assignment proposals.
func (ci *ClientInfo) forProposal() *ClientInfo {
if ci == nil {
return nil
}
cci := *ci
cci.Jwt = _EMPTY_
cci.IssuerKey = _EMPTY_
return &cci
}

// forAdvisory returns the minimum amount of ClientInfo we need for JS advisory events.
func (ci *ClientInfo) forAdvisory() *ClientInfo {
if ci == nil {
return nil
}
cci := *ci
cci.Jwt = _EMPTY_
cci.Alternates = nil
return &cci
}

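Aside: these helpers trim a ClientInfo by value-copying the struct and blanking the heavy fields, so the original is never mutated while the copy becomes cheap to serialize. A generic sketch of the same idea; Event and forWire are illustrative names, not server types:

package main

import (
	"encoding/json"
	"fmt"
)

// Event carries a couple of heavy fields we do not want on the wire.
type Event struct {
	Account string   `json:"account,omitempty"`
	Jwt     string   `json:"jwt,omitempty"`
	Alts    []string `json:"alternates,omitempty"`
}

// forWire returns a trimmed copy: a plain value copy of the receiver with
// the expensive fields zeroed. The caller's Event is left intact.
func (e *Event) forWire() *Event {
	if e == nil {
		return nil
	}
	ce := *e // shallow copy
	ce.Jwt = ""
	ce.Alts = nil
	return &ce
}

func main() {
	ev := &Event{Account: "A", Jwt: "ey...", Alts: []string{"b", "c"}}
	out, _ := json.Marshal(ev.forWire())
	fmt.Println(string(out))  // {"account":"A"}
	fmt.Println(len(ev.Alts)) // original intact: 2
}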
// ServerStats hold various statistics that we will periodically send out.
type ServerStats struct {
Start time.Time `json:"start"`
@@ -1184,6 +1215,14 @@ func (s *Server) initEventTracking() {
optz := &ExpvarzEventOptions{}
s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (any, error) { return s.expvarz(optz), nil })
},
"IPQUEUESZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) {
optz := &IpqueueszEventOptions{}
s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (any, error) { return s.Ipqueuesz(&optz.IpqueueszOptions), nil })
},
"RAFTZ": func(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) {
optz := &RaftzEventOptions{}
s.zReq(c, reply, hdr, msg, &optz.EventFilterOptions, optz, func() (any, error) { return s.Raftz(&optz.RaftzOptions), nil })
},
}
profilez := func(_ *subscription, c *client, _ *Account, _, rply string, rmsg []byte) {
hdr, msg := c.msgParts(rmsg)
@@ -1890,6 +1929,18 @@ type ExpvarzEventOptions struct {
EventFilterOptions
}

// In the context of system events, IpqueueszEventOptions are options passed to Ipqueuesz
type IpqueueszEventOptions struct {
EventFilterOptions
IpqueueszOptions
}

// In the context of system events, RaftzEventOptions are options passed to Raftz
type RaftzEventOptions struct {
EventFilterOptions
RaftzOptions
}

// returns true if the request does NOT apply to this server and can be ignored.
// DO NOT hold the server lock when
func (s *Server) filterRequest(fOpts *EventFilterOptions) bool {
@@ -1938,7 +1989,9 @@ type ServerAPIResponse struct {
compress compressionType
}

// Specialized response types for unmarshalling.
// Specialized response types for unmarshalling. These structures are not
// used in the server code and only there for users of the Z endpoints to
// unmarshal the data without having to create these structs in their code

// ServerAPIConnzResponse is the response type connz
type ServerAPIConnzResponse struct {
@@ -1947,6 +2000,83 @@ type ServerAPIConnzResponse struct {
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIRoutezResponse is the response type for routez
type ServerAPIRoutezResponse struct {
Server *ServerInfo `json:"server"`
Data *Routez `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIGatewayzResponse is the response type for gatewayz
type ServerAPIGatewayzResponse struct {
Server *ServerInfo `json:"server"`
Data *Gatewayz `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIJszResponse is the response type for jsz
type ServerAPIJszResponse struct {
Server *ServerInfo `json:"server"`
Data *JSInfo `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIHealthzResponse is the response type for healthz
type ServerAPIHealthzResponse struct {
Server *ServerInfo `json:"server"`
Data *HealthStatus `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIVarzResponse is the response type for varz
type ServerAPIVarzResponse struct {
Server *ServerInfo `json:"server"`
Data *Varz `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPISubszResponse is the response type for subsz
type ServerAPISubszResponse struct {
Server *ServerInfo `json:"server"`
Data *Subsz `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPILeafzResponse is the response type for leafz
type ServerAPILeafzResponse struct {
Server *ServerInfo `json:"server"`
Data *Leafz `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIAccountzResponse is the response type for accountz
type ServerAPIAccountzResponse struct {
Server *ServerInfo `json:"server"`
Data *Accountz `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIExpvarzResponse is the response type for expvarz
type ServerAPIExpvarzResponse struct {
Server *ServerInfo `json:"server"`
Data *ExpvarzStatus `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIpqueueszResponse is the response type for ipqueuesz
type ServerAPIpqueueszResponse struct {
Server *ServerInfo `json:"server"`
Data *IpqueueszStatus `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// ServerAPIRaftzResponse is the response type for raftz
type ServerAPIRaftzResponse struct {
Server *ServerInfo `json:"server"`
Data *RaftzStatus `json:"data,omitempty"`
Error *ApiError `json:"error,omitempty"`
}

// statszReq is a request for us to respond with current statsz.
func (s *Server) statszReq(sub *subscription, c *client, _ *Account, subject, reply string, hdr, msg []byte) {
if !s.EventsEnabled() {
896 vendor/github.com/nats-io/nats-server/v2/server/filestore.go generated vendored
(File diff suppressed because it is too large.)

2 vendor/github.com/nats-io/nats-server/v2/server/fuzz.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2020 The NATS Authors
// Copyright 2020-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
28 vendor/github.com/nats-io/nats-server/v2/server/gateway.go generated vendored
@@ -1,4 +1,4 @@
// Copyright 2018-2023 The NATS Authors
// Copyright 2018-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -1900,7 +1900,7 @@ func (c *client) processGatewayAccountSub(accName string) error {
// the sublist if present.
// <Invoked from outbound connection's readLoop>
func (c *client) processGatewayRUnsub(arg []byte) error {
accName, subject, queue, err := c.parseUnsubProto(arg)
_, accName, subject, queue, err := c.parseUnsubProto(arg, true, false)
if err != nil {
return fmt.Errorf("processGatewaySubjectUnsub %s", err.Error())
}
@@ -2499,8 +2499,13 @@ var subPool = &sync.Pool{
// that the message is not sent to a given gateway if for instance
// it is known that this gateway has no interest in the account or
// subject, etc..
// When invoked from a LEAF connection, `checkLeafQF` should be passed as `true`
// so that we skip any queue subscription interest that is not part of the
// `c.pa.queues` filter (similar to what we do in `processMsgResults`). However,
// when processing service imports, then this boolean should be passed as `false`,
// regardless if it is a LEAF connection or not.
// <Invoked from any client connection's readLoop>
func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgroups [][]byte) bool {
func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgroups [][]byte, checkLeafQF bool) bool {
// We had some times when we were sending across a GW with no subject, and the other side would break
// due to parser error. These need to be fixed upstream but also double check here.
if len(subject) == 0 {
@@ -2577,6 +2582,21 @@ func (c *client) sendMsgToGateways(acc *Account, msg, subject, reply []byte, qgr
qsubs := qr.qsubs[i]
if len(qsubs) > 0 {
queue := qsubs[0].queue
if checkLeafQF {
// Skip any queue that is not in the leaf's queue filter.
skip := true
for _, qn := range c.pa.queues {
if bytes.Equal(queue, qn) {
skip = false
break
}
}
if skip {
continue
}
// Now we still need to check that it was not delivered
// locally by checking the given `qgroups`.
}
add := true
for _, qn := range qgroups {
if bytes.Equal(queue, qn) {
@@ -2969,7 +2989,7 @@ func (c *client) handleGatewayReply(msg []byte) (processed bool) {
// we now need to send the message with the real subject to
// gateways in case they have interest on that reply subject.
if !isServiceReply {
c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, queues)
c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, queues, false)
}
} else if c.kind == GATEWAY {
// Only if we are a gateway connection should we try to route
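Aside: the checkLeafQF branch above is a linear membership test of a queue name against the leaf's queue filter, comparing raw byte slices with bytes.Equal. The same check in isolation, with containsQueue as a hypothetical helper:

package main

import (
	"bytes"
	"fmt"
)

// containsQueue reports whether queue appears in the filter list. The
// filter is expected to be small, so a linear scan with bytes.Equal beats
// building a map or converting to strings on the hot path.
func containsQueue(filter [][]byte, queue []byte) bool {
	for _, qn := range filter {
		if bytes.Equal(queue, qn) {
			return true
		}
	}
	return false
}

func main() {
	filter := [][]byte{[]byte("workers"), []byte("auditors")}
	fmt.Println(containsQueue(filter, []byte("workers"))) // true
	fmt.Println(containsQueue(filter, []byte("loggers"))) // false
}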
532
vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go
generated
vendored
Normal file
532
vendor/github.com/nats-io/nats-server/v2/server/gsl/gsl.go
generated
vendored
Normal file
@ -0,0 +1,532 @@
|
||||
// Copyright 2025 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
package gsl
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"sync"
|
||||
|
||||
"github.com/nats-io/nats-server/v2/server/stree"
|
||||
)
|
||||
|
||||
// Sublist is a routing mechanism to handle subject distribution and
|
||||
// provides a facility to match subjects from published messages to
|
||||
// interested subscribers. Subscribers can have wildcard subjects to
|
||||
// match multiple published subjects.
|
||||
|
||||
// Common byte variables for wildcards and token separator.
|
||||
const (
|
||||
pwc = '*'
|
||||
pwcs = "*"
|
||||
fwc = '>'
|
||||
fwcs = ">"
|
||||
tsep = "."
|
||||
btsep = '.'
|
||||
_EMPTY_ = ""
|
||||
)
|
||||
|
||||
// Sublist related errors
|
||||
var (
|
||||
ErrInvalidSubject = errors.New("gsl: invalid subject")
|
||||
ErrNotFound = errors.New("gsl: no matches found")
|
||||
ErrNilChan = errors.New("gsl: nil channel")
|
||||
ErrAlreadyRegistered = errors.New("gsl: notification already registered")
|
||||
)
|
||||
|
||||
// A GenericSublist stores and efficiently retrieves subscriptions.
|
||||
type GenericSublist[T comparable] struct {
|
||||
sync.RWMutex
|
||||
root *level[T]
|
||||
count uint32
|
||||
}
|
||||
|
||||
// A node contains subscriptions and a pointer to the next level.
|
||||
type node[T comparable] struct {
|
||||
next *level[T]
|
||||
subs map[T]string // value -> subject
|
||||
}
|
||||
|
||||
// A level represents a group of nodes and special pointers to
|
||||
// wildcard nodes.
|
||||
type level[T comparable] struct {
|
||||
nodes map[string]*node[T]
|
||||
pwc, fwc *node[T]
|
||||
}
|
||||
|
||||
// Create a new default node.
|
||||
func newNode[T comparable]() *node[T] {
|
||||
return &node[T]{subs: make(map[T]string)}
|
||||
}
|
||||
|
||||
// Create a new default level.
|
||||
func newLevel[T comparable]() *level[T] {
|
||||
return &level[T]{nodes: make(map[string]*node[T])}
|
||||
}
|
||||
|
||||
// NewSublist will create a default sublist with caching enabled per the flag.
|
||||
func NewSublist[T comparable]() *GenericSublist[T] {
|
||||
return &GenericSublist[T]{root: newLevel[T]()}
|
||||
}
|
||||
|
||||
// Insert adds a subscription into the sublist
|
||||
func (s *GenericSublist[T]) Insert(subject string, value T) error {
|
||||
tsa := [32]string{}
|
||||
tokens := tsa[:0]
|
||||
start := 0
|
||||
for i := 0; i < len(subject); i++ {
|
||||
if subject[i] == btsep {
|
||||
tokens = append(tokens, subject[start:i])
|
||||
start = i + 1
|
||||
}
|
||||
}
|
||||
tokens = append(tokens, subject[start:])
|
||||
|
||||
s.Lock()
|
||||
|
||||
var sfwc bool
|
||||
var n *node[T]
|
||||
l := s.root
|
||||
|
||||
for _, t := range tokens {
|
||||
lt := len(t)
|
||||
if lt == 0 || sfwc {
|
||||
s.Unlock()
|
||||
return ErrInvalidSubject
|
||||
}
|
||||
|
||||
if lt > 1 {
|
||||
n = l.nodes[t]
|
||||
} else {
|
||||
switch t[0] {
|
||||
case pwc:
|
||||
n = l.pwc
|
||||
case fwc:
|
||||
n = l.fwc
|
||||
sfwc = true
|
||||
default:
|
||||
n = l.nodes[t]
|
||||
}
|
||||
}
|
||||
if n == nil {
|
||||
n = newNode[T]()
|
||||
if lt > 1 {
|
||||
l.nodes[t] = n
|
||||
} else {
|
||||
switch t[0] {
|
||||
case pwc:
|
||||
l.pwc = n
|
||||
case fwc:
|
||||
l.fwc = n
|
||||
default:
|
||||
l.nodes[t] = n
|
||||
}
|
||||
}
|
||||
}
|
||||
if n.next == nil {
|
||||
n.next = newLevel[T]()
|
||||
}
|
||||
l = n.next
|
||||
}
|
||||
|
||||
n.subs[value] = subject
|
||||
|
||||
s.count++
|
||||
s.Unlock()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Match will match all entries to the literal subject.
// The callback is invoked once for each matching value.
func (s *GenericSublist[T]) Match(subject string, cb func(T)) {
    s.match(subject, cb, true)
}

// MatchBytes will match all entries to the literal subject.
// The callback is invoked once for each matching value.
func (s *GenericSublist[T]) MatchBytes(subject []byte, cb func(T)) {
    s.match(string(subject), cb, true)
}

// HasInterest will return whether or not there is any interest in the subject.
// In cases where more detail is not required, this may be faster than Match.
func (s *GenericSublist[T]) HasInterest(subject string) bool {
    return s.hasInterest(subject, true, nil)
}

// NumInterest will return the number of subs interested in the subject.
// In cases where more detail is not required, this may be faster than Match.
func (s *GenericSublist[T]) NumInterest(subject string) (np int) {
    s.hasInterest(subject, true, &np)
    return
}
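A brief usage sketch may help readers navigating this vendored file. The snippet below is illustrative only and is not part of the commit; it assumes the sublist code above is in scope (same package) and that the package-level token constants `btsep`, `pwc`, and `fwc` are `'.'`, `'*'`, and `'>'`, as elsewhere in the server. `fmt` is assumed imported.

// Hypothetical usage sketch of the generic sublist.
func exampleSublistUsage() {
    sl := NewSublist[string]()
    _ = sl.Insert("orders.*.created", "sub-A") // '*' matches exactly one token
    _ = sl.Insert("orders.>", "sub-B")         // '>' matches all remaining tokens

    sl.Match("orders.eu.created", func(v string) {
        fmt.Println("matched:", v) // prints "sub-A" and "sub-B" (order not guaranteed)
    })

    fmt.Println(sl.HasInterest("orders.eu.cancelled")) // true, via "orders.>"
    fmt.Println(sl.NumInterest("orders.eu.created"))   // 2
}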
func (s *GenericSublist[T]) match(subject string, cb func(T), doLock bool) {
    tsa := [32]string{}
    tokens := tsa[:0]
    start := 0
    for i := 0; i < len(subject); i++ {
        if subject[i] == btsep {
            if i-start == 0 {
                return
            }
            tokens = append(tokens, subject[start:i])
            start = i + 1
        }
    }
    if start >= len(subject) {
        return
    }
    tokens = append(tokens, subject[start:])

    if doLock {
        s.RLock()
        defer s.RUnlock()
    }
    matchLevel(s.root, tokens, cb)
}
func (s *GenericSublist[T]) hasInterest(subject string, doLock bool, np *int) bool {
    tsa := [32]string{}
    tokens := tsa[:0]
    start := 0
    for i := 0; i < len(subject); i++ {
        if subject[i] == btsep {
            if i-start == 0 {
                return false
            }
            tokens = append(tokens, subject[start:i])
            start = i + 1
        }
    }
    if start >= len(subject) {
        return false
    }
    tokens = append(tokens, subject[start:])

    if doLock {
        s.RLock()
        defer s.RUnlock()
    }
    return matchLevelForAny(s.root, tokens, np)
}
func matchLevelForAny[T comparable](l *level[T], toks []string, np *int) bool {
    var pwc, n *node[T]
    for i, t := range toks {
        if l == nil {
            return false
        }
        if l.fwc != nil {
            if np != nil {
                *np += len(l.fwc.subs)
            }
            return true
        }
        if pwc = l.pwc; pwc != nil {
            if match := matchLevelForAny(pwc.next, toks[i+1:], np); match {
                return true
            }
        }
        n = l.nodes[t]
        if n != nil {
            l = n.next
        } else {
            l = nil
        }
    }
    if n != nil {
        if np != nil {
            *np += len(n.subs)
        }
        return len(n.subs) > 0
    }
    if pwc != nil {
        if np != nil {
            *np += len(pwc.subs)
        }
        return len(pwc.subs) > 0
    }
    return false
}
// callbacksForResults will make the necessary callbacks for each
// result in this node.
func callbacksForResults[T comparable](n *node[T], cb func(T)) {
    for sub := range n.subs {
        cb(sub)
    }
}

// matchLevel is used to recursively descend into the trie.
func matchLevel[T comparable](l *level[T], toks []string, cb func(T)) {
    var pwc, n *node[T]
    for i, t := range toks {
        if l == nil {
            return
        }
        if l.fwc != nil {
            callbacksForResults(l.fwc, cb)
        }
        if pwc = l.pwc; pwc != nil {
            matchLevel(pwc.next, toks[i+1:], cb)
        }
        n = l.nodes[t]
        if n != nil {
            l = n.next
        } else {
            l = nil
        }
    }
    if n != nil {
        callbacksForResults(n, cb)
    }
    if pwc != nil {
        callbacksForResults(pwc, cb)
    }
}
// lnt is used to track descent into levels for a removal for pruning.
type lnt[T comparable] struct {
    l *level[T]
    n *node[T]
    t string
}

// Raw low level remove, can do batches with lock held outside.
func (s *GenericSublist[T]) remove(subject string, value T, shouldLock bool) error {
    tsa := [32]string{}
    tokens := tsa[:0]
    start := 0
    for i := 0; i < len(subject); i++ {
        if subject[i] == btsep {
            tokens = append(tokens, subject[start:i])
            start = i + 1
        }
    }
    tokens = append(tokens, subject[start:])

    if shouldLock {
        s.Lock()
        defer s.Unlock()
    }

    var sfwc bool
    var n *node[T]
    l := s.root

    // Track levels for pruning
    var lnts [32]lnt[T]
    levels := lnts[:0]

    for _, t := range tokens {
        lt := len(t)
        if lt == 0 || sfwc {
            return ErrInvalidSubject
        }
        if l == nil {
            return ErrNotFound
        }
        if lt > 1 {
            n = l.nodes[t]
        } else {
            switch t[0] {
            case pwc:
                n = l.pwc
            case fwc:
                n = l.fwc
                sfwc = true
            default:
                n = l.nodes[t]
            }
        }
        if n != nil {
            levels = append(levels, lnt[T]{l, n, t})
            l = n.next
        } else {
            l = nil
        }
    }

    if !s.removeFromNode(n, value) {
        return ErrNotFound
    }

    s.count--

    for i := len(levels) - 1; i >= 0; i-- {
        l, n, t := levels[i].l, levels[i].n, levels[i].t
        if n.isEmpty() {
            l.pruneNode(n, t)
        }
    }

    return nil
}
// Remove will remove a subscription.
func (s *GenericSublist[T]) Remove(subject string, value T) error {
    return s.remove(subject, value, true)
}

// pruneNode is used to prune an empty node from the tree.
func (l *level[T]) pruneNode(n *node[T], t string) {
    if n == nil {
        return
    }
    if n == l.fwc {
        l.fwc = nil
    } else if n == l.pwc {
        l.pwc = nil
    } else {
        delete(l.nodes, t)
    }
}

// isEmpty will test if the node has any entries. Used
// in pruning.
func (n *node[T]) isEmpty() bool {
    return len(n.subs) == 0 && (n.next == nil || n.next.numNodes() == 0)
}

// Return the number of nodes for the given level.
func (l *level[T]) numNodes() int {
    num := len(l.nodes)
    if l.pwc != nil {
        num++
    }
    if l.fwc != nil {
        num++
    }
    return num
}

// Remove the sub for the given node.
func (s *GenericSublist[T]) removeFromNode(n *node[T], value T) (found bool) {
    if n == nil {
        return false
    }
    if _, found = n.subs[value]; found {
        delete(n.subs, value)
    }
    return found
}

// Count returns the number of subscriptions.
func (s *GenericSublist[T]) Count() uint32 {
    s.RLock()
    defer s.RUnlock()
    return s.count
}

// numLevels will return the maximum number of levels
// contained in the Sublist tree.
func (s *GenericSublist[T]) numLevels() int {
    return visitLevel(s.root, 0)
}

// visitLevel is used to descend the Sublist tree structure
// recursively.
func visitLevel[T comparable](l *level[T], depth int) int {
    if l == nil || l.numNodes() == 0 {
        return depth
    }

    depth++
    maxDepth := depth

    for _, n := range l.nodes {
        if n == nil {
            continue
        }
        newDepth := visitLevel(n.next, depth)
        if newDepth > maxDepth {
            maxDepth = newDepth
        }
    }
    if l.pwc != nil {
        pwcDepth := visitLevel(l.pwc.next, depth)
        if pwcDepth > maxDepth {
            maxDepth = pwcDepth
        }
    }
    if l.fwc != nil {
        fwcDepth := visitLevel(l.fwc.next, depth)
        if fwcDepth > maxDepth {
            maxDepth = fwcDepth
        }
    }
    return maxDepth
}
// IntersectStree will match all items in the given subject tree that
// have interest expressed in the given sublist. The callback will only be called
// once for each subject, regardless of overlapping subscriptions in the sublist.
func IntersectStree[T1 any, T2 comparable](st *stree.SubjectTree[T1], sl *GenericSublist[T2], cb func(subj []byte, entry *T1)) {
    var _subj [255]byte
    intersectStree(st, sl.root, _subj[:0], cb)
}

func intersectStree[T1 any, T2 comparable](st *stree.SubjectTree[T1], r *level[T2], subj []byte, cb func(subj []byte, entry *T1)) {
    if r.numNodes() == 0 {
        // For wildcards we can't avoid Match, but if it's a literal subject at
        // this point, using Find is considerably cheaper.
        if subjectHasWildcard(string(subj)) {
            st.Match(subj, cb)
        } else if e, ok := st.Find(subj); ok {
            cb(subj, e)
        }
        return
    }
    nsubj := subj
    if len(nsubj) > 0 {
        nsubj = append(subj, '.')
    }
    switch {
    case r.fwc != nil:
        // We've reached a full wildcard, do a FWC match on the stree at this point
        // and don't keep iterating downward.
        nsubj := append(nsubj, '>')
        st.Match(nsubj, cb)
    case r.pwc != nil:
        // We've found a partial wildcard. We'll keep iterating downwards, but first
        // check whether there's interest at this level (without triggering dupes) and
        // match if so.
        nsubj := append(nsubj, '*')
        if len(r.pwc.subs) > 0 && r.pwc.next != nil && r.pwc.next.numNodes() > 0 {
            st.Match(nsubj, cb)
        }
        intersectStree(st, r.pwc.next, nsubj, cb)
    case r.numNodes() > 0:
        // Normal node with subject literals, keep iterating.
        for t, n := range r.nodes {
            nsubj := append(nsubj, t...)
            intersectStree(st, n.next, nsubj, cb)
        }
    }
}
// Determine if a subject has any wildcard tokens.
func subjectHasWildcard(subject string) bool {
    // This one exits earlier than !subjectIsLiteral(subject).
    for i, c := range subject {
        if c == pwc || c == fwc {
            if (i == 0 || subject[i-1] == btsep) &&
                (i+1 == len(subject) || subject[i+1] == btsep) {
                return true
            }
        }
    }
    return false
}
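To make the token-boundary logic concrete, here is a short illustration (not part of the commit) of how subjectHasWildcard treats wildcards that are, or are not, whole tokens:

// Illustrative only: a wildcard counts only when it forms a whole token.
// subjectHasWildcard("foo.*.bar") == true  ('*' is a full token)
// subjectHasWildcard("foo.>")     == true  ('>' is a full trailing token)
// subjectHasWildcard("foo*bar")   == false ('*' is embedded in a literal)
// subjectHasWildcard("f*.bar")    == false ('*' does not start its token)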
9 vendor/github.com/nats-io/nats-server/v2/server/ipqueue.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2021-2023 The NATS Authors
+// Copyright 2021-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -190,14 +190,16 @@ func (q *ipQueue[T]) len() int {
 }
 
 // Empty the queue and consumes the notification signal if present.
+// Returns the number of items that were drained from the queue.
 // Note that this could cause a reader go routine that has been
 // notified that there is something in the queue (reading from queue's `ch`)
 // may then get nothing if `drain()` is invoked before the `pop()` or `popOne()`.
-func (q *ipQueue[T]) drain() {
+func (q *ipQueue[T]) drain() int {
 	if q == nil {
-		return
+		return 0
 	}
 	q.Lock()
+	olen := len(q.elts)
 	if q.elts != nil {
 		q.resetAndReturnToPool(&q.elts)
 		q.elts, q.pos = nil, 0
@@ -209,6 +211,7 @@ func (q *ipQueue[T]) drain() {
 	default:
 	}
 	q.Unlock()
+	return olen
 }
 
 // Since the length of the queue goes to 0 after a pop(), it is good to
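Since drain() now reports how many items it discarded, callers can keep derived counters consistent. A hypothetical caller sketch (the jetstream_api.go hunk further below does exactly this for apiInflight):

// Hypothetical caller sketch, assuming an atomic counter tracks queued work.
if drained := q.drain(); drained > 0 {
	atomic.AddInt64(&inflight, -int64(drained))
}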
38 vendor/github.com/nats-io/nats-server/v2/server/jetstream.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2019-2024 The NATS Authors
+// Copyright 2019-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -461,6 +461,8 @@ func (s *Server) enableJetStream(cfg JetStreamConfig) error {
 		if err := s.enableJetStreamClustering(); err != nil {
 			return err
 		}
+		// Set our atomic bool to clustered.
+		s.jsClustered.Store(true)
 	}
 
 	// Mark when we are up and running.
@@ -965,6 +967,8 @@ func (s *Server) shutdownJetStream() {
 			cc.c = nil
 		}
 		cc.meta = nil
+		// Set our atomic bool to false.
+		s.jsClustered.Store(false)
 	}
 	js.mu.Unlock()
 
@@ -1497,12 +1501,14 @@ func (a *Account) filteredStreams(filter string) []*stream {
 	var msets []*stream
 	for _, mset := range jsa.streams {
 		if filter != _EMPTY_ {
+			mset.cfgMu.RLock()
 			for _, subj := range mset.cfg.Subjects {
 				if SubjectsCollide(filter, subj) {
 					msets = append(msets, mset)
 					break
 				}
 			}
+			mset.cfgMu.RUnlock()
 		} else {
 			msets = append(msets, mset)
 		}
@@ -2103,7 +2109,7 @@ func (js *jetStream) wouldExceedLimits(storeType StorageType, sz int) bool {
 	} else {
 		total, max = &js.storeUsed, js.config.MaxStore
 	}
-	return atomic.LoadInt64(total) > (max + int64(sz))
+	return (atomic.LoadInt64(total) + int64(sz)) > max
 }
 
 func (js *jetStream) limitsExceeded(storeType StorageType) bool {
@@ -2143,14 +2149,11 @@ func (jsa *jsAccount) selectLimits(replicas int) (JetStreamAccountLimits, string
 }
 
 // Lock should be held.
-func (jsa *jsAccount) countStreams(tier string, cfg *StreamConfig) int {
-	streams := len(jsa.streams)
-	if tier != _EMPTY_ {
-		streams = 0
-		for _, sa := range jsa.streams {
-			if isSameTier(&sa.cfg, cfg) {
-				streams++
-			}
-		}
-	}
+func (jsa *jsAccount) countStreams(tier string, cfg *StreamConfig) (streams int) {
+	for _, sa := range jsa.streams {
+		// Don't count the stream toward the limit if it already exists.
+		if (tier == _EMPTY_ || isSameTier(&sa.cfg, cfg)) && sa.cfg.Name != cfg.Name {
+			streams++
+		}
+	}
 	return streams
@@ -2256,7 +2259,7 @@ func (js *jetStream) checkBytesLimits(selectedLimits *JetStreamAccountLimits, ad
 			return NewJSMemoryResourcesExceededError()
 		}
 		// Check if this server can handle request.
-		if checkServer && js.memReserved+addBytes > js.config.MaxMemory {
+		if checkServer && js.memReserved+totalBytes > js.config.MaxMemory {
 			return NewJSMemoryResourcesExceededError()
 		}
 	case FileStorage:
@@ -2265,7 +2268,7 @@ func (js *jetStream) checkBytesLimits(selectedLimits *JetStreamAccountLimits, ad
 			return NewJSStorageResourcesExceededError()
 		}
 		// Check if this server can handle request.
-		if checkServer && js.storeReserved+addBytes > js.config.MaxStore {
+		if checkServer && js.storeReserved+totalBytes > js.config.MaxStore {
 			return NewJSStorageResourcesExceededError()
 		}
 	}
@@ -2970,3 +2973,14 @@ func fixCfgMirrorWithDedupWindow(cfg *StreamConfig) {
 		cfg.Duplicates = 0
 	}
 }
+
+func (s *Server) handleWritePermissionError() {
+	//TODO Check if we should add s.jetStreamOOSPending in condition
+	if s.JetStreamEnabled() {
+		s.Errorf("File system permission denied while writing, disabling JetStream")
+
+		go s.DisableJetStream()
+
+		//TODO Send respective advisory if needed, same as in handleOutOfSpace
+	}
+}
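The wouldExceedLimits change above is easy to verify with small numbers. With a hypothetical usage of 90, a limit of 100, and an incoming size of 20 (assumed values, not from the diff):

// Illustrative arithmetic only.
usage, limit, size := int64(90), int64(100), int64(20)
fmt.Println(usage > (limit + size)) // old check: 90 > 120 is false — would wrongly admit the write
fmt.Println((usage + size) > limit) // new check: 110 > 100 is true — correctly reports the breach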
75 vendor/github.com/nats-io/nats-server/v2/server/jetstream_api.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2020-2023 The NATS Authors
+// Copyright 2020-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -765,7 +765,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, sub
 	s, rr := js.srv, js.apiSubs.Match(subject)
 
 	hdr, msg := c.msgParts(rmsg)
-	if len(getHeader(ClientInfoHdr, hdr)) == 0 {
+	if len(sliceHeader(ClientInfoHdr, hdr)) == 0 {
 		// Check if this is the system account. We will let these through for the account info only.
 		sacc := s.SystemAccount()
 		if sacc != acc {
@@ -836,7 +836,8 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, sub
 		limit := atomic.LoadInt64(&js.queueLimit)
 		if pending >= int(limit) {
 			s.rateLimitFormatWarnf("JetStream API queue limit reached, dropping %d requests", pending)
-			s.jsAPIRoutedReqs.drain()
+			drained := int64(s.jsAPIRoutedReqs.drain())
+			atomic.AddInt64(&js.apiInflight, -drained)
 
 			s.publishAdvisory(nil, JSAdvisoryAPILimitReached, JSAPILimitReachedAdvisory{
 				TypedEvent: TypedEvent{
@@ -846,7 +847,7 @@ func (js *jetStream) apiDispatch(sub *subscription, c *client, acc *Account, sub
 				},
 				Server:  s.Name(),
 				Domain:  js.config.Domain,
-				Dropped: int64(pending),
+				Dropped: drained,
 			})
 		}
 	}
@@ -864,8 +865,10 @@ func (s *Server) processJSAPIRoutedRequests() {
 	for {
 		select {
 		case <-queue.ch:
-			reqs := queue.pop()
-			for _, r := range reqs {
+			// Only pop one item at a time here, otherwise if the system is recovering
+			// from queue buildup, then one worker will pull off all the tasks and the
+			// others will be starved of work.
+			for r, ok := queue.popOne(); ok && r != nil; r, ok = queue.popOne() {
 				client.pa = r.pa
 				start := time.Now()
 				r.jsub.icb(r.sub, client, r.acc, r.subject, r.reply, r.msg)
@@ -874,7 +877,6 @@ func (s *Server) processJSAPIRoutedRequests() {
 				}
 				atomic.AddInt64(&js.apiInflight, -1)
 			}
-			queue.recycle(&reqs)
 		case <-s.quitCh:
 			return
 		}
@@ -1006,7 +1008,7 @@ func (s *Server) getRequestInfo(c *client, raw []byte) (pci *ClientInfo, acc *Ac
 	var ci ClientInfo
 
 	if len(hdr) > 0 {
-		if err := json.Unmarshal(getHeader(ClientInfoHdr, hdr), &ci); err != nil {
+		if err := json.Unmarshal(sliceHeader(ClientInfoHdr, hdr), &ci); err != nil {
 			return nil, nil, nil, nil, err
 		}
 	}
@@ -1871,13 +1873,14 @@ func (s *Server) jsStreamInfoRequest(sub *subscription, c *client, a *Account, s
 	if cc.meta != nil {
 		ourID = cc.meta.ID()
 	}
-	// We have seen cases where rg or rg.node is nil at this point,
-	// so check explicitly on those conditions and bail if that is
-	// the case.
-	bail := rg == nil || rg.node == nil || !rg.isMember(ourID)
+	// We have seen cases where rg is nil at this point,
+	// so check explicitly and bail if that is the case.
+	bail := rg == nil || !rg.isMember(ourID)
 	if !bail {
 		// We know we are a member here, if this group is new and we are preferred allow us to answer.
-		bail = rg.Preferred != ourID || time.Since(rg.node.Created()) > lostQuorumIntervalDefault
+		// Also, we have seen cases where rg.node is nil at this point,
+		// so check explicitly and bail if that is the case.
+		bail = rg.Preferred != ourID || (rg.node != nil && time.Since(rg.node.Created()) > lostQuorumIntervalDefault)
 	}
 	js.mu.RUnlock()
 	if bail {
@@ -2313,6 +2316,9 @@ func (s *Server) jsLeaderServerRemoveRequest(sub *subscription, c *client, _ *Ac
 		s.Warnf(badAPIRequestT, msg)
 		return
 	}
+	if acc != s.SystemAccount() {
+		return
+	}
 
 	js, cc := s.getJetStreamCluster()
 	if js == nil || cc == nil || cc.meta == nil {
@@ -2437,6 +2443,10 @@ func (s *Server) jsLeaderServerStreamMoveRequest(sub *subscription, c *client, _
 	accName := tokenAt(subject, 6)
 	streamName := tokenAt(subject, 7)
 
+	if acc.GetName() != accName && acc != s.SystemAccount() {
+		return
+	}
+
 	var resp = JSApiStreamUpdateResponse{ApiResponse: ApiResponse{Type: JSApiStreamUpdateResponseType}}
 
 	var req JSApiMetaServerStreamMoveRequest
@@ -2556,7 +2566,7 @@ func (s *Server) jsLeaderServerStreamMoveRequest(sub *subscription, c *client, _
 	cfg.Placement = origPlacement
 
 	s.Noticef("Requested move for stream '%s > %s' R=%d from %+v to %+v",
-		streamName, accName, cfg.Replicas, s.peerSetToNames(currPeers), s.peerSetToNames(peers))
+		accName, streamName, cfg.Replicas, s.peerSetToNames(currPeers), s.peerSetToNames(peers))
 
 	// We will always have peers and therefore never do a callout, therefore it is safe to call inline
 	s.jsClusteredStreamUpdateRequest(&ciNew, targetAcc.(*Account), subject, reply, rmsg, &cfg, peers)
@@ -2593,6 +2603,10 @@ func (s *Server) jsLeaderServerStreamCancelMoveRequest(sub *subscription, c *cli
 	accName := tokenAt(subject, 6)
 	streamName := tokenAt(subject, 7)
 
+	if acc.GetName() != accName && acc != s.SystemAccount() {
+		return
+	}
+
 	targetAcc, ok := s.accounts.Load(accName)
 	if !ok {
 		resp.Error = NewJSNoAccountError()
@@ -2662,7 +2676,7 @@ func (s *Server) jsLeaderServerStreamCancelMoveRequest(sub *subscription, c *cli
 	}
 
 	s.Noticef("Requested cancel of move: R=%d '%s > %s' to peer set %+v and restore previous peer set %+v",
-		cfg.Replicas, streamName, accName, s.peerSetToNames(currPeers), s.peerSetToNames(peers))
+		cfg.Replicas, accName, streamName, s.peerSetToNames(currPeers), s.peerSetToNames(peers))
 
 	// We will always have peers and therefore never do a callout, therefore it is safe to call inline
 	s.jsClusteredStreamUpdateRequest(&ciNew, targetAcc.(*Account), subject, reply, rmsg, &cfg, peers)
@@ -2679,6 +2693,9 @@ func (s *Server) jsLeaderAccountPurgeRequest(sub *subscription, c *client, _ *Ac
 		s.Warnf(badAPIRequestT, msg)
 		return
 	}
+	if acc != s.SystemAccount() {
+		return
+	}
 
 	js := s.getJetStream()
 	if js == nil {
@@ -3416,7 +3433,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC
 			Time: start,
 		},
 		Stream: streamName,
-		Client: ci,
+		Client: ci.forAdvisory(),
 		Domain: domain,
 	})
 
@@ -3548,7 +3565,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC
 		Start:  start,
 		End:    end,
 		Bytes:  int64(total),
-		Client: ci,
+		Client: ci.forAdvisory(),
 		Domain: domain,
 	})
 
@@ -3557,7 +3574,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC
 	if err != nil {
 		resp.Error = NewJSStreamRestoreError(err, Unless(err))
 		s.Warnf("Restore failed for %s for stream '%s > %s' in %v",
-			friendlyBytes(int64(total)), streamName, acc.Name, end.Sub(start))
+			friendlyBytes(int64(total)), acc.Name, streamName, end.Sub(start))
 	} else {
 		resp.StreamInfo = &StreamInfo{
 			Created: mset.createdTime(),
@@ -3566,7 +3583,7 @@ func (s *Server) processStreamRestore(ci *ClientInfo, acc *Account, cfg *StreamC
 			TimeStamp: time.Now().UTC(),
 		}
 		s.Noticef("Completed restore of %s for stream '%s > %s' in %v",
-			friendlyBytes(int64(total)), streamName, acc.Name, end.Sub(start).Round(time.Millisecond))
+			friendlyBytes(int64(total)), acc.Name, streamName, end.Sub(start).Round(time.Millisecond))
 	}
 
 	// On the last EOF, send back the stream info or error status.
@@ -3681,7 +3698,7 @@ func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, _ *Accoun
 		},
 		Stream: mset.name(),
 		State:  sr.State,
-		Client: ci,
+		Client: ci.forAdvisory(),
 		Domain: s.getOpts().JetStreamDomain,
 	})
 
@@ -3699,7 +3716,7 @@ func (s *Server) jsStreamSnapshotRequest(sub *subscription, c *client, _ *Accoun
 		Stream: mset.name(),
 		Start:  start,
 		End:    end,
-		Client: ci,
+		Client: ci.forAdvisory(),
 		Domain: s.getOpts().JetStreamDomain,
 	})
 
@@ -4263,9 +4280,17 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account,
 		return
 	}
 
+	js.mu.RLock()
+	meta := cc.meta
+	js.mu.RUnlock()
+
+	// Since these could wait on the Raft group lock, don't do so under the JS lock.
+	ourID := meta.ID()
+	groupLeaderless := meta.Leaderless()
+	groupCreated := meta.Created()
+
 	js.mu.RLock()
 	isLeader, sa, ca := cc.isLeader(), js.streamAssignment(acc.Name, streamName), js.consumerAssignment(acc.Name, streamName, consumerName)
-	ourID := cc.meta.ID()
 	var rg *raftGroup
 	var offline, isMember bool
 	if ca != nil {
@@ -4279,7 +4304,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account,
 	// Also capture if we think there is no meta leader.
 	var isLeaderLess bool
 	if !isLeader {
-		isLeaderLess = cc.meta.GroupLeader() == _EMPTY_ && time.Since(cc.meta.Created()) > lostQuorumIntervalDefault
+		isLeaderLess = groupLeaderless && time.Since(groupCreated) > lostQuorumIntervalDefault
 	}
 	js.mu.RUnlock()
 
@@ -4366,7 +4391,7 @@ func (s *Server) jsConsumerInfoRequest(sub *subscription, c *client, _ *Account,
 		return
 	}
 	// If we are a member and we have a group leader or we had a previous leader consider bailing out.
-	if node.GroupLeader() != _EMPTY_ || node.HadPreviousLeader() {
+	if !node.Leaderless() || node.HadPreviousLeader() {
 		if leaderNotPartOfGroup {
 			resp.Error = NewJSConsumerOfflineError()
 			s.sendDelayedAPIErrResponse(ci, acc, subject, reply, string(msg), s.jsonResponse(&resp), nil)
@@ -4489,7 +4514,7 @@ func (s *Server) sendJetStreamAPIAuditAdvisory(ci *ClientInfo, acc *Account, sub
 			Time: time.Now().UTC(),
 		},
 		Server:   s.Name(),
-		Client:   ci,
+		Client:   ci.forAdvisory(),
 		Subject:  subject,
 		Request:  request,
 		Response: response,
905 vendor/github.com/nats-io/nats-server/v2/server/jetstream_cluster.go (generated, vendored)
File diff suppressed because it is too large
16 vendor/github.com/nats-io/nats-server/v2/server/jetstream_events.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2020-2021 The NATS Authors
+// Copyright 2020-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -18,13 +18,22 @@ import (
 	"time"
 )
 
-func (s *Server) publishAdvisory(acc *Account, subject string, adv any) {
+// publishAdvisory sends the given advisory into the account. Returns true if
+// it was sent, false if not (i.e. due to lack of interest or a marshal error).
+func (s *Server) publishAdvisory(acc *Account, subject string, adv any) bool {
 	if acc == nil {
 		acc = s.SystemAccount()
 		if acc == nil {
-			return
+			return false
 		}
 	}
 
+	// If there is no one listening for this advisory then save ourselves the effort
+	// and don't bother encoding the JSON or sending it.
+	if sl := acc.sl; (sl != nil && !sl.HasInterest(subject)) && !s.hasGatewayInterest(acc.Name, subject) {
+		return false
+	}
+
 	ej, err := json.Marshal(adv)
 	if err == nil {
 		err = s.sendInternalAccountMsg(acc, subject, ej)
@@ -34,6 +43,7 @@ func (s *Server) publishAdvisory(acc *Account, subject string, adv any) {
 	} else {
 		s.Warnf("Advisory could not be serialized for account %q: %v", acc.Name, err)
 	}
+	return err == nil
 }
 
 // JSAPIAudit is an advisory about administrative actions taken on JetStream
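With publishAdvisory now returning a bool, callers can tell a skipped advisory from a sent one. A hypothetical caller, not part of this commit, might log the skip:

// Hypothetical caller sketch; subject and adv are assumed to be in scope.
if ok := s.publishAdvisory(nil, subject, adv); !ok {
	// Either nobody was listening or the advisory failed to marshal.
	s.Debugf("advisory on %q was not sent", subject)
}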
113 vendor/github.com/nats-io/nats-server/v2/server/leafnode.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2019-2024 The NATS Authors
+// Copyright 2019-2025 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
@@ -774,7 +774,7 @@ func (s *Server) startLeafNodeAcceptLoop() {
 }
 
 // RegEx to match a creds file with user JWT and Seed.
-var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}[^\n]*[-]{3,}\n)(.+)(?:\n\s*[-]{3,}[^\n]*[-]{3,}\n))`)
+var credsRe = regexp.MustCompile(`\s*(?:(?:[-]{3,}.*[-]{3,}\r?\n)([\w\-.=]+)(?:\r?\n[-]{3,}.*[-]{3,}(\r?\n|\z)))`)
 
 // clusterName is provided as argument to avoid lock ordering issues with the locked client c
 // Lock should be held entering here.
@@ -855,9 +855,18 @@ func (c *client) sendLeafConnect(clusterName string, headers bool) error {
 		pkey, _ := kp.PublicKey()
 		cinfo.Nkey = pkey
 		cinfo.Sig = sig
-	} else if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
+	}
+	// In addition, and this is to allow auth callout, set user/password or
+	// token if applicable.
+	if userInfo := c.leaf.remote.curURL.User; userInfo != nil {
+		// For backward compatibility, if only username is provided, set both
+		// Token and User, not just Token.
 		cinfo.User = userInfo.Username()
-		cinfo.Pass, _ = userInfo.Password()
+		var ok bool
+		cinfo.Pass, ok = userInfo.Password()
+		if !ok {
+			cinfo.Token = cinfo.User
+		}
 	} else if c.leaf.remote.username != _EMPTY_ {
 		cinfo.User = c.leaf.remote.username
 		cinfo.Pass = c.leaf.remote.password
@@ -988,6 +997,7 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf
 	c.Noticef("Leafnode connection created%s %s", remoteSuffix, c.opts.Name)
 
 	var tlsFirst bool
+	var infoTimeout time.Duration
 	if remote != nil {
 		solicited = true
 		remote.Lock()
@@ -997,6 +1007,7 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf
 			c.leaf.isSpoke = true
 		}
 		tlsFirst = remote.TLSHandshakeFirst
+		infoTimeout = remote.FirstInfoTimeout
 		remote.Unlock()
 		c.acc = acc
 	} else {
@@ -1054,7 +1065,7 @@ func (s *Server) createLeafNode(conn net.Conn, rURL *url.URL, remote *leafNodeCf
 			}
 		}
 		// We need to wait for the info, but not for too long.
-		c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
+		c.nc.SetReadDeadline(time.Now().Add(infoTimeout))
 	}
 
 	// We will process the INFO from the readloop and finish by
@@ -1725,6 +1736,7 @@ type leafConnectInfo struct {
 	Sig       string   `json:"sig,omitempty"`
 	User      string   `json:"user,omitempty"`
 	Pass      string   `json:"pass,omitempty"`
+	Token     string   `json:"auth_token,omitempty"`
 	ID        string   `json:"server_id,omitempty"`
 	Domain    string   `json:"domain,omitempty"`
 	Name      string   `json:"name,omitempty"`
@@ -2247,8 +2259,16 @@ func (c *client) sendLeafNodeSubUpdate(key string, n int32) {
 			checkPerms = false
 		}
 	}
-	if checkPerms && !c.canSubscribe(key) {
-		return
+	if checkPerms {
+		var subject string
+		if sep := strings.IndexByte(key, ' '); sep != -1 {
+			subject = key[:sep]
+		} else {
+			subject = key
+		}
+		if !c.canSubscribe(subject) {
+			return
+		}
 	}
 	// If we are here we can send over to the other side.
@@ -2271,6 +2291,42 @@ func keyFromSub(sub *subscription) string {
 	return sb.String()
 }
 
+const (
+	keyRoutedSub         = "R"
+	keyRoutedSubByte     = 'R'
+	keyRoutedLeafSub     = "L"
+	keyRoutedLeafSubByte = 'L'
+)
+
+// Helper function to build the key that prevents collisions between normal
+// routed subscriptions and routed subscriptions on behalf of a leafnode.
+// Keys will look like this:
+// "R foo"         -> plain routed sub on "foo"
+// "R foo bar"     -> queue routed sub on "foo", queue "bar"
+// "L foo bar"     -> plain routed leaf sub on "foo", leaf "bar"
+// "L foo bar baz" -> queue routed sub on "foo", queue "bar", leaf "baz"
+func keyFromSubWithOrigin(sub *subscription) string {
+	var sb strings.Builder
+	sb.Grow(2 + len(sub.origin) + 1 + len(sub.subject) + 1 + len(sub.queue))
+	leaf := len(sub.origin) > 0
+	if leaf {
+		sb.WriteByte(keyRoutedLeafSubByte)
+	} else {
+		sb.WriteByte(keyRoutedSubByte)
+	}
+	sb.WriteByte(' ')
+	sb.Write(sub.subject)
+	if sub.queue != nil {
+		sb.WriteByte(' ')
+		sb.Write(sub.queue)
+	}
+	if leaf {
+		sb.WriteByte(' ')
+		sb.Write(sub.origin)
+	}
+	return sb.String()
+}
+
 // Lock should be held.
 func (c *client) writeLeafSub(w *bytes.Buffer, key string, n int32) {
 	if key == _EMPTY_ {
@@ -2321,12 +2377,21 @@ func (c *client) processLeafSub(argo []byte) (err error) {
 	args := splitArg(arg)
 	sub := &subscription{client: c}
 
+	delta := int32(1)
 	switch len(args) {
 	case 1:
 		sub.queue = nil
 	case 3:
 		sub.queue = args[1]
 		sub.qw = int32(parseSize(args[2]))
+		// TODO: (ik) We should have a non empty queue name and a queue
+		// weight >= 1. For 2.11, we may want to return an error if that
+		// is not the case, but for now just overwrite `delta` if queue
+		// weight is greater than 1 (it is possible after a reconnect/
+		// server restart to receive a queue weight > 1 for a new sub).
+		if sub.qw > 1 {
+			delta = sub.qw
+		}
 	default:
 		return fmt.Errorf("processLeafSub Parse Error: '%s'", arg)
 	}
@@ -2390,8 +2455,6 @@ func (c *client) processLeafSub(argo []byte) (err error) {
 	}
 	key := bytesToString(sub.sid)
 	osub := c.subs[key]
-	updateGWs := false
-	delta := int32(1)
 	if osub == nil {
 		c.subs[key] = sub
 		// Now place into the account sl.
@@ -2402,7 +2465,6 @@ func (c *client) processLeafSub(argo []byte) (err error) {
 			c.sendErr("Invalid Subscription")
 			return nil
 		}
-		updateGWs = srv.gateway.enabled
 	} else if sub.queue != nil {
 		// For a queue we need to update the weight.
 		delta = sub.qw - atomic.LoadInt32(&osub.qw)
@@ -2425,7 +2487,7 @@ func (c *client) processLeafSub(argo []byte) (err error) {
 	if !spoke {
 		// If we are routing add to the route map for the associated account.
 		srv.updateRouteSubscriptionMap(acc, sub, delta)
-		if updateGWs {
+		if srv.gateway.enabled {
 			srv.gatewayUpdateSubInterest(acc.Name, sub, delta)
 		}
 	}
@@ -2467,28 +2529,32 @@ func (c *client) processLeafUnsub(arg []byte) error {
 		return nil
 	}
 
-	updateGWs := false
 	spoke := c.isSpokeLeafNode()
 	// We store local subs by account and subject and optionally queue name.
 	// LS- will have the arg exactly as the key.
 	sub, ok := c.subs[string(arg)]
+	if !ok {
+		// If not found, don't try to update routes/gws/leaf nodes.
+		c.mu.Unlock()
+		return nil
+	}
+	delta := int32(1)
+	if len(sub.queue) > 0 {
+		delta = sub.qw
+	}
 	c.mu.Unlock()
 
-	if ok {
-		c.unsubscribe(acc, sub, true, true)
-		updateGWs = srv.gateway.enabled
-	}
-
+	c.unsubscribe(acc, sub, true, true)
 	if !spoke {
 		// If we are routing subtract from the route map for the associated account.
-		srv.updateRouteSubscriptionMap(acc, sub, -1)
+		srv.updateRouteSubscriptionMap(acc, sub, -delta)
 		// Gateways
-		if updateGWs {
-			srv.gatewayUpdateSubInterest(acc.Name, sub, -1)
+		if srv.gateway.enabled {
+			srv.gatewayUpdateSubInterest(acc.Name, sub, -delta)
 		}
 	}
 	// Now check on leafnode updates for other leaf nodes.
-	acc.updateLeafNodes(sub, -1)
+	acc.updateLeafNodes(sub, -delta)
 	return nil
 }
@@ -2717,7 +2783,7 @@ func (c *client) processInboundLeafMsg(msg []byte) {
 
 	// Now deal with gateways
 	if c.srv.gateway.enabled {
-		c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames)
+		c.sendMsgToGateways(acc, msg, c.pa.subject, c.pa.reply, qnames, true)
 	}
 }
@@ -2833,6 +2899,7 @@ func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remot
 	compress := remote.Websocket.Compression
 	// By default the server will mask outbound frames, but it can be disabled with this option.
 	noMasking := remote.Websocket.NoMasking
+	infoTimeout := remote.FirstInfoTimeout
 	remote.RUnlock()
 	// Will do the client-side TLS handshake if needed.
 	tlsRequired, err := c.leafClientHandshakeIfNeeded(remote, opts)
@@ -2885,6 +2952,7 @@ func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remot
 	if noMasking {
 		req.Header.Add(wsNoMaskingHeader, wsNoMaskingValue)
 	}
+	c.nc.SetDeadline(time.Now().Add(infoTimeout))
 	if err := req.Write(c.nc); err != nil {
 		return nil, WriteError, err
 	}
@@ -2892,7 +2960,6 @@ func (c *client) leafNodeSolicitWSConnection(opts *Options, rURL *url.URL, remot
 	var resp *http.Response
 
 	br := bufio.NewReaderSize(c.nc, MAX_CONTROL_LINE_SIZE)
-	c.nc.SetReadDeadline(time.Now().Add(DEFAULT_LEAFNODE_INFO_WAIT))
 	resp, err = http.ReadResponse(br, req)
 	if err == nil &&
 		(resp.StatusCode != 101 ||
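For the key format documented at keyFromSubWithOrigin above, a few concrete examples may help; the subscription field values here are hypothetical, not taken from the diff:

// Illustrative only: keys produced for assumed subscriptions.
// subject "foo", no queue, no origin         -> "R foo"
// subject "foo", queue "bar", no origin      -> "R foo bar"
// subject "foo", no queue, origin "leaf1"    -> "L foo leaf1"
// subject "foo", queue "bar", origin "leaf1" -> "L foo bar leaf1"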
2 vendor/github.com/nats-io/nats-server/v2/server/log.go (generated, vendored)
@@ -1,4 +1,4 @@
-// Copyright 2012-2020 The NATS Authors
+// Copyright 2012-2024 The NATS Authors
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
 // You may obtain a copy of the License at
324 vendor/github.com/nats-io/nats-server/v2/server/memstore.go (generated, vendored)
@ -1,4 +1,4 @@
|
||||
// Copyright 2019-2024 The NATS Authors
|
||||
// Copyright 2019-2025 The NATS Authors
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
@ -84,12 +84,15 @@ func (ms *memStore) UpdateConfig(cfg *StreamConfig) error {
|
||||
ms.ageChk = nil
|
||||
}
|
||||
// Make sure to update MaxMsgsPer
|
||||
if cfg.MaxMsgsPer < -1 {
|
||||
cfg.MaxMsgsPer = -1
|
||||
}
|
||||
maxp := ms.maxp
|
||||
ms.maxp = cfg.MaxMsgsPer
|
||||
// If the value is smaller we need to enforce that.
|
||||
if ms.maxp != 0 && ms.maxp < maxp {
|
||||
// If the value is smaller, or was unset before, we need to enforce that.
|
||||
if ms.maxp > 0 && (maxp == 0 || ms.maxp < maxp) {
|
||||
lm := uint64(ms.maxp)
|
||||
ms.fss.Iter(func(subj []byte, ss *SimpleState) bool {
|
||||
ms.fss.IterFast(func(subj []byte, ss *SimpleState) bool {
|
||||
if ss.Msgs > lm {
|
||||
ms.enforcePerSubjectLimit(bytesToString(subj), ss)
|
||||
}
|
||||
@ -140,8 +143,8 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int
|
||||
return ErrMaxBytes
|
||||
}
|
||||
// If we are here we are at a subject maximum, need to determine if dropping last message gives us enough room.
|
||||
if ss.firstNeedsUpdate {
|
||||
ms.recalculateFirstForSubj(subj, ss.First, ss)
|
||||
if ss.firstNeedsUpdate || ss.lastNeedsUpdate {
|
||||
ms.recalculateForSubj(subj, ss)
|
||||
}
|
||||
sm, ok := ms.msgs[ss.First]
|
||||
if !ok || memStoreMsgSize(sm.subj, sm.hdr, sm.msg) < memStoreMsgSize(subj, hdr, msg) {
|
||||
@ -193,6 +196,7 @@ func (ms *memStore) storeRawMsg(subj string, hdr, msg []byte, seq uint64, ts int
|
||||
if ss != nil {
|
||||
ss.Msgs++
|
||||
ss.Last = seq
|
||||
ss.lastNeedsUpdate = false
|
||||
// Check per subject limits.
|
||||
if ms.maxp > 0 && ss.Msgs > uint64(ms.maxp) {
|
||||
ms.enforcePerSubjectLimit(subj, ss)
|
||||
@ -359,15 +363,13 @@ func (ms *memStore) FilteredState(sseq uint64, subj string) SimpleState {
|
||||
}
|
||||
|
||||
func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubject bool) SimpleState {
|
||||
var ss SimpleState
|
||||
|
||||
if sseq < ms.state.FirstSeq {
|
||||
sseq = ms.state.FirstSeq
|
||||
}
|
||||
|
||||
// If past the end no results.
|
||||
if sseq > ms.state.LastSeq {
|
||||
return ss
|
||||
return SimpleState{}
|
||||
}
|
||||
|
||||
if filter == _EMPTY_ {
|
||||
@ -391,9 +393,10 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje
|
||||
|
||||
_tsa, _fsa := [32]string{}, [32]string{}
|
||||
tsa, fsa := _tsa[:0], _fsa[:0]
|
||||
fsa = tokenizeSubjectIntoSlice(fsa[:0], filter)
|
||||
wc := subjectHasWildcard(filter)
|
||||
|
||||
if wc {
|
||||
fsa = tokenizeSubjectIntoSlice(fsa[:0], filter)
|
||||
}
|
||||
// 1. See if we match any subs from fss.
|
||||
// 2. If we match and the sseq is past ss.Last then we can use meta only.
|
||||
// 3. If we match we need to do a partial, break and clear any totals and do a full scan like num pending.
|
||||
@ -409,6 +412,7 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje
|
||||
return isSubsetMatchTokenized(tsa, fsa)
|
||||
}
|
||||
|
||||
var ss SimpleState
|
||||
update := func(fss *SimpleState) {
|
||||
msgs, first, last := fss.Msgs, fss.First, fss.Last
|
||||
if lastPerSubject {
|
||||
@ -424,10 +428,11 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje
|
||||
}
|
||||
|
||||
var havePartial bool
|
||||
var totalSkipped uint64
|
||||
// We will track start and end sequences as we go.
|
||||
ms.fss.Match(stringToBytes(filter), func(subj []byte, fss *SimpleState) {
|
||||
if fss.firstNeedsUpdate {
|
||||
ms.recalculateFirstForSubj(bytesToString(subj), fss.First, fss)
|
||||
if fss.firstNeedsUpdate || fss.lastNeedsUpdate {
|
||||
ms.recalculateForSubj(bytesToString(subj), fss)
|
||||
}
|
||||
if sseq <= fss.First {
|
||||
update(fss)
|
||||
@ -436,6 +441,8 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje
|
||||
havePartial = true
|
||||
// Don't break here, we will update to keep tracking last.
|
||||
update(fss)
|
||||
} else {
|
||||
totalSkipped += fss.Msgs
|
||||
}
|
||||
})
|
||||
|
||||
@ -492,6 +499,7 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje
|
||||
} else {
|
||||
// We will adjust from the totals above by scanning what we need to exclude.
|
||||
ss.First = first
|
||||
ss.Msgs += totalSkipped
|
||||
var adjust uint64
|
||||
var tss *SimpleState
|
||||
|
||||
@ -563,8 +571,9 @@ func (ms *memStore) filteredStateLocked(sseq uint64, filter string, lastPerSubje
|
||||
|
||||
// SubjectsState returns a map of SimpleState for all matching subjects.
|
||||
func (ms *memStore) SubjectsState(subject string) map[string]SimpleState {
|
||||
ms.mu.RLock()
|
||||
defer ms.mu.RUnlock()
|
||||
// This needs to be a write lock, as we can mutate the per-subject state.
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
|
||||
if ms.fss.Size() == 0 {
|
||||
return nil
|
||||
@ -577,8 +586,8 @@ func (ms *memStore) SubjectsState(subject string) map[string]SimpleState {
|
||||
fss := make(map[string]SimpleState)
|
||||
ms.fss.Match(stringToBytes(subject), func(subj []byte, ss *SimpleState) {
|
||||
subjs := string(subj)
|
||||
if ss.firstNeedsUpdate {
|
||||
ms.recalculateFirstForSubj(subjs, ss.First, ss)
|
||||
if ss.firstNeedsUpdate || ss.lastNeedsUpdate {
|
||||
ms.recalculateForSubj(subjs, ss)
|
||||
}
|
||||
oss := fss[subjs]
|
||||
if oss.First == 0 { // New
|
||||
@ -630,6 +639,154 @@ func (ms *memStore) NumPending(sseq uint64, filter string, lastPerSubject bool)
|
||||
return ss.Msgs, ms.state.LastSeq
|
||||
}
|
||||
|
||||
// NumPending will return the number of pending messages matching any subject in the sublist starting at sequence.
|
||||
func (ms *memStore) NumPendingMulti(sseq uint64, sl *Sublist, lastPerSubject bool) (total, validThrough uint64) {
|
||||
if sl == nil {
|
||||
return ms.NumPending(sseq, fwcs, lastPerSubject)
|
||||
}
|
||||
|
||||
// This needs to be a write lock, as we can mutate the per-subject state.
|
||||
ms.mu.Lock()
|
||||
defer ms.mu.Unlock()
|
||||
|
||||
var ss SimpleState
|
||||
if sseq < ms.state.FirstSeq {
|
||||
sseq = ms.state.FirstSeq
|
||||
}
|
||||
// If past the end no results.
|
||||
if sseq > ms.state.LastSeq {
|
||||
return 0, ms.state.LastSeq
|
||||
}
|
||||
|
||||
update := func(fss *SimpleState) {
|
||||
msgs, first, last := fss.Msgs, fss.First, fss.Last
|
||||
if lastPerSubject {
|
||||
msgs, first = 1, last
|
||||
}
|
||||
ss.Msgs += msgs
|
||||
if ss.First == 0 || first < ss.First {
|
||||
ss.First = first
|
||||
}
|
||||
if last > ss.Last {
|
||||
ss.Last = last
|
||||
}
|
||||
}
|
||||
|
||||
var havePartial bool
|
||||
var totalSkipped uint64
|
||||
// We will track start and end sequences as we go.
|
||||
IntersectStree[SimpleState](ms.fss, sl, func(subj []byte, fss *SimpleState) {
|
||||
if fss.firstNeedsUpdate || fss.lastNeedsUpdate {
|
||||
ms.recalculateForSubj(bytesToString(subj), fss)
|
||||
}
|
||||
if sseq <= fss.First {
|
||||
update(fss)
|
||||
} else if sseq <= fss.Last {
|
||||
// We matched but it is a partial.
|
||||
havePartial = true
|
||||
// Don't break here, we will update to keep tracking last.
|
||||
update(fss)
|
||||
} else {
|
||||
totalSkipped += fss.Msgs
|
||||
}
|
||||
})
|
||||
|
||||
// If we did not encounter any partials we can return here.
|
||||
if !havePartial {
|
||||
return ss.Msgs, ms.state.LastSeq
|
||||
}
|
||||
|
||||
// If we are here we need to scan the msgs.
|
||||
// Capture first and last sequences for scan and then clear what we had.
|
||||
first, last := ss.First, ss.Last
|
||||
// To track if we decide to exclude we need to calculate first.
|
||||
if first < sseq {
|
||||
first = sseq
|
||||
}
|
||||
|
||||
// Now we want to check if it is better to scan inclusive and recalculate that way
|
||||
// or leave and scan exclusive and adjust our totals.
|
||||
// ss.Last is always correct here.
|
||||
toScan, toExclude := last-first, first-ms.state.FirstSeq+ms.state.LastSeq-ss.Last
|
||||
var seen map[string]bool
|
||||
if lastPerSubject {
|
||||
seen = make(map[string]bool)
|
||||
}
|
||||
if toScan < toExclude {
|
||||
ss.Msgs, ss.First = 0, 0
|
||||
|
||||
update := func(sm *StoreMsg) {
|
||||
ss.Msgs++
|
||||
if ss.First == 0 {
|
||||
ss.First = sm.seq
|
||||
}
|
||||
if seen != nil {
|
||||
seen[sm.subj] = true
|
||||
}
|
||||
}
|
||||
// Check if easier to just scan msgs vs the sequence range.
|
||||
// This can happen with lots of interior deletes.
|
||||
if last-first > uint64(len(ms.msgs)) {
|
||||
for _, sm := range ms.msgs {
|
||||
if sm.seq >= first && sm.seq <= last && !seen[sm.subj] && sl.HasInterest(sm.subj) {
|
||||
update(sm)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for seq := first; seq <= last; seq++ {
|
||||
if sm, ok := ms.msgs[seq]; ok && !seen[sm.subj] && sl.HasInterest(sm.subj) {
|
||||
update(sm)
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// We will adjust from the totals above by scanning what we need to exclude.
|
||||
ss.First = first
|
||||
ss.Msgs += totalSkipped
|
||||
var adjust uint64
|
||||
var tss *SimpleState
|
||||
|
||||
update := func(sm *StoreMsg) {
|
||||
if lastPerSubject {
|
||||
tss, _ = ms.fss.Find(stringToBytes(sm.subj))
|
||||
}
|
||||
// If we are last per subject, make sure to only adjust if all messages are before our first.
|
||||
if tss == nil || tss.Last < first {
|
||||
adjust++
|
||||
}
|
||||
if seen != nil {
|
||||
seen[sm.subj] = true
|
||||
}
|
||||
}
|
||||
// Check if easier to just scan msgs vs the sequence range.
|
||||
if first-ms.state.FirstSeq > uint64(len(ms.msgs)) {
|
||||
for _, sm := range ms.msgs {
|
||||
if sm.seq < first && !seen[sm.subj] && sl.HasInterest(sm.subj) {
|
||||
update(sm)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
for seq := ms.state.FirstSeq; seq < first; seq++ {
|
||||
if sm, ok := ms.msgs[seq]; ok && !seen[sm.subj] && sl.HasInterest(sm.subj) {
|
||||
update(sm)
|
||||
}
|
||||
}
|
||||
}
|
||||
// Now do range at end.
|
||||
for seq := last + 1; seq < ms.state.LastSeq; seq++ {
|
||||
if sm, ok := ms.msgs[seq]; ok && !seen[sm.subj] && sl.HasInterest(sm.subj) {
|
||||
adjust++
|
||||
if seen != nil {
|
||||
seen[sm.subj] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
ss.Msgs -= adjust
|
||||
}
|
||||
|
||||
return ss.Msgs, ms.state.LastSeq
|
||||
}
|
||||
|
||||
// Will check the msg limit for this tracked subject.
|
||||
// Lock should be held.
|
||||
func (ms *memStore) enforcePerSubjectLimit(subj string, ss *SimpleState) {
|
||||
@ -637,8 +794,8 @@ func (ms *memStore) enforcePerSubjectLimit(subj string, ss *SimpleState) {
|
||||
return
|
||||
}
|
||||
for nmsgs := ss.Msgs; nmsgs > uint64(ms.maxp); nmsgs = ss.Msgs {
|
||||
if ss.firstNeedsUpdate {
|
||||
ms.recalculateFirstForSubj(subj, ss.First, ss)
|
||||
if ss.firstNeedsUpdate || ss.lastNeedsUpdate {
|
||||
ms.recalculateForSubj(subj, ss)
|
||||
}
|
||||
if !ms.removeMsg(ss.First, false) {
|
||||
break
|
||||
@ -853,8 +1010,11 @@ func (ms *memStore) Compact(seq uint64) (uint64, error) {
|
||||
if sm := ms.msgs[seq]; sm != nil {
|
||||
bytes += memStoreMsgSize(sm.subj, sm.hdr, sm.msg)
|
||||
purged++
|
||||
delete(ms.msgs, seq)
|
||||
ms.removeSeqPerSubject(sm.subj, seq)
|
||||
// Must delete message after updating per-subject info, to be consistent with file store.
|
||||
delete(ms.msgs, seq)
|
||||
} else if !ms.dmap.IsEmpty() {
|
||||
ms.dmap.Delete(seq)
|
||||
}
|
||||
}
|
||||
if purged > ms.state.Msgs {
|
||||
@ -875,7 +1035,10 @@ func (ms *memStore) Compact(seq uint64) (uint64, error) {
|
||||
ms.state.FirstSeq = seq
|
||||
ms.state.FirstTime = time.Time{}
|
||||
ms.state.LastSeq = seq - 1
|
||||
// Reset msgs, fss and dmap.
|
||||
ms.msgs = make(map[uint64]*StoreMsg)
|
||||
ms.fss = stree.NewSubjectTree[SimpleState]()
|
||||
ms.dmap.Empty()
|
||||
}
|
||||
ms.mu.Unlock()
|
||||
|
||||
@ -907,9 +1070,10 @@ func (ms *memStore) reset() error {
|
||||
// Update msgs and bytes.
|
||||
ms.state.Msgs = 0
|
||||
ms.state.Bytes = 0
|
||||
// Reset msgs and fss.
|
||||
// Reset msgs, fss and dmap.
|
||||
ms.msgs = make(map[uint64]*StoreMsg)
|
||||
ms.fss = stree.NewSubjectTree[SimpleState]()
|
||||
ms.dmap.Empty()
|
||||
|
||||
ms.mu.Unlock()
|
||||
|
||||
@ -940,8 +1104,11 @@ func (ms *memStore) Truncate(seq uint64) error {
|
||||
if sm := ms.msgs[i]; sm != nil {
|
||||
purged++
|
||||
bytes += memStoreMsgSize(sm.subj, sm.hdr, sm.msg)
|
||||
delete(ms.msgs, i)
|
||||
ms.removeSeqPerSubject(sm.subj, i)
|
||||
// Must delete message after updating per-subject info, to be consistent with file store.
|
||||
delete(ms.msgs, i)
|
||||
} else if !ms.dmap.IsEmpty() {
|
||||
ms.dmap.Delete(i)
|
||||
}
|
||||
}
|
||||
// Reset last.
|
||||
@ -1107,8 +1274,8 @@ func (ms *memStore) LoadNextMsg(filter string, wc bool, start uint64, smp *Store
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if ss.firstNeedsUpdate {
|
||||
ms.recalculateFirstForSubj(subj, ss.First, ss)
|
||||
if ss.firstNeedsUpdate || ss.lastNeedsUpdate {
|
||||
ms.recalculateForSubj(subj, ss)
|
||||
}
|
||||
if ss.First < fseq {
|
||||
fseq = ss.First
|
||||
@ -1139,6 +1306,33 @@ func (ms *memStore) LoadNextMsg(filter string, wc bool, start uint64, smp *Store
|
||||
return nil, ms.state.LastSeq, ErrStoreEOF
|
||||
}
|
||||
|
||||
// Will load the next non-deleted msg starting at the start sequence and walking backwards.
|
||||
func (ms *memStore) LoadPrevMsg(start uint64, smp *StoreMsg) (sm *StoreMsg, err error) {
|
||||
ms.mu.RLock()
|
||||
defer ms.mu.RUnlock()
|
||||
|
||||
if ms.msgs == nil {
|
||||
return nil, ErrStoreClosed
|
||||
}
|
||||
if ms.state.Msgs == 0 || start < ms.state.FirstSeq {
|
||||
return nil, ErrStoreEOF
|
||||
}
|
||||
if start > ms.state.LastSeq {
|
||||
start = ms.state.LastSeq
|
||||
}
|
||||
|
||||
for seq := start; seq >= ms.state.FirstSeq; seq-- {
|
||||
if sm, ok := ms.msgs[seq]; ok {
|
||||
if smp == nil {
|
||||
smp = new(StoreMsg)
|
||||
}
|
||||
sm.copy(smp)
|
||||
return smp, nil
|
||||
}
|
||||
}
|
||||
return nil, ErrStoreEOF
|
||||
}
|
||||
|
||||
// RemoveMsg will remove the message from this store.
|
||||
// Will return the number of bytes removed.
func (ms *memStore) RemoveMsg(seq uint64) (bool, error) {
@@ -1202,32 +1396,62 @@ func (ms *memStore) removeSeqPerSubject(subj string, seq uint64) {
}
ss.Msgs--

// If we know we only have 1 msg left don't need to search for next first.
// Only one left
if ss.Msgs == 1 {
if seq == ss.Last {
ss.Last = ss.First
} else {
if !ss.lastNeedsUpdate && seq != ss.Last {
ss.First = ss.Last
}
ss.firstNeedsUpdate = false
} else {
ss.firstNeedsUpdate = seq == ss.First || ss.firstNeedsUpdate
}
}

// Will recalculate the first sequence for this subject in this block.
// Lock should be held.
func (ms *memStore) recalculateFirstForSubj(subj string, startSeq uint64, ss *SimpleState) {
tseq := startSeq + 1
if tseq < ms.state.FirstSeq {
tseq = ms.state.FirstSeq
}
for ; tseq <= ss.Last; tseq++ {
if sm := ms.msgs[tseq]; sm != nil && sm.subj == subj {
ss.First = tseq
ss.firstNeedsUpdate = false
return
}
if !ss.firstNeedsUpdate && seq != ss.First {
ss.Last = ss.First
ss.lastNeedsUpdate = false
return
}
}

// We can lazily calculate the first/last sequence when needed.
ss.firstNeedsUpdate = seq == ss.First || ss.firstNeedsUpdate
ss.lastNeedsUpdate = seq == ss.Last || ss.lastNeedsUpdate
}

// Will recalculate the first and/or last sequence for this subject.
// Lock should be held.
func (ms *memStore) recalculateForSubj(subj string, ss *SimpleState) {
if ss.firstNeedsUpdate {
tseq := ss.First + 1
if tseq < ms.state.FirstSeq {
tseq = ms.state.FirstSeq
}
for ; tseq <= ss.Last; tseq++ {
if sm := ms.msgs[tseq]; sm != nil && sm.subj == subj {
ss.First = tseq
ss.firstNeedsUpdate = false
if ss.Msgs == 1 {
ss.Last = tseq
ss.lastNeedsUpdate = false
return
}
break
}
}
}
if ss.lastNeedsUpdate {
tseq := ss.Last - 1
if tseq > ms.state.LastSeq {
tseq = ms.state.LastSeq
}
for ; tseq >= ss.First; tseq-- {
if sm := ms.msgs[tseq]; sm != nil && sm.subj == subj {
ss.Last = tseq
ss.lastNeedsUpdate = false
if ss.Msgs == 1 {
ss.First = tseq
ss.firstNeedsUpdate = false
}
return
}
}
}
}

@@ -1242,7 +1466,6 @@ func (ms *memStore) removeMsg(seq uint64, secure bool) bool {

ss = memStoreMsgSize(sm.subj, sm.hdr, sm.msg)

delete(ms.msgs, seq)
if ms.state.Msgs > 0 {
ms.state.Msgs--
if ss > ms.state.Bytes {
@@ -1267,6 +1490,8 @@ func (ms *memStore) removeMsg(seq uint64, secure bool) bool {

// Remove any per subject tracking.
ms.removeSeqPerSubject(sm.subj, seq)
// Must delete message after updating per-subject info, to be consistent with file store.
delete(ms.msgs, seq)

if ms.scb != nil {
// We do not want to hold any locks here.
@@ -1488,8 +1713,6 @@ func (o *consumerMemStore) Update(state *ConsumerState) error {
pending = make(map[uint64]*Pending, len(state.Pending))
for seq, p := range state.Pending {
pending[seq] = &Pending{p.Sequence, p.Timestamp}
}
for seq := range pending {
if seq <= state.AckFloor.Stream || seq > state.Delivered.Stream {
return fmt.Errorf("bad pending entry, sequence [%d] out of range", seq)
}
@@ -1504,10 +1727,10 @@ func (o *consumerMemStore) Update(state *ConsumerState) error {

// Replace our state.
o.mu.Lock()
defer o.mu.Unlock()

// Check to see if this is an outdated update.
if state.Delivered.Consumer < o.state.Delivered.Consumer {
o.mu.Unlock()
if state.Delivered.Consumer < o.state.Delivered.Consumer || state.AckFloor.Stream < o.state.AckFloor.Stream {
return fmt.Errorf("old update ignored")
}

@@ -1515,7 +1738,6 @@ func (o *consumerMemStore) Update(state *ConsumerState) error {
o.state.AckFloor = state.AckFloor
o.state.Pending = pending
o.state.Redelivered = redelivered
o.mu.Unlock()

return nil
}
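The memstore hunk above stops rescanning for a new first sequence on every remove; instead it marks firstNeedsUpdate/lastNeedsUpdate and defers the boundary scan until the value is actually read. A minimal sketch of the same idea with simplified names (not the server's actual types; sequences are assumed to start at 1):

    package main

    import "fmt"

    type simpleState struct {
    	first, last      uint64
    	msgs             uint64
    	firstNeedsUpdate bool
    	lastNeedsUpdate  bool
    }

    // remove only marks the affected boundary dirty; no scan happens here.
    func (ss *simpleState) remove(seq uint64) {
    	ss.msgs--
    	ss.firstNeedsUpdate = seq == ss.first || ss.firstNeedsUpdate
    	ss.lastNeedsUpdate = seq == ss.last || ss.lastNeedsUpdate
    }

    // recalc scans only the boundaries that were actually invalidated.
    func (ss *simpleState) recalc(present map[uint64]bool) {
    	if ss.firstNeedsUpdate {
    		for seq := ss.first + 1; seq <= ss.last; seq++ {
    			if present[seq] {
    				ss.first, ss.firstNeedsUpdate = seq, false
    				break
    			}
    		}
    	}
    	if ss.lastNeedsUpdate {
    		for seq := ss.last - 1; seq >= ss.first; seq-- {
    			if present[seq] {
    				ss.last, ss.lastNeedsUpdate = seq, false
    				break
    			}
    		}
    	}
    }

    func main() {
    	present := map[uint64]bool{2: true, 5: true}
    	ss := &simpleState{first: 1, last: 5, msgs: 3}
    	ss.remove(1) // flags only; the scan is deferred
    	ss.recalc(present)
    	fmt.Println(ss.first, ss.last) // 2 5
    }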
160  vendor/github.com/nats-io/nats-server/v2/server/monitor.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2013-2024 The NATS Authors
// Copyright 2013-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -831,6 +831,7 @@ func (s *Server) Routez(routezOpts *RoutezOptions) (*Routez, error) {
OutBytes: r.outBytes,
NumSubs: uint32(len(r.subs)),
Import: r.opts.Import,
Pending: int(r.out.pb),
Export: r.opts.Export,
RTT: r.getRTT().String(),
Start: r.start,
@@ -1122,20 +1123,16 @@ func (s *Server) HandleStacksz(w http.ResponseWriter, r *http.Request) {
ResponseHandler(w, r, buf[:n])
}

type monitorIPQueue struct {
type IpqueueszStatusIPQ struct {
Pending int `json:"pending"`
InProgress int `json:"in_progress,omitempty"`
}

func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) {
all, err := decodeBool(w, r, "all")
if err != nil {
return
}
qfilter := r.URL.Query().Get("queues")

queues := map[string]monitorIPQueue{}
type IpqueueszStatus map[string]IpqueueszStatusIPQ

func (s *Server) Ipqueuesz(opts *IpqueueszOptions) *IpqueueszStatus {
all, qfilter := opts.All, opts.Filter
queues := IpqueueszStatus{}
s.ipQueues.Range(func(k, v any) bool {
var pending, inProgress int
name := k.(string)
@@ -1152,9 +1149,23 @@ func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) {
} else if qfilter != _EMPTY_ && !strings.Contains(name, qfilter) {
return true
}
queues[name] = monitorIPQueue{Pending: pending, InProgress: inProgress}
queues[name] = IpqueueszStatusIPQ{Pending: pending, InProgress: inProgress}
return true
})
return &queues
}

func (s *Server) HandleIPQueuesz(w http.ResponseWriter, r *http.Request) {
all, err := decodeBool(w, r, "all")
if err != nil {
return
}
qfilter := r.URL.Query().Get("queues")

queues := s.Ipqueuesz(&IpqueueszOptions{
All: all,
Filter: qfilter,
})

b, _ := json.MarshalIndent(queues, "", " ")
ResponseHandler(w, r, b)
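With this refactor the queue snapshot is exposed as Server.Ipqueuesz, and the HTTP handler becomes a thin wrapper over it. A sketch of calling it from an embedded server, assuming the vendored nats-server API shown above; error handling is trimmed:

    package main

    import (
    	"fmt"
    	"time"

    	"github.com/nats-io/nats-server/v2/server"
    )

    func main() {
    	s, err := server.NewServer(&server.Options{Port: -1}) // -1 picks a random port
    	if err != nil {
    		panic(err)
    	}
    	go s.Start()
    	defer s.Shutdown()
    	if !s.ReadyForConnections(5 * time.Second) {
    		panic("server not ready")
    	}

    	// Snapshot the internal IP queues; All also includes empty queues.
    	status := s.Ipqueuesz(&server.IpqueueszOptions{All: true})
    	for name, q := range *status {
    		fmt.Printf("%s: pending=%d in_progress=%d\n", name, q.Pending, q.InProgress)
    	}
    }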
@@ -1858,6 +1869,14 @@ type GatewayzOptions struct {

// AccountName will limit the list of accounts to that account name (makes Accounts implicit)
AccountName string `json:"account_name"`

// AccountSubscriptions indicates if subscriptions should be included in the results.
// Note: This is used only if `Accounts` or `AccountName` are specified.
AccountSubscriptions bool `json:"subscriptions"`

// AccountSubscriptionsDetail indicates if subscription details should be included in the results.
// Note: This is used only if `Accounts` or `AccountName` are specified.
AccountSubscriptionsDetail bool `json:"subscriptions_detail"`
}

// Gatewayz represents detailed information on Gateways
@@ -1880,12 +1899,14 @@ type RemoteGatewayz struct {

// AccountGatewayz represents interest mode for this account
type AccountGatewayz struct {
Name string `json:"name"`
InterestMode string `json:"interest_mode"`
NoInterestCount int `json:"no_interest_count,omitempty"`
InterestOnlyThreshold int `json:"interest_only_threshold,omitempty"`
TotalSubscriptions int `json:"num_subs,omitempty"`
NumQueueSubscriptions int `json:"num_queue_subs,omitempty"`
Name string `json:"name"`
InterestMode string `json:"interest_mode"`
NoInterestCount int `json:"no_interest_count,omitempty"`
InterestOnlyThreshold int `json:"interest_only_threshold,omitempty"`
TotalSubscriptions int `json:"num_subs,omitempty"`
NumQueueSubscriptions int `json:"num_queue_subs,omitempty"`
Subs []string `json:"subscriptions_list,omitempty"`
SubsDetail []SubDetail `json:"subscriptions_list_detail,omitempty"`
}

// Gatewayz returns a Gatewayz struct containing information about gateways.
@@ -2011,14 +2032,14 @@ func createOutboundAccountsGatewayz(opts *GatewayzOptions, gw *gateway) []*Accou
if !ok {
return nil
}
a := createAccountOutboundGatewayz(accName, ei)
a := createAccountOutboundGatewayz(opts, accName, ei)
return []*AccountGatewayz{a}
}

accs := make([]*AccountGatewayz, 0, 4)
gw.outsim.Range(func(k, v any) bool {
name := k.(string)
a := createAccountOutboundGatewayz(name, v)
a := createAccountOutboundGatewayz(opts, name, v)
accs = append(accs, a)
return true
})
@@ -2026,7 +2047,7 @@ func createOutboundAccountsGatewayz(opts *GatewayzOptions, gw *gateway) []*Accou
}

// Returns an AccountGatewayz for this gateway outbound connection
func createAccountOutboundGatewayz(name string, ei any) *AccountGatewayz {
func createAccountOutboundGatewayz(opts *GatewayzOptions, name string, ei any) *AccountGatewayz {
a := &AccountGatewayz{
Name: name,
InterestOnlyThreshold: gatewayMaxRUnsubBeforeSwitch,
@@ -2038,6 +2059,23 @@ func createAccountOutboundGatewayz(name string, ei any) *AccountGatewayz {
a.NoInterestCount = len(e.ni)
a.NumQueueSubscriptions = e.qsubs
a.TotalSubscriptions = int(e.sl.Count())
if opts.AccountSubscriptions || opts.AccountSubscriptionsDetail {
var subsa [4096]*subscription
subs := subsa[:0]
e.sl.All(&subs)
if opts.AccountSubscriptions {
a.Subs = make([]string, 0, len(subs))
} else {
a.SubsDetail = make([]SubDetail, 0, len(subs))
}
for _, sub := range subs {
if opts.AccountSubscriptions {
a.Subs = append(a.Subs, string(sub.subject))
} else {
a.SubsDetail = append(a.SubsDetail, newClientSubDetail(sub))
}
}
}
e.RUnlock()
} else {
a.InterestMode = Optimistic.String()
@@ -2129,6 +2167,10 @@ func (s *Server) HandleGatewayz(w http.ResponseWriter, r *http.Request) {
s.httpReqStats[GatewayzPath]++
s.mu.Unlock()

subs, subsDet, err := decodeSubs(w, r)
if err != nil {
return
}
accs, err := decodeBool(w, r, "accs")
if err != nil {
return
@@ -2140,9 +2182,11 @@ func (s *Server) HandleGatewayz(w http.ResponseWriter, r *http.Request) {
}

opts := &GatewayzOptions{
Name: gwName,
Accounts: accs,
AccountName: accName,
Name: gwName,
Accounts: accs,
AccountName: accName,
AccountSubscriptions: subs,
AccountSubscriptionsDetail: subsDet,
}
gw, err := s.Gatewayz(opts)
if err != nil {
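The two new options let gateway reports carry per-account subscription lists. A hedged sketch of requesting them programmatically; the server pointer and the gateway setup itself are assumed:

    package example

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/nats-io/nats-server/v2/server"
    )

    // dumpGatewayz prints gateway interest including the new
    // subscriptions_list fields added by this bump.
    func dumpGatewayz(s *server.Server) error {
    	gwz, err := s.Gatewayz(&server.GatewayzOptions{
    		Accounts:             true, // also implied by AccountName
    		AccountSubscriptions: true, // include subscriptions_list
    	})
    	if err != nil {
    		return err
    	}
    	b, err := json.MarshalIndent(gwz, "", "  ")
    	if err != nil {
    		return err
    	}
    	fmt.Println(string(b))
    	return nil
    }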
@@ -2282,7 +2326,7 @@ type AccountStatz struct {
Accounts []*AccountStat `json:"account_statz"`
}

// LeafzOptions are options passed to Leafz
// AccountStatzOptions are options passed to account stats requests.
type AccountStatzOptions struct {
Accounts []string `json:"accounts"`
IncludeUnused bool `json:"include_unused"`
@@ -2760,6 +2804,18 @@ type ProfilezOptions struct {
Duration time.Duration `json:"duration,omitempty"`
}

// IpqueueszOptions are options passed to Ipqueuesz
type IpqueueszOptions struct {
All bool `json:"all"`
Filter string `json:"filter"`
}

// RaftzOptions are options passed to Raftz
type RaftzOptions struct {
AccountFilter string `json:"account"`
GroupFilter string `json:"group"`
}

// StreamDetail shows information about the stream state and its consumers.
type StreamDetail struct {
Name string `json:"name"`
@@ -3228,10 +3284,11 @@ func (s *Server) HandleHealthz(w http.ResponseWriter, r *http.Request) {
Details: includeDetails,
})

code := http.StatusOK
code := hs.StatusCode
if hs.Error != _EMPTY_ {
s.Warnf("Healthcheck failed: %q", hs.Error)
code = hs.StatusCode
} else if len(hs.Errors) != 0 {
s.Warnf("Healthcheck failed: %d errors", len(hs.Errors))
}
// Remove StatusCode from JSON representation when responding via HTTP
// since this is already in the response.
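The handler now honors the StatusCode computed by the health check even when only the Errors slice (not the single Error field) is populated. A small probe illustrating the contract, assuming monitoring is enabled on localhost:8222:

    package example

    import (
    	"fmt"
    	"io"
    	"net/http"
    )

    // probeHealthz returns true if the server reports healthy (HTTP 200).
    // Non-200 codes now carry the StatusCode from the health check itself.
    func probeHealthz() (bool, error) {
    	resp, err := http.Get("http://localhost:8222/healthz")
    	if err != nil {
    		return false, err
    	}
    	defer resp.Body.Close()
    	body, _ := io.ReadAll(resp.Body)
    	fmt.Printf("healthz: %d %s\n", resp.StatusCode, body)
    	return resp.StatusCode == http.StatusOK, nil
    }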
@@ -3675,27 +3732,27 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus {

for stream, sa := range asa {
// Make sure we can look up
if !js.isStreamHealthy(acc, sa) {
if err := js.isStreamHealthy(acc, sa); err != nil {
if !details {
health.Status = na
health.Error = fmt.Sprintf("JetStream stream '%s > %s' is not current", accName, stream)
health.Error = fmt.Sprintf("JetStream stream '%s > %s' is not current: %s", accName, stream, err)
return health
}
health.Errors = append(health.Errors, HealthzError{
Type: HealthzErrorStream,
Account: accName,
Stream: stream,
Error: fmt.Sprintf("JetStream stream '%s > %s' is not current", accName, stream),
Error: fmt.Sprintf("JetStream stream '%s > %s' is not current: %s", accName, stream, err),
})
continue
}
mset, _ := acc.lookupStream(stream)
// Now check consumers.
for consumer, ca := range sa.consumers {
if !js.isConsumerHealthy(mset, consumer, ca) {
if err := js.isConsumerHealthy(mset, consumer, ca); err != nil {
if !details {
health.Status = na
health.Error = fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current", acc, stream, consumer)
health.Error = fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current: %s", acc, stream, consumer, err)
return health
}
health.Errors = append(health.Errors, HealthzError{
@@ -3703,7 +3760,7 @@ func (s *Server) healthz(opts *HealthzOptions) *HealthStatus {
Account: accName,
Stream: stream,
Consumer: consumer,
Error: fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current", acc, stream, consumer),
Error: fmt.Sprintf("JetStream consumer '%s > %s > %s' is not current: %s", acc, stream, consumer, err),
})
}
}
@@ -3812,6 +3869,8 @@ type RaftzGroupPeer struct {
LastSeen string `json:"last_seen,omitempty"`
}

type RaftzStatus map[string]map[string]RaftzGroup

func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) {
if s.raftNodes == nil {
w.WriteHeader(404)
@@ -3819,20 +3878,34 @@
return
}

gfilter := r.URL.Query().Get("group")
afilter := r.URL.Query().Get("acc")
groups := s.Raftz(&RaftzOptions{
AccountFilter: r.URL.Query().Get("acc"),
GroupFilter: r.URL.Query().Get("group"),
})

if groups == nil {
w.WriteHeader(404)
w.Write([]byte("No Raft nodes returned, check supplied filters"))
return
}

b, _ := json.MarshalIndent(groups, "", " ")
ResponseHandler(w, r, b)
}

func (s *Server) Raftz(opts *RaftzOptions) *RaftzStatus {
afilter, gfilter := opts.AccountFilter, opts.GroupFilter

if afilter == _EMPTY_ {
if sys := s.SystemAccount(); sys != nil {
afilter = sys.Name
} else {
w.WriteHeader(404)
w.Write([]byte("System account not found, the server may be shutting down"))
return
return nil
}
}

groups := map[string]RaftNode{}
infos := map[string]map[string]RaftzGroup{} // account -> group ID
infos := RaftzStatus{} // account -> group ID

s.rnMu.RLock()
if gfilter != _EMPTY_ {
@@ -3858,12 +3931,6 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) {
}
s.rnMu.RUnlock()

if len(groups) == 0 {
w.WriteHeader(404)
w.Write([]byte("No Raft nodes found, does the specified account/group exist?"))
return
}

for name, rg := range groups {
n, ok := rg.(*raft)
if n == nil || !ok {
@@ -3886,7 +3953,7 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) {
Applied: n.applied,
CatchingUp: n.catchup != nil,
Leader: n.leader,
EverHadLeader: n.pleader,
EverHadLeader: n.pleader.Load(),
Term: n.term,
Vote: n.vote,
PTerm: n.pterm,
@@ -3917,6 +3984,5 @@ func (s *Server) HandleRaftz(w http.ResponseWriter, r *http.Request) {
infos[n.accName][name] = info
}

b, _ := json.MarshalIndent(infos, "", " ")
ResponseHandler(w, r, b)
return &infos
}
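Raftz, like Ipqueuesz, is now callable directly, with the handler reduced to query-parameter translation. A sketch; the account filter defaults to the system account when empty, and a nil result is what the handler maps to a 404:

    package example

    import (
    	"encoding/json"
    	"fmt"

    	"github.com/nats-io/nats-server/v2/server"
    )

    // dumpRaftz prints Raft group state for one account, or a notice if
    // no nodes matched the supplied filters.
    func dumpRaftz(s *server.Server, account string) {
    	status := s.Raftz(&server.RaftzOptions{AccountFilter: account})
    	if status == nil {
    		fmt.Println("no raft nodes matched")
    		return
    	}
    	b, _ := json.MarshalIndent(status, "", "  ")
    	fmt.Println(string(b))
    }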
2  vendor/github.com/nats-io/nats-server/v2/server/monitor_sort_opts.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2013-2018 The NATS Authors
// Copyright 2013-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/mqtt.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2020-2023 The NATS Authors
// Copyright 2020-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/nkey.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2018 The NATS Authors
// Copyright 2018-2023 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/ocsp.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2021-2023 The NATS Authors
// Copyright 2021-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/ocsp_peer.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2023 The NATS Authors
// Copyright 2023-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/ocsp_responsecache.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2023 The NATS Authors
// Copyright 2023-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

88  vendor/github.com/nats-io/nats-server/v2/server/opts.go  (generated, vendored)
@@ -205,6 +205,11 @@ type RemoteLeafOpts struct {
DenyImports []string `json:"-"`
DenyExports []string `json:"-"`

// FirstInfoTimeout is the amount of time the server will wait for the
// initial INFO protocol from the remote server before closing the
// connection.
FirstInfoTimeout time.Duration `json:"-"`

// Compression options for this remote. Each remote could have a different
// setting and also be different from the LeafNode options.
Compression CompressionOpts `json:"-"`
@@ -290,6 +295,7 @@ type Options struct {
MaxControlLine int32 `json:"max_control_line"`
MaxPayload int32 `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
NoFastProducerStall bool `json:"-"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
LeafNode LeafNodeOpts `json:"leaf,omitempty"`
@@ -657,26 +663,28 @@ type authorization struct {
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Insecure bool
Map bool
TLSCheckKnownURLs bool
HandshakeFirst bool // Indicate that the TLS handshake should occur first, before sending the INFO protocol.
FallbackDelay time.Duration // Where supported, indicates how long to wait for the handshake before falling back to sending the INFO protocol first.
Timeout float64
RateLimit int64
Ciphers []uint16
CurvePreferences []tls.CurveID
PinnedCerts PinnedCertSet
CertStore certstore.StoreType
CertMatchBy certstore.MatchByType
CertMatch string
OCSPPeerConfig *certidp.OCSPPeerConfig
Certificates []*TLSCertPairOpt
MinVersion uint16
CertFile string
KeyFile string
CaFile string
Verify bool
Insecure bool
Map bool
TLSCheckKnownURLs bool
HandshakeFirst bool // Indicate that the TLS handshake should occur first, before sending the INFO protocol.
FallbackDelay time.Duration // Where supported, indicates how long to wait for the handshake before falling back to sending the INFO protocol first.
Timeout float64
RateLimit int64
Ciphers []uint16
CurvePreferences []tls.CurveID
PinnedCerts PinnedCertSet
CertStore certstore.StoreType
CertMatchBy certstore.MatchByType
CertMatch string
CertMatchSkipInvalid bool
CaCertsMatch []string
OCSPPeerConfig *certidp.OCSPPeerConfig
Certificates []*TLSCertPairOpt
MinVersion uint16
}

// TLSCertPairOpt are the paths to a certificate and private key.
@@ -1568,6 +1576,10 @@ func (o *Options) processConfigFileLine(k string, v any, errors *[]error, warnin
*errors = append(*errors, err)
return
}
case "no_fast_producer_stall":
o.NoFastProducerStall = v.(bool)
case "max_closed_clients":
o.MaxClosedClients = int(v.(int64))
default:
if au := atomic.LoadInt32(&allowUnknownTopLevelField); au == 0 && !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
@@ -2605,6 +2617,8 @@ func parseRemoteLeafNodes(v any, errors *[]error, warnings *[]error) ([]*RemoteL
*errors = append(*errors, err)
continue
}
case "first_info_timeout":
remote.FirstInfoTimeout = parseDuration(k, tk, v, errors, warnings)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
@@ -4419,6 +4433,28 @@ func parseTLS(v any, isClientCtx bool) (t *TLSConfigOpts, retErr error) {
return nil, &configErr{tk, certstore.ErrBadCertMatchField.Error()}
}
tc.CertMatch = certMatch
case "ca_certs_match":
rv := []string{}
switch mv := mv.(type) {
case string:
rv = append(rv, mv)
case []string:
rv = append(rv, mv...)
case []interface{}:
for _, t := range mv {
if token, ok := t.(token); ok {
if ts, ok := token.Value().(string); ok {
rv = append(rv, ts)
continue
} else {
return nil, &configErr{tk, fmt.Sprintf("error parsing ca_cert_match: unsupported type %T where string is expected", token)}
}
} else {
return nil, &configErr{tk, fmt.Sprintf("error parsing ca_cert_match: unsupported type %T", t)}
}
}
}
tc.CaCertsMatch = rv
case "handshake_first", "first", "immediate":
switch mv := mv.(type) {
case bool:
@@ -4444,6 +4480,12 @@ func parseTLS(v any, isClientCtx bool) (t *TLSConfigOpts, retErr error) {
default:
return nil, &configErr{tk, fmt.Sprintf("field %q should be a boolean or a string, got %T", mk, mv)}
}
case "cert_match_skip_invalid":
certMatchSkipInvalid, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, certstore.ErrBadCertMatchSkipInvalidField.Error()}
}
tc.CertMatchSkipInvalid = certMatchSkipInvalid
case "ocsp_peer":
switch vv := mv.(type) {
case bool:
@@ -4819,7 +4861,7 @@ func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
}
config.Certificates = []tls.Certificate{cert}
case tc.CertStore != certstore.STOREEMPTY:
err := certstore.TLSConfig(tc.CertStore, tc.CertMatchBy, tc.CertMatch, &config)
err := certstore.TLSConfig(tc.CertStore, tc.CertMatchBy, tc.CertMatch, tc.CaCertsMatch, tc.CertMatchSkipInvalid, &config)
if err != nil {
return nil, err
}
@@ -5163,6 +5205,10 @@ func setBaselineOptions(opts *Options) {
c.Mode = CompressionS2Auto
}
}
// Set default first info timeout value if not set.
if r.FirstInfoTimeout <= 0 {
r.FirstInfoTimeout = DEFAULT_LEAFNODE_INFO_WAIT
}
}
}
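The new first_info_timeout bounds how long a leafnode remote waits for the initial INFO before the connection is closed, with setBaselineOptions falling back to DEFAULT_LEAFNODE_INFO_WAIT when unset. A sketch of setting it on an embedded server; the URL and duration are illustrative:

    package example

    import (
    	"net/url"
    	"time"

    	"github.com/nats-io/nats-server/v2/server"
    )

    // leafOptions builds server options with a bounded wait for the
    // remote's first INFO protocol line.
    func leafOptions(hub *url.URL) *server.Options {
    	return &server.Options{
    		LeafNode: server.LeafNodeOpts{
    			Remotes: []*server.RemoteLeafOpts{{
    				URLs:             []*url.URL{hub},
    				FirstInfoTimeout: 2 * time.Second, // 0 means use the default
    			}},
    		},
    	}
    }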
35  vendor/github.com/nats-io/nats-server/v2/server/parser.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2012-2020 The NATS Authors
// Copyright 2012-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -35,20 +35,21 @@ type parseState struct {
}

type pubArg struct {
arg []byte
pacache []byte
origin []byte
account []byte
subject []byte
deliver []byte
mapped []byte
reply []byte
szb []byte
hdb []byte
queues [][]byte
size int
hdr int
psi []*serviceImport
arg []byte
pacache []byte
origin []byte
account []byte
subject []byte
deliver []byte
mapped []byte
reply []byte
szb []byte
hdb []byte
queues [][]byte
size int
hdr int
psi []*serviceImport
delivered bool // Only used for service imports
}

// Parser constants
@@ -500,6 +501,7 @@ func (c *client) parse(buf []byte) error {
// Drop all pub args
c.pa.arg, c.pa.pacache, c.pa.origin, c.pa.account, c.pa.subject, c.pa.mapped = nil, nil, nil, nil, nil, nil
c.pa.reply, c.pa.hdr, c.pa.size, c.pa.szb, c.pa.hdb, c.pa.queues = nil, -1, 0, nil, nil, nil
c.pa.delivered = false
lmsg = false
case OP_A:
switch b {
@@ -788,7 +790,8 @@ func (c *client) parse(buf []byte) error {
c.traceInOp("LS-", arg)
}
}
err = c.processRemoteUnsub(arg)
leafUnsub := c.op == 'L' || c.op == 'l'
err = c.processRemoteUnsub(arg, leafUnsub)
case GATEWAY:
if trace {
c.traceInOp("RS-", arg)
2  vendor/github.com/nats-io/nats-server/v2/server/pse/pse_freebsd.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2015-2018 The NATS Authors
// Copyright 2015-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/pse/pse_linux.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2015-2018 The NATS Authors
// Copyright 2015-2022 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/pse/pse_rumprun.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2015-2018 The NATS Authors
// Copyright 2015-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

2  vendor/github.com/nats-io/nats-server/v2/server/pse/pse_windows.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2015-2018 The NATS Authors
// Copyright 2015-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

653  vendor/github.com/nats-io/nats-server/v2/server/raft.go  (generated, vendored)
(File diff suppressed because it is too large.)

2  vendor/github.com/nats-io/nats-server/v2/server/rate_counter.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2021-2022 The NATS Authors
// Copyright 2021-2021 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

30  vendor/github.com/nats-io/nats-server/v2/server/reload.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2017-2023 The NATS Authors
// Copyright 2017-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -916,6 +916,19 @@ func (l *leafNodeOption) Apply(s *Server) {
}
}

type noFastProdStallReload struct {
noopOption
noStall bool
}

func (l *noFastProdStallReload) Apply(s *Server) {
var not string
if l.noStall {
not = "not "
}
s.Noticef("Reloaded: fast producers will %sbe stalled", not)
}

// Compares options and disconnects clients that are no longer listed in pinned certs. Lock must not be held.
func (s *Server) recheckPinnedCerts(curOpts *Options, newOpts *Options) {
s.mu.Lock()
@@ -1623,6 +1636,8 @@ func (s *Server) diffOptions(newOpts *Options) ([]option, error) {
if new != old {
diffOpts = append(diffOpts, &profBlockRateReload{newValue: new})
}
case "nofastproducerstall":
diffOpts = append(diffOpts, &noFastProdStallReload{noStall: newValue.(bool)})
default:
// TODO(ik): Implement String() on those options to have a nice print.
// %v is difficult to figure what's what, %+v print private fields and
@@ -2172,15 +2187,22 @@ func (s *Server) reloadClusterPermissions(oldPerms *RoutePermissions) {
}
deleteRoutedSubs = deleteRoutedSubs[:0]
route.mu.Lock()
pa, _, hasSubType := route.getRoutedSubKeyInfo()
for key, sub := range route.subs {
if an := strings.Fields(key)[0]; an != accName {
continue
// If this is not a pinned-account route, we need to get the
// account name from the key to see if we collect this sub.
if !pa {
if an := getAccNameFromRoutedSubKey(sub, key, hasSubType); an != accName {
continue
}
}
// If we can't export, we need to drop the subscriptions that
// we have on behalf of this route.
// Need to make a string cast here since canExport call sl.Match()
subj := string(sub.subject)
if !route.canExport(subj) {
delete(route.subs, string(sub.sid))
// We can use bytesToString() here.
delete(route.subs, bytesToString(sub.sid))
deleteRoutedSubs = append(deleteRoutedSubs, sub)
}
}
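no_fast_producer_stall joins the hot-reloadable options; each reloadable field is represented by a small type whose Apply runs after the new options are swapped in. The pattern in isolation (simplified; the server's option interface has more methods):

    package example

    import "log"

    // option mirrors the shape of the server's reload options: diff the
    // old and new config, collect one of these per changed field, Apply.
    type option interface{ Apply() }

    type noFastProdStall struct{ noStall bool }

    func (o *noFastProdStall) Apply() {
    	not := ""
    	if o.noStall {
    		not = "not "
    	}
    	log.Printf("Reloaded: fast producers will %sbe stalled", not)
    }

    func applyAll(opts []option) {
    	for _, o := range opts {
    		o.Apply()
    	}
    }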
2  vendor/github.com/nats-io/nats-server/v2/server/ring.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2018 The NATS Authors
// Copyright 2018-2020 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at

357  vendor/github.com/nats-io/nats-server/v2/server/route.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2013-2023 The NATS Authors
// Copyright 2013-2025 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -74,6 +74,7 @@ type route struct {
didSolicit bool
retry bool
lnoc bool
lnocu bool
routeType RouteType
url *url.URL
authRequired bool
@@ -112,6 +113,7 @@ type connectInfo struct {
Cluster string `json:"cluster"`
Dynamic bool `json:"cluster_dynamic,omitempty"`
LNOC bool `json:"lnoc,omitempty"`
LNOCU bool `json:"lnocu,omitempty"` // Support for LS- with origin cluster name
Gateway string `json:"gateway,omitempty"`
}

@@ -767,6 +769,7 @@ func (c *client) processRouteInfo(info *Info) {
c.route.gatewayURL = info.GatewayURL
c.route.remoteName = info.Name
c.route.lnoc = info.LNOC
c.route.lnocu = info.LNOCU
c.route.jetstream = info.JetStream

// When sent through route INFO, if the field is set, it should be of size 1.
@@ -1169,6 +1172,36 @@ type asubs struct {
subs []*subscription
}

// Returns the account name from the subscription's key.
// This is invoked knowing that the key contains an account name, so for a sub
// that is not from a pinned-account route.
// The `keyHasSubType` boolean indicates that the key starts with the indicator
// for leaf or regular routed subscriptions.
func getAccNameFromRoutedSubKey(sub *subscription, key string, keyHasSubType bool) string {
var accIdx int
if keyHasSubType {
// Start after the sub type indicator.
accIdx = 1
// But if there is an origin, bump its index.
if len(sub.origin) > 0 {
accIdx = 2
}
}
return strings.Fields(key)[accIdx]
}

// Returns if the route is dedicated to an account, its name, and a boolean
// that indicates if this route uses the routed subscription indicator at
// the beginning of the subscription key.
// Lock held on entry.
func (c *client) getRoutedSubKeyInfo() (bool, string, bool) {
var accName string
if an := c.route.accName; len(an) > 0 {
accName = string(an)
}
return accName != _EMPTY_, accName, c.route.lnocu
}

// removeRemoteSubs will walk the subs and remove them from the appropriate account.
func (c *client) removeRemoteSubs() {
// We need to gather these on a per account basis.
@@ -1178,14 +1211,18 @@ func (c *client) removeRemoteSubs() {
srv := c.srv
subs := c.subs
c.subs = nil
pa, accountName, hasSubType := c.getRoutedSubKeyInfo()
c.mu.Unlock()

for key, sub := range subs {
c.mu.Lock()
sub.max = 0
c.mu.Unlock()
// Grab the account
accountName := strings.Fields(key)[0]
// If not a pinned-account route, we need to find the account
// name from the sub's key.
if !pa {
accountName = getAccNameFromRoutedSubKey(sub, key, hasSubType)
}
ase := as[accountName]
if ase == nil {
if v, ok := srv.accounts.Load(accountName); ok {
@@ -1197,10 +1234,14 @@ func (c *client) removeRemoteSubs() {
} else {
ase.subs = append(ase.subs, sub)
}
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
delta := int32(1)
if len(sub.queue) > 0 {
delta = sub.qw
}
ase.acc.updateLeafNodes(sub, -1)
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(accountName, sub, -delta)
}
ase.acc.updateLeafNodes(sub, -delta)
}

// Now remove the subs by batch for each account sublist.
@@ -1217,8 +1258,9 @@ func (c *client) removeRemoteSubs() {
// Lock is held on entry
func (c *client) removeRemoteSubsForAcc(name string) []*subscription {
var subs []*subscription
_, _, hasSubType := c.getRoutedSubKeyInfo()
for key, sub := range c.subs {
an := strings.Fields(key)[0]
an := getAccNameFromRoutedSubKey(sub, key, hasSubType)
if an == name {
sub.max = 0
subs = append(subs, sub)
@@ -1228,46 +1270,69 @@ func (c *client) removeRemoteSubsForAcc(name string) []*subscription {
return subs
}

func (c *client) parseUnsubProto(arg []byte) (string, []byte, []byte, error) {
func (c *client) parseUnsubProto(arg []byte, accInProto, hasOrigin bool) ([]byte, string, []byte, []byte, error) {
// Indicate any activity, so pub and sub or unsubs.
c.in.subs++

args := splitArg(arg)
var queue []byte

var accountName string
subjIdx := 1
c.mu.Lock()
if c.kind == ROUTER && c.route != nil {
if accountName = string(c.route.accName); accountName != _EMPTY_ {
subjIdx = 0
}
var (
origin []byte
accountName string
queue []byte
subjIdx int
)
// If `hasOrigin` is true, then it means this is a LS- with origin in proto.
if hasOrigin {
// We would not be here if there was not at least 1 field.
origin = args[0]
subjIdx = 1
}
// If there is an account in the protocol, bump the subject index.
if accInProto {
subjIdx++
}
c.mu.Unlock()

switch len(args) {
case subjIdx + 1:
case subjIdx + 2:
queue = args[subjIdx+1]
default:
return _EMPTY_, nil, nil, fmt.Errorf("parse error: '%s'", arg)
return nil, _EMPTY_, nil, nil, fmt.Errorf("parse error: '%s'", arg)
}
if accountName == _EMPTY_ {
accountName = string(args[0])
if accInProto {
// If there is an account in the protocol, it is before the subject.
accountName = string(args[subjIdx-1])
}
return accountName, args[subjIdx], queue, nil
return origin, accountName, args[subjIdx], queue, nil
}

// Indicates no more interest in the given account/subject for the remote side.
func (c *client) processRemoteUnsub(arg []byte) (err error) {
func (c *client) processRemoteUnsub(arg []byte, leafUnsub bool) (err error) {
srv := c.srv
if srv == nil {
return nil
}
accountName, subject, _, err := c.parseUnsubProto(arg)

var accountName string
// Assume the account will be in the protocol.
accInProto := true

c.mu.Lock()
originSupport := c.route.lnocu
if c.route != nil && len(c.route.accName) > 0 {
accountName, accInProto = string(c.route.accName), false
}
c.mu.Unlock()

hasOrigin := leafUnsub && originSupport
_, accNameFromProto, subject, _, err := c.parseUnsubProto(arg, accInProto, hasOrigin)
if err != nil {
return fmt.Errorf("processRemoteUnsub %s", err.Error())
}
if accInProto {
accountName = accNameFromProto
}
// Lookup the account
var acc *Account
if v, ok := srv.accounts.Load(accountName); ok {
@@ -1283,29 +1348,44 @@ func (c *client) processRemoteUnsub(arg []byte) (err error) {
return nil
}

updateGWs := false
// We store local subs by account and subject and optionally queue name.
// RS- will have the arg exactly as the key.
_keya := [128]byte{}
_key := _keya[:0]

var key string
if c.kind == ROUTER && c.route != nil && len(c.route.accName) > 0 {
key = accountName + " " + bytesToString(arg)
} else {
if !originSupport {
// If it is an LS- or RS-, we use the protocol as-is as the key.
key = bytesToString(arg)
} else {
// We need to prefix with the sub type.
if leafUnsub {
_key = append(_key, keyRoutedLeafSubByte)
} else {
_key = append(_key, keyRoutedSubByte)
}
_key = append(_key, ' ')
_key = append(_key, arg...)
key = bytesToString(_key)
}
delta := int32(1)
sub, ok := c.subs[key]
if ok {
delete(c.subs, key)
acc.sl.Remove(sub)
updateGWs = srv.gateway.enabled
if len(sub.queue) > 0 {
delta = sub.qw
}
}
c.mu.Unlock()

if updateGWs {
srv.gatewayUpdateSubInterest(accountName, sub, -1)
}
// Update gateways and leaf nodes only if the subscription was found.
if ok {
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(accountName, sub, -delta)
}

// Now check on leafnode updates.
acc.updateLeafNodes(sub, -1)
// Now check on leafnode updates.
acc.updateLeafNodes(sub, -delta)
}

if c.opts.Verbose {
c.sendOK()
@@ -1322,35 +1402,78 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) {
return nil
}

// Copy so we do not reference a potentially large buffer
arg := make([]byte, len(argo))
copy(arg, argo)

args := splitArg(arg)
sub := &subscription{client: c}

// This value indicate what is the mandatory subject offset in the args
// slice. It varies based on the optional presence of origin or account name
// fields (tha latter would not be present for "per-account" routes).
var subjIdx int
// If account is present, this is its "char" position in arg slice.
var accPos int
if hasOrigin {
// Set to 1, will be adjusted if the account is also expected.
subjIdx = 1
sub.origin = args[0]
// The account would start after the origin and trailing space.
accPos = len(sub.origin) + 1
}
// We copy `argo` to not reference the read buffer. However, we will
// prefix with a code that says if the remote sub is for a leaf
// (hasOrigin == true) or not to prevent key collisions. Imagine:
// "RS+ foo bar baz 1\r\n" => "foo bar baz" (a routed queue sub)
// "LS+ foo bar baz\r\n" => "foo bar baz" (a route leaf sub on "baz",
// for account "bar" with origin "foo").
//
// The sub.sid/key will be set respectively to "R foo bar baz" and
// "L foo bar baz".
//
// We also no longer add the account if it was not present (due to
// pinned-account route) since there is no need really.
//
// For routes to older server, we will still create the "arg" with
// the above layout, but we will create the sub.sid/key as before,
// that is, not including the origin for LS+ because older server
// only send LS- without origin, so we would not be able to find
// the sub in the map.
c.mu.Lock()
accountName := string(c.route.accName)
oldStyle := !c.route.lnocu
c.mu.Unlock()
// If the route is dedicated to an account, accountName will not
// be empty. If it is, then the account must be in the protocol.
var accInProto bool
if accountName == _EMPTY_ {

// Indicate if the account name should be in the protocol. It would be the
// case if accountName is empty.
accInProto := accountName == _EMPTY_

// Copy so we do not reference a potentially large buffer.
// Add 2 more bytes for the routed sub type.
arg := make([]byte, 0, 2+len(argo))
if hasOrigin {
arg = append(arg, keyRoutedLeafSubByte)
} else {
arg = append(arg, keyRoutedSubByte)
}
arg = append(arg, ' ')
arg = append(arg, argo...)

// Now split to get all fields. Unroll splitArgs to avoid runtime/heap issues.
a := [MAX_RSUB_ARGS][]byte{}
args := a[:0]
start := -1
for i, b := range arg {
switch b {
case ' ', '\t', '\r', '\n':
if start >= 0 {
args = append(args, arg[start:i])
start = -1
}
default:
if start < 0 {
start = i
}
}
}
if start >= 0 {
args = append(args, arg[start:])
}

delta := int32(1)
sub := &subscription{client: c}

// There will always be at least a subject, but its location will depend
// on if there is an origin, an account name, etc.. Since we know that
// we have added the sub type indicator as the first field, the subject
// position will be at minimum at index 1.
subjIdx := 1
if hasOrigin {
subjIdx++
}
if accInProto {
subjIdx++
accInProto = true
}
switch len(args) {
case subjIdx + 1:
@@ -1358,15 +1481,50 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) {
case subjIdx + 3:
sub.queue = args[subjIdx+1]
sub.qw = int32(parseSize(args[subjIdx+2]))
// TODO: (ik) We should have a non empty queue name and a queue
// weight >= 1. For 2.11, we may want to return an error if that
// is not the case, but for now just overwrite `delta` if queue
// weight is greater than 1 (it is possible after a reconnect/
// server restart to receive a queue weight > 1 for a new sub).
if sub.qw > 1 {
delta = sub.qw
}
default:
return fmt.Errorf("processRemoteSub Parse Error: '%s'", arg)
}
// We know that the number of fields is correct. So we can access args[] based
// on where we expect the fields to be.

// If there is an origin, it will be at index 1.
if hasOrigin {
sub.origin = args[1]
}
// For subject, use subjIdx.
sub.subject = args[subjIdx]
// If the account name is empty (not a "per-account" route), the account
// is at the index prior to the subject.
if accountName == _EMPTY_ {
// If the account name is in the protocol, it will be before the subject.
if accInProto {
accountName = bytesToString(args[subjIdx-1])
}
// Now set the sub.sid from the arg slice. However, we will have a different
// one if we use the origin or not.
start = 0
end := len(arg)
if sub.queue != nil {
// Remove the ' <weight>' from the arg length.
end -= 1 + len(args[subjIdx+2])
}
if oldStyle {
// We will start at the account (if present) or at the subject.
// We first skip the "R " or "L "
start = 2
// And if there is an origin skip that.
if hasOrigin {
start += len(sub.origin) + 1
}
// Here we are pointing at the account (if present), or at the subject.
}
sub.sid = arg[start:end]

// Lookup account while avoiding fetch.
// A slow fetch delays subsequent remote messages. It also avoids the expired check (see below).
// With all but memory resolver lookup can be delayed or fail.
@@ -1424,33 +1582,6 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) {
return nil
}

// We store local subs by account and subject and optionally queue name.
// If we have a queue it will have a trailing weight which we do not want.
if sub.queue != nil {
// if the account is in the protocol, we can reference directly "arg",
// otherwise, we need to allocate/construct the sid.
if accInProto {
sub.sid = arg[accPos : accPos+len(accountName)+1+len(sub.subject)+1+len(sub.queue)]
} else {
// It is unfortunate that we have to do this, but the gain of not
// having the account name in message protocols outweight the
// penalty of having to do this here for the processing of a
// subscription.
sub.sid = append(sub.sid, accountName...)
sub.sid = append(sub.sid, ' ')
sub.sid = append(sub.sid, sub.subject...)
sub.sid = append(sub.sid, ' ')
sub.sid = append(sub.sid, sub.queue...)
}
} else if accInProto {
sub.sid = arg[accPos:]
} else {
sub.sid = append(sub.sid, accountName...)
sub.sid = append(sub.sid, ' ')
sub.sid = append(sub.sid, sub.subject...)
}
key := bytesToString(sub.sid)

acc.mu.RLock()
// For routes (this can be called by leafnodes), check if the account is
// transitioning (from pool to dedicated route) and this route is not a
@@ -1465,9 +1596,10 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) {
}
sl := acc.sl
acc.mu.RUnlock()

// We use the sub.sid for the key of the c.subs map.
key := bytesToString(sub.sid)
osub := c.subs[key]
updateGWs := false
delta := int32(1)
if osub == nil {
c.subs[key] = sub
// Now place into the account sl.
@@ -1478,7 +1610,6 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) {
c.sendErr("Invalid Subscription")
return nil
}
updateGWs = srv.gateway.enabled
} else if sub.queue != nil {
// For a queue we need to update the weight.
delta = sub.qw - atomic.LoadInt32(&osub.qw)
@@ -1487,7 +1618,7 @@ func (c *client) processRemoteSub(argo []byte, hasOrigin bool) (err error) {
}
c.mu.Unlock()

if updateGWs {
if srv.gateway.enabled {
srv.gatewayUpdateSubInterest(acc.Name, sub, delta)
}

@@ -1509,10 +1640,14 @@ func (c *client) addRouteSubOrUnsubProtoToBuf(buf []byte, accName string, sub *s
if isSubProto {
buf = append(buf, lSubBytes...)
buf = append(buf, sub.origin...)
buf = append(buf, ' ')
} else {
buf = append(buf, lUnsubBytes...)
if c.route.lnocu {
buf = append(buf, sub.origin...)
buf = append(buf, ' ')
}
}
buf = append(buf, ' ')
} else {
if isSubProto {
buf = append(buf, rSubBytes...)
@@ -1613,18 +1748,27 @@ func (s *Server) sendSubsToRoute(route *client, idx int, account string) {
for _, a := range accs {
a.mu.RLock()
for key, n := range a.rm {
var subj, qn []byte
s := strings.Split(key, " ")
subj = []byte(s[0])
if len(s) > 1 {
qn = []byte(s[1])
var origin, qn []byte
s := strings.Fields(key)
// Subject will always be the second field (index 1).
subj := stringToBytes(s[1])
// Check if the key is for a leaf (will be field 0).
forLeaf := s[0] == keyRoutedLeafSub
// For queue, if not for a leaf, we need 3 fields "R foo bar",
// but if for a leaf, we need 4 fields "L foo bar leaf_origin".
if l := len(s); (!forLeaf && l == 3) || (forLeaf && l == 4) {
qn = stringToBytes(s[2])
}
// s[0] is the subject and already as a string, so use that
if forLeaf {
// The leaf origin will be the last field.
origin = stringToBytes(s[len(s)-1])
}
// s[1] is the subject and already as a string, so use that
// instead of converting back `subj` to a string.
if !route.canImport(s[0]) {
if !route.canImport(s[1]) {
continue
}
sub := subscription{subject: subj, queue: qn, qw: n}
sub := subscription{origin: origin, subject: subj, queue: qn, qw: n}
buf = route.addRouteSubOrUnsubProtoToBuf(buf, a.Name, &sub, true)
}
a.mu.RUnlock()
@@ -2286,8 +2430,9 @@ func (s *Server) updateRouteSubscriptionMap(acc *Account, sub *subscription, del
return
}

// Create the fast key which will use the subject or 'subject<spc>queue' for queue subscribers.
key := keyFromSub(sub)
// Create the subscription key which will prevent collisions between regular
// and leaf routed subscriptions. See keyFromSubWithOrigin() for details.
key := keyFromSubWithOrigin(sub)

// Decide whether we need to send an update out to all the routes.
update := isq
@@ -2481,6 +2626,7 @@ func (s *Server) startRouteAcceptLoop() {
Domain: s.info.Domain,
Dynamic: s.isClusterNameDynamic(),
LNOC: true,
LNOCU: true,
}
// For tests that want to simulate old servers, do not set the compression
// on the INFO protocol if configured with CompressionNotSupported.
@@ -2795,6 +2941,7 @@ func (c *client) processRouteConnect(srv *Server, arg []byte, lang string) error
c.mu.Lock()
c.route.remoteID = c.opts.Name
c.route.lnoc = proto.LNOC
c.route.lnocu = proto.LNOCU
c.setRoutePermissions(perms)
c.headers = supportsHeaders && proto.Headers
c.mu.Unlock()
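The thread running through these route.go changes is the keyed storage of routed subscriptions: a one-byte type indicator ("R" or "L") is prepended so a routed queue sub and a leaf sub with an origin can no longer collide on the same key, as the comment block in processRemoteSub explains. The collision, stripped to its essentials:

    package example

    import "fmt"

    // Without a type prefix, two distinct protocols flatten to the same
    // key "foo bar baz":
    //   RS+ foo bar baz 1   (routed queue sub, weight 1)
    //   LS+ foo bar baz     (leaf sub on "baz", account "bar", origin "foo")
    func subKey(leaf bool, fields string) string {
    	if leaf {
    		return "L " + fields
    	}
    	return "R " + fields
    }

    func Example() {
    	keys := map[string]string{}
    	keys[subKey(false, "foo bar baz")] = "routed queue sub"
    	keys[subKey(true, "foo bar baz")] = "leaf sub with origin"
    	fmt.Println(len(keys)) // 2: no collision
    }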
25  vendor/github.com/nats-io/nats-server/v2/server/sendq.go  (generated, vendored)
@@ -1,4 +1,4 @@
// Copyright 2020-2023 The NATS Authors
// Copyright 2020-2024 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
@@ -56,6 +56,8 @@ func (sq *sendq) internalLoop() {
rply [256]byte
szb [10]byte
hdb [10]byte
_msg [4096]byte
msg = _msg[:0]
)

for s.isRunning() {
@@ -73,16 +75,18 @@ func (sq *sendq) internalLoop() {
} else {
c.pa.reply = nil
}
var msg []byte
msg = msg[:0]
if len(pm.hdr) > 0 {
c.pa.hdr = len(pm.hdr)
c.pa.hdb = append(hdb[:0], strconv.Itoa(c.pa.hdr)...)
msg = append(pm.hdr, pm.msg...)
msg = append(msg, pm.hdr...)
msg = append(msg, pm.msg...)
msg = append(msg, _CRLF_...)
} else {
c.pa.hdr = -1
c.pa.hdb = nil
msg = append(pm.msg, _CRLF_...)
msg = append(msg, pm.msg...)
msg = append(msg, _CRLF_...)
}
c.processInboundClientMsg(msg)
c.pa.szb = nil
@@ -107,16 +111,7 @@ func (sq *sendq) send(subj, rply string, hdr, msg []byte) {
}
out := outMsgPool.Get().(*outMsg)
out.subj, out.rply = subj, rply
out.hdr, out.msg = nil, nil

// We will copy these for now.
if len(hdr) > 0 {
hdr = copyBytes(hdr)
out.hdr = hdr
}
if len(msg) > 0 {
msg = copyBytes(msg)
out.msg = msg
}
out.hdr = append(out.hdr[:0], hdr...)
out.msg = append(out.msg[:0], msg...)
sq.q.push(out)
}
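The sendq changes drop the copyBytes allocations in favor of appending into the pooled outMsg's existing backing arrays, and the internal loop likewise reuses one scratch buffer. The idiom on its own (a sketch):

    package example

    // reuse appends src into dst's backing array, reusing capacity across
    // calls; callers must not retain aliases of the returned slice.
    func reuse(dst, src []byte) []byte {
    	return append(dst[:0], src...)
    }

    // Typical loop shape: one buffer, reset each iteration, no per-message
    // allocation once capacity has grown to the largest payload.
    func frames(payloads [][]byte) int {
    	var buf []byte
    	n := 0
    	for _, p := range payloads {
    		buf = reuse(buf, p)
    		buf = append(buf, '\r', '\n')
    		n += len(buf)
    	}
    	return n
    }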
15  vendor/github.com/nats-io/nats-server/v2/server/server.go  (generated, vendored)
@@ -94,6 +94,7 @@ type Info struct {
Import *SubjectPermission `json:"import,omitempty"`
Export *SubjectPermission `json:"export,omitempty"`
LNOC bool `json:"lnoc,omitempty"`
LNOCU bool `json:"lnocu,omitempty"`
InfoOnConnect bool `json:"info_on_connect,omitempty"` // When true the server will respond to CONNECT with an INFO
ConnectInfo bool `json:"connect_info,omitempty"` // When true this is the server INFO response to CONNECT
RoutePoolSize int `json:"route_pool_size,omitempty"`
@@ -140,8 +141,10 @@ type Server struct {
listenerErr error
gacc *Account
sys *internal
sysAcc atomic.Pointer[Account]
js atomic.Pointer[jetStream]
isMetaLeader atomic.Bool
jsClustered atomic.Bool
accounts sync.Map
tmpAccounts sync.Map // Temporarily stores accounts that are being built
activeAccounts int32
@@ -1280,6 +1283,7 @@ func (s *Server) configureAccounts(reloading bool) (map[string]struct{}, error)
if err == nil && s.sys != nil && acc != s.sys.account {
// sys.account.clients (including internal client)/respmap/etc... are transferred separately
s.sys.account = acc
s.sysAcc.Store(acc)
}
if err != nil {
return awcsti, fmt.Errorf("error resolving system account: %v", err)
@@ -1635,13 +1639,7 @@ func (s *Server) SetSystemAccount(accName string) error {

// SystemAccount returns the system account if set.
func (s *Server) SystemAccount() *Account {
var sacc *Account
s.mu.RLock()
if s.sys != nil {
sacc = s.sys.account
}
s.mu.RUnlock()
return sacc
return s.sysAcc.Load()
}

// GlobalAccount returns the global account.
@@ -1713,6 +1711,9 @@ func (s *Server) setSystemAccount(acc *Account) error {
s.sys.wg.Add(1)
s.mu.Unlock()

// Store in atomic for fast lookup.
s.sysAcc.Store(acc)

// Register with the account.
s.sys.client.registerWithAccount(acc)
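Caching the system account in an atomic.Pointer makes SystemAccount a lock-free read, which matters now that hot paths such as Raftz consult it. The pattern in miniature (a sketch, not the server's code):

    package example

    import (
    	"sync"
    	"sync/atomic"
    )

    type account struct{ Name string }

    type srv struct {
    	mu  sync.Mutex // still guards the rare write path
    	sys atomic.Pointer[account]
    }

    // setSystem is infrequent and may take the lock.
    func (s *srv) setSystem(a *account) {
    	s.mu.Lock()
    	defer s.mu.Unlock()
    	s.sys.Store(a)
    }

    // system is hot and lock-free; returns nil if unset.
    func (s *srv) system() *account {
    	return s.sys.Load()
    }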
Some files were not shown because too many files have changed in this diff.