mirror of https://github.com/cs3org/reva.git synced 2025-04-18 13:44:12 +03:00

Refactor of CI (#4797)

Hugo Labrador 2024-09-09 18:52:20 +02:00 committed by GitHub
parent 4e6412ffb8
commit 5114d3326c
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
50 changed files with 385 additions and 3887 deletions

29
.github/workflows/test-docker.yml vendored Normal file
View File

@ -0,0 +1,29 @@
name: Test
on:
pull_request:
push:
branches:
- "*"
workflow_dispatch:
jobs:
docker:
strategy:
fail-fast: false
matrix:
file: [docker/Dockerfile.reva, docker/Dockerfile.revad-eos, docker/Dockerfile.revad-ceph]
uses: ./.github/workflows/docker.yml
with:
file: ${{ matrix.file }}
docker-revad:
name: docker (docker/Dockerfile.revad)
uses: ./.github/workflows/docker.yml
with:
file: docker/Dockerfile.revad
load: true
docker-revad-eos:
name: docker (docker/Dockerfile.revad-eos)
uses: ./.github/workflows/docker.yml
with:
file: docker/Dockerfile.revad-eos
load: false

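The jobs above delegate the actual image build to a reusable workflow, ./.github/workflows/docker.yml, which is referenced via uses:/with: but not included in this diff. A minimal sketch of what its workflow_call interface presumably looks like, assuming only the file and load inputs seen above (the job body and action versions are illustrative, not taken from the repository):

# .github/workflows/docker.yml -- hypothetical sketch, not part of this commit
name: Docker
on:
  workflow_call:
    inputs:
      file:
        description: Dockerfile to build
        required: true
        type: string
      load:
        description: load the built image into the local daemon for later jobs
        required: false
        type: boolean
        default: false
jobs:
  build:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v4
      - uses: docker/build-push-action@v5
        with:
          file: ${{ inputs.file }}
          load: ${{ inputs.load }}

Splitting the old monolithic workflow this way keeps each test-*.yml file small while the build logic lives in one place.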
37
.github/workflows/test-integration.yml vendored Normal file
View File

@ -0,0 +1,37 @@
name: Test
on:
pull_request:
push:
branches:
- "*"
workflow_dispatch:
jobs:
integration:
runs-on: self-hosted
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go environment
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: false
- name: Test integration
run: make test-integration
env:
REDIS_ADDRESS: redis:6379
SQL_USERNAME: root
SQL_PASSWORD: my-secret-pw
SQL_ADDRESS: localhost:3306
SQL_DBNAME: reva
services:
redis:
image: registry.cern.ch/docker.io/webhippie/redis
mysql:
image: mysql
ports:
- 3306:3306
env:
MYSQL_ROOT_PASSWORD: my-secret-pw
MYSQL_DATABASE: reva

19
.github/workflows/test-litmus.yml vendored Normal file
View File

@ -0,0 +1,19 @@
name: Test
on:
pull_request:
push:
branches:
- "*"
workflow_dispatch:
jobs:
litmus:
needs:
- docker-revad-eos
strategy:
fail-fast: false
matrix:
test: [litmus-1, litmus-2]
uses: ./.github/workflows/compose.yml
with:
test: ${{ matrix.test }}

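The litmus matrix runs through a second reusable workflow, ./.github/workflows/compose.yml, also not part of this diff. Judging from the inputs passed to it here and in the removed monolithic workflow further down (test, submodules, parts, part), its call interface plausibly looks like the following sketch; the job body is an assumption:

# .github/workflows/compose.yml -- hypothetical sketch, not part of this commit
name: Compose
on:
  workflow_call:
    inputs:
      test:
        required: true
        type: string
      submodules:
        required: false
        type: boolean
        default: false
      parts:
        required: false
        type: number
        default: 1
      part:
        required: false
        type: number
        default: 1
jobs:
  compose:
    runs-on: self-hosted
    steps:
      - uses: actions/checkout@v4
        with:
          submodules: ${{ inputs.submodules }}
      - name: Run the selected suite
        run: make ${{ inputs.test }}
        env:
          PARTS: ${{ inputs.parts }}
          PART: ${{ inputs.part }}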
30
.github/workflows/test-unit.yml vendored Normal file
View File

@ -0,0 +1,30 @@
name: Test
on:
pull_request:
push:
branches:
- "*"
workflow_dispatch:
jobs:
unit-tests:
runs-on: self-hosted
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go environment
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: false
- name: Test
run: make test-go
env:
COVER_PROFILE: ${{ github.event_name == 'push' && 'coverage.out' || '' }}
- name: Run codacy-coverage-reporter
uses: codacy/codacy-coverage-reporter-action@v1
if: github.event_name == 'push'
with:
project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
coverage-reports: coverage.out
force-coverage-parser: go

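The COVER_PROFILE expression uses the usual && / || idiom in GitHub Actions: on push events it evaluates to 'coverage.out', otherwise to an empty string, so a coverage profile is only produced (and uploaded to Codacy) for pushes. A plausible sketch of how the Makefile's test-go target could consume it; the real recipe is not shown in this diff:

# Makefile sketch (assumption): only pass -coverprofile when COVER_PROFILE is set
test-go:
ifeq ($(COVER_PROFILE),)
	go test ./...
else
	go test -coverprofile $(COVER_PROFILE) ./...
endif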
View File

@ -1,108 +0,0 @@
name: Test
on:
pull_request:
push:
branches:
- "*"
workflow_dispatch:
jobs:
integration:
runs-on: self-hosted
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go environment
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: false
- name: Test integration
run: make test-integration
env:
REDIS_ADDRESS: redis:6379
SQL_USERNAME: root
SQL_PASSWORD: my-secret-pw
SQL_ADDRESS: localhost:3306
SQL_DBNAME: reva
services:
redis:
image: registry.cern.ch/docker.io/webhippie/redis
mysql:
image: mysql
ports:
- 3306:3306
env:
MYSQL_ROOT_PASSWORD: my-secret-pw
MYSQL_DATABASE: reva
go:
runs-on: self-hosted
steps:
- name: Checkout
uses: actions/checkout@v4
- name: Setup Go environment
uses: actions/setup-go@v4
with:
go-version-file: go.mod
cache: false
- name: Test
run: make test-go
env:
COVER_PROFILE: ${{ github.event_name == 'push' && 'coverage.out' || '' }}
- name: Run codacy-coverage-reporter
uses: codacy/codacy-coverage-reporter-action@v1
if: github.event_name == 'push'
with:
project-token: ${{ secrets.CODACY_PROJECT_TOKEN }}
coverage-reports: coverage.out
force-coverage-parser: go
docker:
strategy:
fail-fast: false
matrix:
file: [docker/Dockerfile.reva, docker/Dockerfile.revad-eos, docker/Dockerfile.revad-ceph]
uses: ./.github/workflows/docker.yml
with:
file: ${{ matrix.file }}
docker-revad:
name: docker (docker/Dockerfile.revad)
uses: ./.github/workflows/docker.yml
with:
file: docker/Dockerfile.revad
load: true
docker-revad-eos:
name: docker (docker/Dockerfile.revad-eos)
uses: ./.github/workflows/docker.yml
with:
file: docker/Dockerfile.revad-eos
load: false
litmus:
needs:
- docker-revad-eos
strategy:
fail-fast: false
matrix:
test: [litmus-1, litmus-2]
uses: ./.github/workflows/compose.yml
with:
test: ${{ matrix.test }}
acceptance-1:
needs:
- docker-revad-eos
uses: ./.github/workflows/compose.yml
with:
test: acceptance-1
submodules: true
acceptance-2:
needs:
- docker-revad-eos
strategy:
fail-fast: false
matrix:
part: [1, 2, 3, 4, 5, 6]
uses: ./.github/workflows/compose.yml
with:
test: acceptance-2
submodules: true
parts: 6
part: ${{ matrix.part }}

View File

@ -2,74 +2,9 @@ run:
timeout: 20m
linters:
enable-all: true
disable:
- exhaustive # TODO: consider enabling the 'exhaustive' linter to check the exhaustiveness of enum switch statements and map literals.
- wrapcheck # TODO: consider enabling the 'wrapcheck' linter to check that errors from external packages are wrapped during return to help identify the error source during debugging.
- cyclop # TODO: consider enabling the 'cyclop' linter to calculate the cyclomatic complexities of functions/packages.
- varnamelen # TODO: consider enabling the 'varnamelen' linter to check that the length of a variable's name matches its usage scope.
- testpackage # TODO: consider enabling the 'testpackage' linter to make sure that separate _test packages are used.
- gosec # TODO: consider enabling the 'gosec' linter to inspect source code for security problems.
- tagliatelle # TODO: consider enabling the 'tagliatelle' linter to check the struct tags.
- thelper # TODO: consider enabling the 'thelper' linter to detect golang test helpers without t.Helper() call and check the consistency of test helpers.
- predeclared # TODO: consider enabling the 'predeclared' linter to find code that shadows one of Go's predeclared identifiers.
- paralleltest # TODO: consider enabling the 'paralleltest' linter to detect missing usage of t.Parallel() method in Go test.
- ireturn # TODO: consider enabling the 'ireturn' linter to accept interfaces and return concrete types.
- nosprintfhostport # TODO: consider enabling the 'nosprintfhostport' linter to check for misuse of Sprintf to construct a host with port in a URL.
- nonamedreturns # TODO: consider enabling the 'nonamedreturns' linter to reports all named returns.
- gomnd # TODO: consider enabling the 'gomnd' linter to detect magic numbers.
- noctx # TODO: consider enabling the 'noctx' linter to find sending http request without context.Context.
- nlreturn # TODO: consider enabling the 'nlreturn' linter to check for a new line before return and branch statements to increase code clarity.
- nilnil # TODO: consider enabling the 'nilnil' linter to check that there is no simultaneous return of nil error and an invalid value.
- nilerr # TODO: consider enabling the 'nilerr' linter to find the code that returns nil even if it checks that the error is not nil.
- interfacebloat # TODO: consider enabling the 'interfacebloat' linter to check the number of methods inside an interface.
- goerr113 # TODO: consider enabling the 'goerr113' linter to check the errors handling expressions.
- gochecknoglobals # TODO: consider enabling the 'gochecknoglobals' linter to check that no global variables exist.
- forcetypeassert # TODO: consider enabling the 'forcetypeassert' linter to find forced type assertions.
- exhaustruct # TODO: consider enabling the 'exhaustruct' linter to check if all structure fields are initialized.
- execinquery # TODO: consider enabling the 'execinquery' linter to check query strings.
- errorlint # TODO: consider enabling the 'errorlint' linter to find code that will cause problems with the error wrapping scheme introduced in Go 1.13.
- errname # TODO: consider enabling the 'errname' linter to check that sentinel errors are prefixed with the Err and error types are suffixed with the Error.
- wsl # TODO: consider enabling the 'wsl' linter to force the use of empty lines.
- nestif # TODO: consider enabling the 'nestif' linter to report deeply nested if statements.
- errchkjson # TODO: consider enabling the 'errchkjson' linter to checks types passed to the json encoding functions.
- contextcheck # TODO: consider enabling the 'contextcheck' linter to check whether the function uses a non-inherited context.
- asasalint # TODO: consider enabling the 'asasalint' linter to check for pass []any as any in variadic func(...any).
- containedctx # TODO: consider enabling the 'containedctx' linter to detect struct contained context.Context field.
- unparam # TODO: consider enabling the 'unparam' linter to report unused function parameters.
- nakedret # TODO: consider enabling the 'nakedret' linter to find naked returns in functions greater than a specified function length.
- makezero # TODO: consider enabling the 'makezero' linter to find slice declarations with non-zero initial length.
- lll # TODO: consider enabling the 'lll' linter to report long lines.
- gomoddirectives # TODO: consider enabling the 'gomoddirectives' linter to manage the use of 'replace', 'retract', and 'excludes' directives in go.mod.
- gofumpt # TODO: consider enabling the 'gofumpt' linter to check whether code was gofumpt-ed.
- godox # TODO: consider enabling the 'godox' linter to detect FIXME, TODO and other comment keywords.
- goconst # TODO: consider enabling the 'goconst' linter to find repeated strings that could be replaced by a constant.
- gocognit # TODO: consider enabling the 'gocognit' linter to compute and check the cognitive complexity of functions.
- gochecknoinits # TODO: consider enabling the 'gochecknoinits' linter to check that no init functions are present in Go code.
- gci # TODO: consider enabling the 'gci' linter to control golang package import order and make it always deterministic.
- funlen # TODO: consider enabling the 'funlen' linter to detect long functions.
- maintidx # TODO: consider enabling the 'maintidx' linter to measure the maintainability index of each function.
- gocyclo # TODO: consider enabling the 'gocyclo' linter to compute and check the cyclomatic complexity of functions.
- forbidigo # TODO: consider enabling the 'forbidigo' linter to forbid identifiers.
- dupl # TODO: consider enabling the 'dupl' linter to detect code cloning.
- musttag
- ginkgolinter
- depguard
- revive
- nolintlint
- golint # deprecated since v1.41.0 - replaced by 'revive'.
- ifshort # deprecated since v1.48.0
- structcheck # deprecated since v1.49.0 - replaced by 'unused'.
- exhaustivestruct # deprecated since v1.46.0 - replaced by 'exhaustruct'.
- deadcode # deprecated since v1.49.0 - replaced by 'unused'.
- interfacer # deprecated since v1.38.0
- nosnakecase # deprecated since v1.48.1 - replaced by 'revive'(var-naming).
- varcheck # deprecated since v1.49.0 - replaced by 'unused'.
- maligned # deprecated since v1.38.0 - replaced by 'govet' 'fieldalignment'.
- scopelint # deprecated since v1.39.0 - replaced by 'exportloopref'.
- rowserrcheck # disabled because of generics - https://github.com/golangci/golangci-lint/issues/2649.
- sqlclosecheck # disabled because of generics - https://github.com/golangci/golangci-lint/issues/2649.
- wastedassign # disabled because of generics - https://github.com/golangci/golangci-lint/issues/2649.
disable-all: true
enable:
- govet
linters-settings:
goheader:
@ -90,4 +25,9 @@ linters-settings:
In applying this license, CERN does not waive the privileges and immunities
granted to it by virtue of its status as an Intergovernmental Organization
or submit itself to any jurisdiction.
or submit itself to any jurisdiction.
issues:
exclude-rules:
- linters:
- govet
text: ".*lock.*"

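The linter policy is inverted here: instead of enable-all with a long disable list, the config now disables everything and enables only govet, and the new issues section excludes govet findings whose message contains "lock" — most commonly the copylocks check. As an illustration (not code from this repository), this is the kind of finding that exclude-rule suppresses:

// Illustration only: govet's copylocks check reports
// "report passes lock by value: main.counter contains sync.Mutex"
// for code like this, and the exclude-rule above matches that text.
package main

import "sync"

type counter struct {
	mu sync.Mutex
	n  int
}

func report(c counter) int { // counter is copied here, taking the mutex with it
	return c.n
}

func main() {
	var c counter
	_ = report(c)
}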
View File

@ -18,7 +18,7 @@ toolchain: $(GOLANGCI_LINT) $(CALENS)
$(GOLANGCI_LINT):
@mkdir -p $(@D)
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | BINDIR=$(@D) sh -s v1.54.2
curl -sSfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | BINDIR=$(@D) sh -s v1.60.3
CALENS_DIR := $(shell mktemp -d)
$(CALENS):
@ -87,7 +87,7 @@ docker-eos-full-tests:
# Test
################################################################################
TEST = litmus-1 litmus-2 acceptance-1 acceptance-2
TEST = litmus-1 litmus-2
export REVAD_IMAGE ?= revad-eos
export EOS_FULL_IMAGE ?= eos-full
export PARTS ?= 1

View File

@ -0,0 +1,3 @@
Enhancement: Refactor CI jobs and bump to latest deps
https://github.com/cs3org/reva/pull/4797

View File

@ -57,14 +57,14 @@ func getClient() (gateway.GatewayAPIClient, error) {
func getConn() (*grpc.ClientConn, error) {
if insecure {
return grpc.Dial(conf.Host, grpc.WithTransportCredentials(ins.NewCredentials()))
return grpc.NewClient(conf.Host, grpc.WithTransportCredentials(ins.NewCredentials()))
}
// TODO(labkode): if in the future we want client-side certificate validation,
// we need to load the client cert here
tlsconf := &tls.Config{InsecureSkipVerify: skipverify}
creds := credentials.NewTLS(tlsconf)
return grpc.Dial(conf.Host, grpc.WithTransportCredentials(creds))
return grpc.NewClient(conf.Host, grpc.WithTransportCredentials(creds))
}
func formatError(status *rpc.Status) error {

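grpc.Dial is being phased out in recent grpc-go releases (go.mod below moves to google.golang.org/grpc v1.65.0); grpc.NewClient takes the same arguments but creates the connection lazily, on the first RPC or an explicit conn.Connect(), and defaults to the dns resolver instead of passthrough. A minimal self-contained sketch of the new pattern, separate from the reva code above:

// Minimal sketch assuming grpc-go >= v1.63, where grpc.NewClient was introduced.
package main

import (
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func newConn(host string) (*grpc.ClientConn, error) {
	// Connects lazily; an error here indicates a configuration problem, not a dial failure.
	return grpc.NewClient(host, grpc.WithTransportCredentials(insecure.NewCredentials()))
}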
View File

@ -72,7 +72,7 @@ func lsCommand() *command {
}
if len(w) == 0 {
if *longFlag {
fmt.Printf("%s %d %d %v %s\n", info.Type, info.Mtime, info.Size, info.Id, p)
fmt.Printf("%s %s %d %v %s\n", info.Type, info.Mtime, info.Size, info.Id, p)
} else {
fmt.Println(p)
}

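The verb change reflects that info.Mtime is a *types.Timestamp message rather than an integer, so %d is flagged by printf checks; %s (or %v) goes through the message's generated String method. Illustration only, using the types alias seen in the test files below:

// Illustration: printing a protobuf timestamp field.
ts := &types.Timestamp{Seconds: 1234567890}
fmt.Printf("%s\n", ts) // formatted via the generated String() method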
30
go.mod
View File

@ -9,11 +9,12 @@ require (
github.com/bluele/gcache v0.0.2
github.com/c-bata/go-prompt v0.2.6
github.com/ceph/go-ceph v0.26.0
github.com/cern-eos/go-eosgrpc v0.0.0-20240812132646-f105d2304f38
github.com/cheggaaa/pb v1.0.29
github.com/coreos/go-oidc/v3 v3.9.0
github.com/creasty/defaults v1.7.0
github.com/cs3org/cato v0.0.0-20200828125504-e418fc54dd5e
github.com/cs3org/go-cs3apis v0.0.0-20230727093620-0f4399be4543
github.com/cs3org/go-cs3apis v0.0.0-20240802083356-d617314e1795
github.com/dgraph-io/ristretto v0.1.1
github.com/dolthub/go-mysql-server v0.14.0
github.com/gdexlab/go-render v1.0.1
@ -33,6 +34,7 @@ require (
github.com/grpc-ecosystem/go-grpc-middleware v1.4.0
github.com/grpc-ecosystem/go-grpc-middleware/providers/prometheus v1.0.0
github.com/jedib0t/go-pretty v4.3.0+incompatible
github.com/jt-nti/gproto v0.0.0-20210304092907-23e645af1351
github.com/juliangruber/go-intersect v1.1.0
github.com/mattn/go-sqlite3 v1.14.22
github.com/maxymania/go-system v0.0.0-20170110133659-647cc364bf0b
@ -53,14 +55,14 @@ require (
github.com/wk8/go-ordered-map v1.0.0
go.opencensus.io v0.24.0
go.step.sm/crypto v0.43.1
golang.org/x/crypto v0.21.0
golang.org/x/oauth2 v0.18.0
golang.org/x/sync v0.6.0
golang.org/x/sys v0.18.0
golang.org/x/term v0.18.0
golang.org/x/crypto v0.23.0
golang.org/x/oauth2 v0.20.0
golang.org/x/sync v0.7.0
golang.org/x/sys v0.20.0
golang.org/x/term v0.20.0
google.golang.org/genproto v0.0.0-20240314234333-6e1732d8331c
google.golang.org/grpc v1.62.1
google.golang.org/protobuf v1.33.0
google.golang.org/grpc v1.65.0
google.golang.org/protobuf v1.34.1
gotest.tools v2.2.0+incompatible
)
@ -72,9 +74,8 @@ require (
github.com/asaskevich/govalidator v0.0.0-20230301143203-a9d515a09cc2 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/bmizerany/pat v0.0.0-20210406213842-e4b6760bdd6f // indirect
github.com/cern-eos/go-eosgrpc v0.0.0-20240812132646-f105d2304f38 // indirect
github.com/cespare/xxhash v1.1.0 // indirect
github.com/cespare/xxhash/v2 v2.2.0 // indirect
github.com/cespare/xxhash/v2 v2.3.0 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/dolthub/vitess v0.0.0-20221031111135-9aad77e7b39f // indirect
github.com/dustin/go-humanize v1.0.1 // indirect
@ -87,7 +88,7 @@ require (
github.com/go-openapi/errors v0.22.0 // indirect
github.com/go-openapi/strfmt v0.23.0 // indirect
github.com/gocraft/dbr/v2 v2.7.2 // indirect
github.com/golang/glog v1.2.0 // indirect
github.com/golang/glog v1.2.1 // indirect
github.com/google/flatbuffers v2.0.8+incompatible // indirect
github.com/grpc-ecosystem/go-grpc-middleware/v2 v2.1.0 // indirect
github.com/hashicorp/golang-lru v1.0.2 // indirect
@ -122,11 +123,10 @@ require (
go.opentelemetry.io/otel v1.23.1 // indirect
go.opentelemetry.io/otel/trace v1.23.1 // indirect
golang.org/x/mod v0.13.0 // indirect
golang.org/x/net v0.23.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/net v0.25.0 // indirect
golang.org/x/text v0.15.0 // indirect
golang.org/x/tools v0.14.0 // indirect
google.golang.org/appengine v1.6.8 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 // indirect
gopkg.in/src-d/go-errors.v1 v1.0.0 // indirect
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect

58
go.sum
View File

@ -854,8 +854,9 @@ github.com/cespare/xxhash v1.1.0 h1:a6HrQnmkObjyL+Gs60czilIUGqrzKutQD6XZog3p+ko=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cheggaaa/pb v1.0.29 h1:FckUN5ngEk2LpvuG0fw1GEFx6LtyY2pWI/Z2QgCnEYo=
github.com/cheggaaa/pb v1.0.29/go.mod h1:W40334L7FMC5JKWldsTWbdGjLo0RxUKK73K+TuPxX30=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
@ -892,8 +893,8 @@ github.com/creasty/defaults v1.7.0 h1:eNdqZvc5B509z18lD8yc212CAqJNvfT1Jq6L8WowdB
github.com/creasty/defaults v1.7.0/go.mod h1:iGzKe6pbEHnpMPtfDXZEr0NVxWnPTjb1bbDy08fPzYM=
github.com/cs3org/cato v0.0.0-20200828125504-e418fc54dd5e h1:tqSPWQeueWTKnJVMJffz4pz0o1WuQxJ28+5x5JgaHD8=
github.com/cs3org/cato v0.0.0-20200828125504-e418fc54dd5e/go.mod h1:XJEZ3/EQuI3BXTp/6DUzFr850vlxq11I6satRtz0YQ4=
github.com/cs3org/go-cs3apis v0.0.0-20230727093620-0f4399be4543 h1:IFo6dj0XEOIA6i2baRWMC3vd+fAmuIUAVfSf77ZhoQg=
github.com/cs3org/go-cs3apis v0.0.0-20230727093620-0f4399be4543/go.mod h1:UXha4TguuB52H14EMoSsCqDj7k8a/t7g4gVP+bgY5LY=
github.com/cs3org/go-cs3apis v0.0.0-20240802083356-d617314e1795 h1:8WkweBxMQ1W6IhcK0X3eWY+aQCjEktGwVt/4KLrtOZ8=
github.com/cs3org/go-cs3apis v0.0.0-20240802083356-d617314e1795/go.mod h1:yyP8PRo0EZou3nSH7H4qjlzQwaydPeIRNgX50npQHpE=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@ -1027,8 +1028,8 @@ github.com/golang/freetype v0.0.0-20170609003504-e2365dfdc4a0/go.mod h1:E/TSTwGw
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.0.0/go.mod h1:EWib/APOK0SL3dFbYqvxE3UYd8E6s1ouQ7iEp/0LWV4=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/glog v1.2.0 h1:uCdmnmatrKCgMBlM4rMuJZWOkPDqdbZPnrMXDY4gI68=
github.com/golang/glog v1.2.0/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/glog v1.2.1 h1:OptwRhECazUx5ix5TTWC3EZhsZEHWcYWY4FQHTIubm4=
github.com/golang/glog v1.2.1/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20160516000752-02826c3e7903/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
@ -1218,6 +1219,8 @@ github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/jt-nti/gproto v0.0.0-20210304092907-23e645af1351 h1:jYsiD6zdBzctjZ4sDB+gGJJPB3NROHrUuCp/wUj5p9Y=
github.com/jt-nti/gproto v0.0.0-20210304092907-23e645af1351/go.mod h1:yfoLDf8VFUCWSxFJsPuQT5BlqdDbGkDl5m6hzABroMI=
github.com/jtolds/gls v4.20.0+incompatible/go.mod h1:QJZ7F/aHp+rZTRtaJ1ow/lLfFfVYBRgL+9YlvaHOwJU=
github.com/juliangruber/go-intersect v1.1.0 h1:sc+y5dCjMMx0pAdYk/N6KBm00tD/f3tq+Iox7dYDUrY=
github.com/juliangruber/go-intersect v1.1.0/go.mod h1:WMau+1kAmnlQnKiikekNJbtGtfmILU/mMU6H7AgKbWQ=
@ -1362,6 +1365,7 @@ github.com/onsi/ginkgo/v2 v2.13.0/go.mod h1:TE309ZR8s5FsKKpuB1YAQYBzCaAfUgatB/xl
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY=
github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo=
github.com/onsi/gomega v1.10.5/go.mod h1:gza4q3jKQJijlu05nKWRCW/GavJumGt8aNRxWg7mt48=
github.com/onsi/gomega v1.30.0 h1:hvMK7xYz4D3HapigLTeGdId/NcfQx1VHMJc60ew99+8=
github.com/onsi/gomega v1.30.0/go.mod h1:9sxs+SwGrKI0+PWe4Fxa9tFQQBG5xSsSbMXOI8PPpoQ=
github.com/op/go-logging v0.0.0-20160315200505-970db520ece7/go.mod h1:HzydrMdWErDVzsI23lYNej1Htcns9BCg93Dk0bBINWk=
@ -1591,8 +1595,8 @@ golang.org/x/crypto v0.11.0/go.mod h1:xgJhtzW8F9jGdVFWZESrid1U1bjeNy4zgy5cRr/CIi
golang.org/x/crypto v0.12.0/go.mod h1:NF0Gs7EO5K4qLn+Ylc+fih8BSTeIjAP05siRnAh98yw=
golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliYc=
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.21.0 h1:X31++rzVUdKhX5sWmSOFZxx8UW/ldWx55cbf08iNAMA=
golang.org/x/crypto v0.21.0/go.mod h1:0BP7YvVV9gBbVKyeTG0Gyn+gZm94bibOW5BjDEYAOMs=
golang.org/x/crypto v0.23.0 h1:dIJU/v2J8Mdglj/8rJ6UUOM3Zc9zLZxVZwwxMooUSAI=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/exp v0.0.0-20180321215751-8460e604b9de/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20180807140117-3d87b88a115f/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@ -1693,6 +1697,7 @@ golang.org/x/net v0.0.0-20200822124328-c89045814202/go.mod h1:/O7V0waA8r7cgGh81R
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201202161906-c7110b5ffcbb/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
@ -1727,8 +1732,8 @@ golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.12.0/go.mod h1:zEVYFnQC7m/vmpQFELhcD1EWkZlX69l4oqgmer6hfKA=
golang.org/x/net v0.14.0/go.mod h1:PpSgVXXLK0OxS0F31C1/tv6XNguvCrnXIDrFMspZIUI=
golang.org/x/net v0.23.0 h1:7EYJ93RZ9vYSZAIb2x3lnuvqO5zneoD6IvWjuhfxjTs=
golang.org/x/net v0.23.0/go.mod h1:JKghWKKOSdJwpW2GEx0Ja7fmaKnMsbu+MWVZTokSYmg=
golang.org/x/net v0.25.0 h1:d/OCCoBEUq33pjydKrGQhw7IlUPI2Oylr+8qLx49kac=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@ -1761,8 +1766,8 @@ golang.org/x/oauth2 v0.7.0/go.mod h1:hPLQkd9LyjfXTiRohC/41GhcFqxisoUQ99sCUOHO9x4
golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/oauth2 v0.18.0 h1:09qnuIAgzdx1XplqJvW6CQqMCtGZykZWcXzPMPUusvI=
golang.org/x/oauth2 v0.18.0/go.mod h1:Wf7knwG0MPoWIMMBgFlEaSUDaKskp0dCfrlJRJXbBi8=
golang.org/x/oauth2 v0.20.0 h1:4mQdhULixXKP1rwYBW0vAijoXnkTG0BLCDRzfe1idMo=
golang.org/x/oauth2 v0.20.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -1781,8 +1786,8 @@ golang.org/x/sync v0.0.0-20220929204114-8fcdb60fdcc0/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.2.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0 h1:5BMeUDZ7vkXGfEr1x9B4bRcTH4lpkTkpdh0T/J+qjbQ=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0 h1:YsImfSBoP9QPYL0xyKJPq0gcaJdG3rInoqxTWbfQu9M=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180823144017-11551d06cbcc/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -1795,7 +1800,6 @@ golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5h
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190415081028-16da32be82c5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190502145724-3ef323f4f1fd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20190507160741-ecd444e8653b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1898,8 +1902,8 @@ golang.org/x/sys v0.10.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.11.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.18.0 h1:DBdB3niSjOA/O0blCZBqDefyWNYveAYMNF1Wum0DYQ4=
golang.org/x/sys v0.18.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0 h1:Od9JTbYCk261bKm4M/mw7AklTlFYIa0bIp9BgSm1S8Y=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.1.0/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@ -1915,8 +1919,8 @@ golang.org/x/term v0.10.0/go.mod h1:lpqdcUyK/oCiQxvxVrppt5ggO2KCZ5QblwqPnfZ6d5o=
golang.org/x/term v0.11.0/go.mod h1:zC9APTIj3jG3FdV/Ons+XE1riIZXG4aZ4GTHiPZJPIU=
golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.18.0 h1:FcHjZXDMxI8mM3nwhX9HlKop4C0YQvCVCdwYl2wOtE8=
golang.org/x/term v0.18.0/go.mod h1:ILwASektA3OnRv7amZ1xhE/KTR+u50pbXfZ03+6Nx58=
golang.org/x/term v0.20.0 h1:VnkxpohqXaOBYJtBmEppKUG6mXpi+4O6purfc2+sMhw=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@ -1937,8 +1941,9 @@ golang.org/x/text v0.10.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.11.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.12.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0 h1:ScX5w1eTa3QqT8oi6+ziP7dTV1S2+ALU0bI+0zXKWiQ=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0 h1:h1V/4gjBv8v9cjcR6+AR5+/cIYK5N/WAgiv4xlsEtAk=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/time v0.0.0-20180412165947-fbb02b2291d2/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@ -2111,11 +2116,8 @@ google.golang.org/appengine v1.6.1/go.mod h1:i06prIuMbXzDqacNJfV5OdTW448YApPu5ww
google.golang.org/appengine v1.6.5/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.6/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc=
google.golang.org/appengine v1.6.8 h1:IhEN5q69dyKagZPYMSdIjS2HqprW324FRQZJcGqPAsM=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190307195333-5fe7a883aa19/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190404172233-64821d5d2107/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190418145605-e7d98fc518a7/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190425155659-357c62f0e4bb/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
google.golang.org/genproto v0.0.0-20190502173448-54afdca5d873/go.mod h1:VzzqZJRnGkLBvHegQrXjBqPurQTc5/KpmUdxsrq26oE=
@ -2278,8 +2280,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20230706204954-ccb25ca9f130/go.
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230731190214-cbb8c96f2d6d/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230807174057-1744710a1577/go.mod h1:+Bk1OCOj40wS2hwAMA+aCW9ypzm63QTBBHp6lQ3p+9M=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c h1:lfpJ/2rWPa/kJgxyyXM8PrNnfCzcmxJ265mADgwmvLI=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240314234333-6e1732d8331c/go.mod h1:WtryC6hu0hhx87FDGxWCDptyssuo68sk10vYjF+T9fY=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157 h1:Zy9XzmMEflZ/MAaA7vNcoebnRAld7FsPW1EeBB7V0m8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20240528184218-531527333157/go.mod h1:EfXuqaE1J41VCDicxHzUDm+8rk+7ZdXzHV0IhO/I6s0=
google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.0/go.mod h1:chYK+tFQF0nDUGJgXMSgLCQk3phJEuONr2DCgLDdAQM=
@ -2330,8 +2332,8 @@ google.golang.org/grpc v1.55.0/go.mod h1:iYEXKGkEBhg1PjZQvoYEVPTDkHo1/bjTnfwTeGO
google.golang.org/grpc v1.56.1/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.56.2/go.mod h1:I9bI3vqKfayGqPUAwGdOSu7kt6oIJLixfffKrpXqQ9s=
google.golang.org/grpc v1.57.0/go.mod h1:Sd+9RMTACXwmub0zcNY2c4arhtrbBYD1AUHI/dt16Mo=
google.golang.org/grpc v1.62.1 h1:B4n+nfKzOICUXMgyrNd19h/I9oH0L1pizfk1d4zSgTk=
google.golang.org/grpc v1.62.1/go.mod h1:IWTG0VlJLCh1SkC58F7np9ka9mx/WNkjl4PGJaiq+QE=
google.golang.org/grpc v1.65.0 h1:bs/cUb4lp1G5iImFFd3u5ixQzweKizoZJAwBNLR42lc=
google.golang.org/grpc v1.65.0/go.mod h1:WgYC2ypjlB0EiQi6wdKixMqukr6lBc0Vo+oOgjrM5ZQ=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@ -2351,8 +2353,8 @@ google.golang.org/protobuf v1.28.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqw
google.golang.org/protobuf v1.29.1/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.30.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.31.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
google.golang.org/protobuf v1.33.0 h1:uNO2rsAINq/JlFpSdYEKIZ0uKD/R9cpdv0T+yoGwGmI=
google.golang.org/protobuf v1.33.0/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
google.golang.org/protobuf v1.34.1 h1:9ddQBjfCyZPOHPUiPxpYESBLc+T8P3E+Vo4IbKZgFWg=
google.golang.org/protobuf v1.34.1/go.mod h1:c6P6GXX6sHbq/GpV6MGZEdwhWPcYBgnhAHhKbcUYpos=
gopkg.in/Acconut/lockfile.v1 v1.1.0/go.mod h1:6UCz3wJ8tSFUsPR6uP/j8uegEtDuEEqFxlpi0JI4Umw=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=

View File

@ -308,12 +308,12 @@ func getGRPCConfig(opaque *typespb.Opaque) (bool, bool) {
func getConn(host string, ins, skipverify bool) (*grpc.ClientConn, error) {
if ins {
return grpc.Dial(host, grpc.WithTransportCredentials(insecure.NewCredentials()))
return grpc.NewClient(host, grpc.WithTransportCredentials(insecure.NewCredentials()))
}
// TODO(labkode): if in the future we want client-side certificate validation,
// we need to load the client cert here
tlsconf := &tls.Config{InsecureSkipVerify: skipverify}
creds := credentials.NewTLS(tlsconf)
return grpc.Dial(host, grpc.WithTransportCredentials(creds))
return grpc.NewClient(host, grpc.WithTransportCredentials(creds))
}

View File

@ -21,7 +21,9 @@ package gateway
import (
"context"
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
rpc "github.com/cs3org/go-cs3apis/cs3/rpc/v1beta1"
collaboration "github.com/cs3org/go-cs3apis/cs3/sharing/collaboration/v1beta1"
link "github.com/cs3org/go-cs3apis/cs3/sharing/link/v1beta1"
"github.com/cs3org/reva/pkg/appctx"
"github.com/cs3org/reva/pkg/errtypes"
@ -99,6 +101,16 @@ func (s *svc) GetPublicShare(ctx context.Context, req *link.GetPublicShareReques
return pClient.GetPublicShare(ctx, req)
}
func (s *svc) ListExistingPublicShares(ctx context.Context, req *link.ListPublicSharesRequest) (*gateway.ListExistingPublicSharesResponse, error) {
return nil, nil
}
func (s *svc) ListExistingReceivedShares(ctx context.Context, req *collaboration.ListReceivedSharesRequest) (*gateway.ListExistingReceivedSharesResponse, error) {
return nil, nil
}
func (s *svc) ListExistingShares(ctx context.Context, req *collaboration.ListSharesRequest) (*gateway.ListExistingSharesResponse, error) {
return nil, nil
}
func (s *svc) ListPublicShares(ctx context.Context, req *link.ListPublicSharesRequest) (*link.ListPublicSharesResponse, error) {
log := appctx.GetLogger(ctx)
log.Info().Msg("listing public shares")

View File

@ -0,0 +1,41 @@
// Copyright 2018-2024 CERN
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
// In applying this license, CERN does not waive the privileges and immunities
// granted to it by virtue of its status as an Intergovernmental Organization
// or submit itself to any jurisdiction.
package gateway
import (
"context"
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
)
func (s *svc) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) {
return nil, nil
}
func (s *svc) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSpacesRequest) (*provider.ListStorageSpacesResponse, error) {
return nil, nil
}
func (s *svc) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) {
return nil, nil
}
func (s *svc) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) (*provider.DeleteStorageSpaceResponse, error) {
return nil, nil
}

View File

@ -24,7 +24,6 @@ import (
"net/url"
"path"
"strings"
"sync"
"time"
gateway "github.com/cs3org/go-cs3apis/cs3/gateway/v1beta1"
@ -100,203 +99,6 @@ func (s *svc) CreateHome(ctx context.Context, req *provider.CreateHomeRequest) (
return res, nil
}
func (s *svc) CreateStorageSpace(ctx context.Context, req *provider.CreateStorageSpaceRequest) (*provider.CreateStorageSpaceResponse, error) {
log := appctx.GetLogger(ctx)
// TODO: needs to be fixed
c, err := s.findByPath(ctx, "/users")
if err != nil {
return &provider.CreateStorageSpaceResponse{
Status: status.NewStatusFromErrType(ctx, "error finding path", err),
}, nil
}
res, err := c.CreateStorageSpace(ctx, req)
if err != nil {
log.Err(err).Msg("gateway: error creating storage space on storage provider")
return &provider.CreateStorageSpaceResponse{
Status: status.NewInternal(ctx, err, "error calling CreateStorageSpace"),
}, nil
}
return res, nil
}
func (s *svc) ListStorageSpaces(ctx context.Context, req *provider.ListStorageSpacesRequest) (*provider.ListStorageSpacesResponse, error) {
log := appctx.GetLogger(ctx)
var id *provider.StorageSpaceId
for _, f := range req.Filters {
if f.Type == provider.ListStorageSpacesRequest_Filter_TYPE_ID {
id = f.GetId()
}
}
var (
providers []*registry.ProviderInfo
err error
)
c, err := pool.GetStorageRegistryClient(pool.Endpoint(s.c.StorageRegistryEndpoint))
if err != nil {
return nil, errors.Wrap(err, "gateway: error getting storage registry client")
}
if id != nil {
// query that specific storage provider
storageid, opaqeid, err := utils.SplitStorageSpaceID(id.OpaqueId)
if err != nil {
return &provider.ListStorageSpacesResponse{
Status: status.NewInvalidArg(ctx, "space id must be separated by !"),
}, nil
}
res, err := c.GetStorageProviders(ctx, &registry.GetStorageProvidersRequest{
Ref: &provider.Reference{ResourceId: &provider.ResourceId{
StorageId: storageid,
OpaqueId: opaqeid,
}},
})
if err != nil {
return &provider.ListStorageSpacesResponse{
Status: status.NewStatusFromErrType(ctx, "ListStorageSpaces filters: req "+req.String(), err),
}, nil
}
if res.Status.Code != rpc.Code_CODE_OK {
return &provider.ListStorageSpacesResponse{
Status: res.Status,
}, nil
}
providers = res.Providers
} else {
// get list of all storage providers
res, err := c.ListStorageProviders(ctx, &registry.ListStorageProvidersRequest{})
if err != nil {
return &provider.ListStorageSpacesResponse{
Status: status.NewStatusFromErrType(ctx, "error listing providers", err),
}, nil
}
if res.Status.Code != rpc.Code_CODE_OK {
return &provider.ListStorageSpacesResponse{
Status: res.Status,
}, nil
}
providers = make([]*registry.ProviderInfo, 0, len(res.Providers))
// FIXME filter only providers that have an id set ... currently none have?
// bug? only ProviderPath is set
for i := range res.Providers {
// use only providers whose path does not start with a /?
if strings.HasPrefix(res.Providers[i].ProviderPath, "/") {
continue
}
providers = append(providers, res.Providers[i])
}
}
spacesFromProviders := make([][]*provider.StorageSpace, len(providers))
errors := make([]error, len(providers))
var wg sync.WaitGroup
for i, p := range providers {
wg.Add(1)
go s.listStorageSpacesOnProvider(ctx, req, &spacesFromProviders[i], p, &errors[i], &wg)
}
wg.Wait()
uniqueSpaces := map[string]*provider.StorageSpace{}
for i := range providers {
if errors[i] != nil {
if len(providers) > 1 {
log.Debug().Err(errors[i]).Msg("skipping provider")
continue
}
return &provider.ListStorageSpacesResponse{
Status: status.NewStatusFromErrType(ctx, "error listing space", errors[i]),
}, nil
}
for j := range spacesFromProviders[i] {
uniqueSpaces[spacesFromProviders[i][j].Id.OpaqueId] = spacesFromProviders[i][j]
}
}
spaces := make([]*provider.StorageSpace, 0, len(uniqueSpaces))
for spaceID := range uniqueSpaces {
spaces = append(spaces, uniqueSpaces[spaceID])
}
if len(spaces) == 0 {
return &provider.ListStorageSpacesResponse{
Status: status.NewNotFound(ctx, "space not found"),
}, nil
}
return &provider.ListStorageSpacesResponse{
Status: status.NewOK(ctx),
StorageSpaces: spaces,
}, nil
}
func (s *svc) listStorageSpacesOnProvider(ctx context.Context, req *provider.ListStorageSpacesRequest, res *[]*provider.StorageSpace, p *registry.ProviderInfo, e *error, wg *sync.WaitGroup) {
defer wg.Done()
c, err := s.getStorageProviderClient(ctx, p)
if err != nil {
*e = errors.Wrap(err, "error connecting to storage provider="+p.Address)
return
}
r, err := c.ListStorageSpaces(ctx, req)
if err != nil {
*e = errors.Wrap(err, "gateway: error calling ListStorageSpaces")
return
}
*res = r.StorageSpaces
}
func (s *svc) UpdateStorageSpace(ctx context.Context, req *provider.UpdateStorageSpaceRequest) (*provider.UpdateStorageSpaceResponse, error) {
log := appctx.GetLogger(ctx)
// TODO: needs to be fixed
c, err := s.find(ctx, &provider.Reference{ResourceId: req.StorageSpace.Root})
if err != nil {
return &provider.UpdateStorageSpaceResponse{
Status: status.NewStatusFromErrType(ctx, "error finding ID", err),
}, nil
}
res, err := c.UpdateStorageSpace(ctx, req)
if err != nil {
log.Err(err).Msg("gateway: error creating update space on storage provider")
return &provider.UpdateStorageSpaceResponse{
Status: status.NewInternal(ctx, err, "error calling UpdateStorageSpace"),
}, nil
}
return res, nil
}
func (s *svc) DeleteStorageSpace(ctx context.Context, req *provider.DeleteStorageSpaceRequest) (*provider.DeleteStorageSpaceResponse, error) {
log := appctx.GetLogger(ctx)
// TODO: needs to be fixed
storageid, opaqeid, err := utils.SplitStorageSpaceID(req.Id.OpaqueId)
if err != nil {
return &provider.DeleteStorageSpaceResponse{
Status: status.NewInvalidArg(ctx, "space id must be separated by !"),
}, nil
}
c, err := s.find(ctx, &provider.Reference{ResourceId: &provider.ResourceId{
StorageId: storageid,
OpaqueId: opaqeid,
}})
if err != nil {
return &provider.DeleteStorageSpaceResponse{
Status: status.NewStatusFromErrType(ctx, "error finding path", err),
}, nil
}
res, err := c.DeleteStorageSpace(ctx, req)
if err != nil {
log.Err(err).Msg("gateway: error deleting storage space on storage provider")
return &provider.DeleteStorageSpaceResponse{
Status: status.NewInternal(ctx, err, "error calling DeleteStorageSpace"),
}, nil
}
return res, nil
}
func (s *svc) GetHome(ctx context.Context, _ *provider.GetHomeRequest) (*provider.GetHomeResponse, error) {
return &provider.GetHomeResponse{
Path: s.getHome(ctx),

View File

@ -97,7 +97,7 @@ func (s *svc) Handler() http.Handler {
}
func (s *svc) getClient() (proto.PingPongServiceClient, error) {
conn, err := grpc.Dial(
conn, err := grpc.NewClient(
s.conf.Endpoint,
grpc.WithTransportCredentials(insecure.NewCredentials()),
)

View File

@ -132,7 +132,7 @@ func (am *Manager) do(ctx context.Context, a Action) (int, []byte, error) {
log.Info().Msgf("am.do response %d %s", resp.StatusCode, body)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return 0, nil, fmt.Errorf("Unexpected response code from EFSS API: " + strconv.Itoa(resp.StatusCode))
return 0, nil, fmt.Errorf("Unexpected response code from EFSS API: %s", strconv.Itoa(resp.StatusCode))
}
return resp.StatusCode, body, nil
}

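The old line built the message by concatenating a string inside fmt.Errorf, which printf-style vet checks flag as a non-constant format string; the new line passes the value through a %s verb instead. An equivalent, slightly shorter form (illustration only) hands the integer straight to a %d verb:

return 0, nil, fmt.Errorf("Unexpected response code from EFSS API: %d", resp.StatusCode)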
View File

@ -631,7 +631,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
_, endStatusFound := txEndStatuses[job.TransferStatus.String()]
if endStatusFound {
err := errors.Wrapf(errors.New("rclone driver: job already in end state"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: job already in end state"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,
@ -649,7 +649,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
data, err := json.Marshal(rcloneCancelTransferReq)
if err != nil {
err := errors.Wrapf(errors.New("rclone driver: error marshalling rclone job/stop req data"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: error marshalling rclone job/stop req data"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,
@ -661,7 +661,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
u, err := url.Parse(driver.config.Endpoint)
if err != nil {
err := errors.Wrapf(errors.New("rclone driver: error parsing driver endpoint"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: error parsing driver endpoint"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,
@ -673,7 +673,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
req, err := http.NewRequest(http.MethodPost, requestURL, bytes.NewReader(data))
if err != nil {
err := errors.Wrapf(errors.New("rclone driver: error framing post request"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: error framing post request"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,
@ -686,7 +686,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
res, err := driver.client.Do(req)
if err != nil {
err := errors.Wrapf(errors.New("rclone driver: error sending post request"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: error sending post request"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,
@ -699,7 +699,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
if res.StatusCode != http.StatusOK {
var errorResData rcloneHTTPErrorRes
if err = json.NewDecoder(res.Body).Decode(&errorResData); err != nil {
err := errors.Wrapf(errors.New("rclone driver: error decoding response data"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: error decoding response data"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,
@ -728,7 +728,7 @@ func (driver *rclone) CancelTransfer(ctx context.Context, transferID string) (*d
}
var resData rcloneCancelTransferResJSON
if err = json.NewDecoder(res.Body).Decode(&resData); err != nil {
err := errors.Wrapf(errors.New("rclone driver: error decoding response data"), transferRemovedMessage)
err := errors.Wrap(errors.New("rclone driver: error decoding response data"), transferRemovedMessage)
return &datatx.TxInfo{
Id: &datatx.TxId{OpaqueId: transferID},
Status: datatx.Status_STATUS_INVALID,

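errors.Wrapf from github.com/pkg/errors is the formatting variant of errors.Wrap; since none of these calls pass format arguments, Wrap is the correct choice and avoids printf-style warnings about format strings without directives. Illustration of the two forms (transferID and transferRemovedMessage are the variables used above; baseErr is a placeholder):

// No formatting needed: plain Wrap.
err := errors.Wrap(errors.New("rclone driver: job already in end state"), transferRemovedMessage)
// Formatting needed: Wrapf with a format string and arguments.
err = errors.Wrapf(baseErr, "rclone driver: transfer %s could not be cancelled", transferID)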
View File

@ -138,7 +138,7 @@ type Client struct {
func newgrpc(ctx context.Context, log *zerolog.Logger, opt *Options) (erpc.EosClient, error) {
log.Debug().Msgf("Setting up GRPC towards '%s'", opt.GrpcURI)
conn, err := grpc.Dial(opt.GrpcURI, grpc.WithTransportCredentials(insecure.NewCredentials()))
conn, err := grpc.NewClient(opt.GrpcURI, grpc.WithTransportCredentials(insecure.NewCredentials()))
if err != nil {
log.Warn().Err(err).Msgf("Error connecting to '%s'", opt.GrpcURI)
}

View File

@ -159,7 +159,7 @@ func (c *Client) doPostToken(token string, initiator string, description string,
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return false, fmt.Errorf("Unexpected response code from EFSS API: " + strconv.Itoa(resp.StatusCode))
return false, fmt.Errorf("Unexpected response code from EFSS API: %s", strconv.Itoa(resp.StatusCode))
}
return true, nil
}
@ -185,7 +185,7 @@ func (c *Client) doGetToken(token string) (*apiToken, error) {
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unexpected response code from API: " + strconv.Itoa(resp.StatusCode))
return nil, fmt.Errorf("Unexpected response code from API: %s", strconv.Itoa(resp.StatusCode))
}
result := &apiToken{}
@ -218,7 +218,7 @@ func (c *Client) doGetAllTokens(initiator string) ([]*apiToken, error) {
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unexpected response code from API: " + strconv.Itoa(resp.StatusCode))
return nil, fmt.Errorf("Unexpected response code from API: %s", strconv.Itoa(resp.StatusCode))
}
result := []*apiToken{}
@ -257,7 +257,7 @@ func (c *Client) doPostRemoteUser(initiator string, opaqueUserID string, idp str
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return false, fmt.Errorf("Unexpected response code from EFSS API: " + strconv.Itoa(resp.StatusCode))
return false, fmt.Errorf("Unexpected response code from EFSS API: %s", strconv.Itoa(resp.StatusCode))
}
return true, nil
}
@ -282,7 +282,7 @@ func (c *Client) doGetRemoteUser(initiator string, opaqueUserID string, idp stri
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unexpected response code from API: " + strconv.Itoa(resp.StatusCode))
return nil, fmt.Errorf("Unexpected response code from API: %s", strconv.Itoa(resp.StatusCode))
}
result := &apiOCMUser{}
@ -315,7 +315,7 @@ func (c *Client) doGetAllRemoteUsers(initiator string, search string) ([]*apiOCM
}
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("Unexpected response code from API: " + strconv.Itoa(resp.StatusCode))
return nil, fmt.Errorf("Unexpected response code from API: %s", strconv.Itoa(resp.StatusCode))
}
result := []*apiOCMUser{}

View File

@ -483,7 +483,7 @@ func (sm *Manager) do(ctx context.Context, a Action, username string) (int, []by
log.Info().Int("status", resp.StatusCode).Msgf("sent request to EFSS API, response: %s", body)
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return 0, nil, fmt.Errorf("Unexpected response from EFSS API: " + strconv.Itoa(resp.StatusCode))
return 0, nil, fmt.Errorf("Unexpected response from EFSS API: %s", strconv.Itoa(resp.StatusCode))
}
return resp.StatusCode, body, nil
}

View File

@ -234,16 +234,10 @@ var _ = Describe("Nextcloud", func() {
// Ctime: &types.Timestamp{
// Seconds: 1234567890,
// Nanos: 0,
// XXX_NoUnkeyedLiteral: struct{}{},
// XXX_unrecognized: nil,
// XXX_sizecache: 0,
// },
// Mtime: &types.Timestamp{
// Seconds: 1234567890,
// Nanos: 0,
// XXX_NoUnkeyedLiteral: struct{}{},
// XXX_unrecognized: nil,
// XXX_sizecache: 0,
// },
// }))
// checkCalled(called, `POST /apps/sciencemesh/~tester/api/ocm/addReceivedShare {"md":{"opaque_id":"fileid-/some/path"},"g":{"grantee":{"Id":{"UserId":{"idp":"0.0.0.0:19000","opaque_id":"f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c","type":1}}},"permissions":{"permissions":{"get_path":true}}},"provider_domain":"cern.ch","resource_type":"file","provider_id":2,"owner_opaque_id":"einstein","owner_display_name":"Albert Einstein","protocol":{"name":"webdav","options":{"sharedSecret":"secret","permissions":"webdav-property"}}}`)
@ -294,18 +288,12 @@ var _ = Describe("Nextcloud", func() {
ocmshare.NewTransferAccessMethod(),
},
Ctime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
ShareType: ocm.ShareType_SHARE_TYPE_USER,
Token: "some-token",
@ -370,16 +358,10 @@ var _ = Describe("Nextcloud", func() {
// Ctime: &types.Timestamp{
// Seconds: 1234567890,
// Nanos: 0,
// XXX_NoUnkeyedLiteral: struct{}{},
// XXX_unrecognized: nil,
// XXX_sizecache: 0,
// },
// Mtime: &types.Timestamp{
// Seconds: 1234567890,
// Nanos: 0,
// XXX_NoUnkeyedLiteral: struct{}{},
// XXX_unrecognized: nil,
// XXX_sizecache: 0,
// },
// }))
// checkCalled(called, `POST /apps/sciencemesh/~tester/api/ocm/UpdateShare {"ref":{"Spec":{"Id":{"opaque_id":"some-share-id"}}},"p":{"permissions":{"add_grant":true,"create_container":true,"delete":true,"get_path":true,"get_quota":true,"initiate_file_download":true,"initiate_file_upload":true,"list_grants":true,"list_container":true,"list_file_versions":true,"list_recycle":true,"move":true,"remove_grant":true,"purge_recycle":true,"restore_file_version":true,"restore_recycle_item":true,"stat":true,"update_grant":true,"deny_grant":true}}}`)
@ -431,18 +413,12 @@ var _ = Describe("Nextcloud", func() {
OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c",
},
Ctime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
ShareType: ocm.ShareType_SHARE_TYPE_USER,
AccessMethods: []*ocm.AccessMethod{
@ -487,18 +463,12 @@ var _ = Describe("Nextcloud", func() {
OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c",
},
Ctime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
ShareType: ocm.ShareType_SHARE_TYPE_USER,
ResourceType: provider.ResourceType_RESOURCE_TYPE_FILE,
@ -551,18 +521,12 @@ var _ = Describe("Nextcloud", func() {
OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c",
},
Ctime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
ShareType: ocm.ShareType_SHARE_TYPE_USER,
ResourceType: provider.ResourceType_RESOURCE_TYPE_FILE,
@ -610,18 +574,12 @@ var _ = Describe("Nextcloud", func() {
Type: userpb.UserType_USER_TYPE_PRIMARY,
},
Ctime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
ShareType: ocm.ShareType_SHARE_TYPE_USER,
State: ocm.ShareState_SHARE_STATE_ACCEPTED,
@ -652,18 +610,12 @@ var _ = Describe("Nextcloud", func() {
OpaqueId: "f7fbf8c8-139b-4376-b307-cf0a8c2d0d9c",
},
Ctime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
ShareType: ocm.ShareType_SHARE_TYPE_USER,
ResourceType: provider.ResourceType_RESOURCE_TYPE_FILE,

View File

@ -34,6 +34,7 @@ import (
typesv1beta1 "github.com/cs3org/go-cs3apis/cs3/types/v1beta1"
"github.com/cs3org/reva/internal/http/services/owncloud/ocs/conversions"
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/fieldmaskpb"
"github.com/cs3org/reva/pkg/ocm/share"
@ -632,7 +633,7 @@ func TestGetShare(t *testing.T) {
}
if tt.err == nil {
if !reflect.DeepEqual(got, tt.expected) {
if !proto.Equal(got, tt.expected) {
t.Fatalf("shares do not match. got=%+v expected=%+v", render.AsCode(got), render.AsCode(tt.expected))
}
}
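For context, an illustrative aside (not part of this diff): reflect.DeepEqual is fragile on generated protobuf messages because they carry unexported bookkeeping (size caches, unknown-field buffers) that marshaling can populate, whereas proto.Equal compares only the field values. A minimal sketch using a well-known type:

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/types/known/timestamppb"
)

func main() {
	a := &timestamppb.Timestamp{Seconds: 1234567890}
	b := proto.Clone(a).(*timestamppb.Timestamp)

	// Marshaling may populate unexported bookkeeping in a, which is exactly
	// what makes reflect.DeepEqual unreliable for this kind of comparison.
	_, _ = proto.Marshal(a)

	// proto.Equal ignores internal state and compares message contents.
	fmt.Println(proto.Equal(a, b)) // true
}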

View File

@ -83,7 +83,7 @@ var (
// NewConn creates a new connection to a grpc server
// TODO(labkode): make grpc tls configurable.
func NewConn(options Options) (*grpc.ClientConn, error) {
conn, err := grpc.Dial(
conn, err := grpc.NewClient(
options.Endpoint,
grpc.WithTransportCredentials(insecure.NewCredentials()),
grpc.WithDefaultCallOptions(

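As background (illustrative, not from this patch): grpc.NewClient supersedes grpc.Dial in recent grpc-go releases; it performs no network I/O at construction time, and the connection is established lazily on the first RPC or explicitly via Connect. A minimal sketch, assuming grpc-go >= v1.63 and a hypothetical endpoint:

package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Only validates the target and options; nothing is dialed here.
	conn, err := grpc.NewClient(
		"localhost:19000", // hypothetical endpoint
		grpc.WithTransportCredentials(insecure.NewCredentials()),
	)
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	// Dialing happens on the first RPC, or can be kicked off explicitly:
	conn.Connect()
}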
View File

@ -217,27 +217,18 @@ var _ = Describe("Nextcloud", func() {
Opaque: nil,
Type: provider.ResourceType_RESOURCE_TYPE_FILE,
Id: &provider.ResourceId{
StorageId: "",
OpaqueId: "fileid-/some/path",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
StorageId: "",
OpaqueId: "fileid-/some/path",
},
Checksum: &provider.ResourceChecksum{
Type: 0,
Sum: "",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Type: 0,
Sum: "",
},
Etag: "deadbeef",
MimeType: "text/plain",
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Path: "/some/path",
PermissionSet: conversions.RoleFromOCSPermissions(conversions.Permissions(0)).CS3ResourcePermissions(),
@ -247,12 +238,9 @@ var _ = Describe("Nextcloud", func() {
OpaqueId: "",
Type: 1,
},
Target: "",
CanonicalMetadata: nil,
ArbitraryMetadata: nil,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Target: "",
CanonicalMetadata: nil,
ArbitraryMetadata: nil,
}))
checkCalled(called, `POST /apps/sciencemesh/~tester/api/storage/GetMD {"ref":{"resource_id":{"storage_id":"storage-id","opaque_id":"opaque-id"},"path":"/some/path"},"mdKeys":["val1","val2","val3"]}`)
})
@ -279,27 +267,18 @@ var _ = Describe("Nextcloud", func() {
Opaque: nil,
Type: provider.ResourceType_RESOURCE_TYPE_FILE,
Id: &provider.ResourceId{
StorageId: "",
OpaqueId: "fileid-/some/path",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
StorageId: "",
OpaqueId: "fileid-/some/path",
},
Checksum: &provider.ResourceChecksum{
Type: 0,
Sum: "",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Type: 0,
Sum: "",
},
Etag: "deadbeef",
MimeType: "text/plain",
Mtime: &types.Timestamp{
Seconds: 1234567890,
Nanos: 0,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Seconds: 1234567890,
Nanos: 0,
},
Path: "/some/path",
PermissionSet: conversions.RoleFromOCSPermissions(conversions.Permissions(0)).CS3ResourcePermissions(),
@ -309,12 +288,9 @@ var _ = Describe("Nextcloud", func() {
OpaqueId: "",
Type: 1,
},
Target: "",
CanonicalMetadata: nil,
ArbitraryMetadata: nil,
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Target: "",
CanonicalMetadata: nil,
ArbitraryMetadata: nil,
}))
Expect(err).ToNot(HaveOccurred())
checkCalled(called, `POST /apps/sciencemesh/~tester/api/storage/ListFolder {"ref":{"resource_id":{"storage_id":"storage-id","opaque_id":"opaque-id"},"path":"/some"},"mdKeys":["val1","val2","val3"]}`)
@ -419,13 +395,10 @@ var _ = Describe("Nextcloud", func() {
},
},
},
Key: "version-12",
Size: uint64(12345),
Mtime: uint64(1234567890),
Etag: "deadb00f",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Key: "version-12",
Size: uint64(12345),
Mtime: uint64(1234567890),
Etag: "deadb00f",
}))
Expect(*results[1]).To(Equal(provider.FileVersion{
Opaque: &types.Opaque{
@ -435,13 +408,10 @@ var _ = Describe("Nextcloud", func() {
},
},
},
Key: "asdf",
Size: uint64(12345),
Mtime: uint64(1234567890),
Etag: "deadbeef",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Key: "asdf",
Size: uint64(12345),
Mtime: uint64(1234567890),
Etag: "deadbeef",
}))
checkCalled(called, `POST /apps/sciencemesh/~tester/api/storage/ListRevisions {"resource_id":{"storage_id":"storage-id","opaque_id":"opaque-id"},"path":"/some/path"}`)
})
@ -505,17 +475,11 @@ var _ = Describe("Nextcloud", func() {
Opaque: &types.Opaque{},
Key: "some-deleted-version",
Ref: &provider.Reference{
ResourceId: &provider.ResourceId{},
Path: "/some/file.txt",
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
ResourceId: &provider.ResourceId{},
Path: "/some/file.txt",
},
Size: uint64(12345),
DeletionTime: &types.Timestamp{Seconds: uint64(1234567890)},
XXX_NoUnkeyedLiteral: struct{}{},
XXX_unrecognized: nil,
XXX_sizecache: 0,
Size: uint64(12345),
DeletionTime: &types.Timestamp{Seconds: uint64(1234567890)},
}))
checkCalled(called, `POST /apps/sciencemesh/~tester/api/storage/ListRecycle {"key":"asdf","path":"/some/file.txt"}`)
})

View File

@ -410,7 +410,7 @@ var _ = Describe("Dynamic storage provider", func() {
Path: "/non/existent",
})
Expect(err).To(HaveOccurred())
Expect(err).To(MatchError(errtypes.NotFound("storage provider not found for ref path:\"/non/existent\" ")))
Expect(err.Error()).To(Equal("error: not found: storage provider not found for ref path:\"/non/existent\""))
})
})
})

View File

@ -24,7 +24,7 @@ import (
provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
"github.com/cs3org/reva/pkg/storage/utils/acl"
"github.com/google/go-cmp/cmp"
"google.golang.org/protobuf/proto"
)
// GetACLPerm generates a string representation of CS3APIs' ResourcePermissions,
@ -32,7 +32,7 @@ import (
// TODO(labkode): fine grained permission controls.
func GetACLPerm(set *provider.ResourcePermissions) (string, error) {
// resource permission is denied
if cmp.Equal(provider.ResourcePermissions{}, *set) {
if proto.Equal(&provider.ResourcePermissions{}, set) {
return "!r!w!x!m!u!d", nil
}
@ -129,10 +129,10 @@ func GetGranteeType(aclType string) provider.GranteeType {
// PermissionsEqual returns true if the permissions are equal.
func PermissionsEqual(p1, p2 *provider.ResourcePermissions) bool {
return p1 != nil && p2 != nil && cmp.Equal(*p1, *p2)
return p1 != nil && p2 != nil && proto.Equal(p1, p2)
}
// GranteeEqual returns true if the grantee are equal.
func GranteeEqual(g1, g2 *provider.Grantee) bool {
return g1 != nil && g2 != nil && cmp.Equal(*g1, *g2)
return g1 != nil && g2 != nil && proto.Equal(g1, g2)
}
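Side note (illustrative, not part of the change): go-cmp cannot look into the unexported fields of messages generated by the current protobuf-go runtime without extra options, and dereferencing the messages to compare them by value also trips go vet's copylocks check. proto.Equal, or go-cmp with the protocmp adapter, are the supported routes; a small sketch under those assumptions:

package main

import (
	"fmt"

	provider "github.com/cs3org/go-cs3apis/cs3/storage/provider/v1beta1"
	"github.com/google/go-cmp/cmp"
	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/testing/protocmp"
)

func main() {
	p1 := &provider.ResourcePermissions{Stat: true, GetPath: true}
	p2 := &provider.ResourcePermissions{Stat: true, GetPath: true}

	// Pointer-based comparison of field values (the helpers above still guard
	// against nil explicitly before calling this).
	fmt.Println(proto.Equal(p1, p2)) // true

	// go-cmp needs the protocmp adapter to understand protobuf internals.
	fmt.Println(cmp.Diff(p1, p2, protocmp.Transform()) == "") // true
}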

View File

@ -28,6 +28,7 @@ import (
"github.com/cs3org/reva/pkg/errtypes"
"github.com/cs3org/reva/pkg/user"
"github.com/cs3org/reva/pkg/user/manager/registry"
"google.golang.org/protobuf/proto"
)
func init() {
@ -57,11 +58,11 @@ func (m *manager) Configure(ml map[string]interface{}) error {
func (m *manager) GetUser(ctx context.Context, uid *userpb.UserId, skipFetchingGroups bool) (*userpb.User, error) {
if user, ok := m.catalog[uid.OpaqueId]; ok {
if uid.Idp == "" || user.Id.Idp == uid.Idp {
u := *user
u := proto.Clone(user).(*userpb.User)
if skipFetchingGroups {
u.Groups = nil
}
return &u, nil
return u, nil
}
}
return nil, errtypes.NotFound(uid.OpaqueId)
@ -70,11 +71,11 @@ func (m *manager) GetUser(ctx context.Context, uid *userpb.UserId, skipFetchingG
func (m *manager) GetUserByClaim(ctx context.Context, claim, value string, skipFetchingGroups bool) (*userpb.User, error) {
for _, u := range m.catalog {
if userClaim, err := extractClaim(u, claim); err == nil && value == userClaim {
user := *u
u2 := proto.Clone(u).(*userpb.User)
if skipFetchingGroups {
user.Groups = nil
u2.Groups = nil
}
return &user, nil
return u2, nil
}
}
return nil, errtypes.NotFound(value)
@ -103,11 +104,11 @@ func (m *manager) FindUsers(ctx context.Context, query string, skipFetchingGroup
users := []*userpb.User{}
for _, u := range m.catalog {
if userContains(u, query) {
user := *u
user := proto.Clone(u).(*userpb.User)
if skipFetchingGroups {
user.Groups = nil
}
users = append(users, &user)
users = append(users, user)
}
}
return users, nil
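For illustration (not part of the patch): assigning user := *u copies the generated struct by value, which go vet flags because the message embeds no-copy internal state, and which only shallow-copies nested pointers and slices; proto.Clone yields an independent deep copy that the manager can mutate safely. A sketch, assuming the userpb types imported above:

package main

import (
	"fmt"

	userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
	"google.golang.org/protobuf/proto"
)

func main() {
	orig := &userpb.User{
		Username: "einstein",
		Groups:   []string{"sailing-lovers", "violin-haters"},
	}

	// Deep, independent copy; mutating it cannot leak back into the catalog entry.
	u := proto.Clone(orig).(*userpb.User)
	u.Groups = nil

	fmt.Println(orig.Groups) // [sailing-lovers violin-haters]
	fmt.Println(u.Groups)    // []
}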

View File

@ -25,6 +25,7 @@ import (
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
"github.com/cs3org/reva/pkg/errtypes"
"google.golang.org/protobuf/proto"
)
var ctx = context.Background()
@ -58,7 +59,7 @@ func TestUserManager(t *testing.T) {
// positive test GetUserByClaim by uid
resUserByUID, _ := manager.GetUserByClaim(ctx, "uid", "123", false)
if !reflect.DeepEqual(resUserByUID, userEinstein) {
if !proto.Equal(resUserByUID, userEinstein) {
t.Fatalf("user differs: expected=%v got=%v", userEinstein, resUserByUID)
}
@ -71,13 +72,13 @@ func TestUserManager(t *testing.T) {
// positive test GetUserByClaim by mail
resUserByEmail, _ := manager.GetUserByClaim(ctx, "mail", "einstein@example.org", false)
if !reflect.DeepEqual(resUserByEmail, userEinstein) {
if !proto.Equal(resUserByEmail, userEinstein) {
t.Fatalf("user differs: expected=%v got=%v", userEinstein, resUserByEmail)
}
// positive test GetUserByClaim by uid without groups
resUserByUIDWithoutGroups, _ := manager.GetUserByClaim(ctx, "uid", "123", true)
if !reflect.DeepEqual(resUserByUIDWithoutGroups, userEinsteinWithoutGroups) {
if !proto.Equal(resUserByUIDWithoutGroups, userEinsteinWithoutGroups) {
t.Fatalf("user differs: expected=%v got=%v", userEinsteinWithoutGroups, resUserByUIDWithoutGroups)
}
@ -96,7 +97,7 @@ func TestUserManager(t *testing.T) {
// test FindUsers
resUser, _ := manager.FindUsers(ctx, "einstein", false)
if !reflect.DeepEqual(resUser, []*userpb.User{userEinstein}) {
if !proto.Equal(resUser[0], userEinstein) {
t.Fatalf("user differs: expected=%v got=%v", []*userpb.User{userEinstein}, resUser)
}

View File

@ -31,6 +31,7 @@ import (
"github.com/cs3org/reva/pkg/user/manager/registry"
"github.com/cs3org/reva/pkg/utils/cfg"
"github.com/pkg/errors"
"google.golang.org/protobuf/proto"
)
func init() {
@ -86,11 +87,11 @@ func (m *manager) Configure(ml map[string]interface{}) error {
func (m *manager) GetUser(ctx context.Context, uid *userpb.UserId, skipFetchingGroups bool) (*userpb.User, error) {
for _, u := range m.users {
if (u.Id.GetOpaqueId() == uid.OpaqueId || u.Username == uid.OpaqueId) && (uid.Idp == "" || uid.Idp == u.Id.GetIdp()) {
user := *u
user := proto.Clone(u).(*userpb.User)
if skipFetchingGroups {
user.Groups = nil
}
return &user, nil
return user, nil
}
}
return nil, errtypes.NotFound(uid.OpaqueId)
@ -99,11 +100,11 @@ func (m *manager) GetUser(ctx context.Context, uid *userpb.UserId, skipFetchingG
func (m *manager) GetUserByClaim(ctx context.Context, claim, value string, skipFetchingGroups bool) (*userpb.User, error) {
for _, u := range m.users {
if userClaim, err := extractClaim(u, claim); err == nil && value == userClaim {
user := *u
user := proto.Clone(u).(*userpb.User)
if skipFetchingGroups {
user.Groups = nil
}
return &user, nil
return user, nil
}
}
return nil, errtypes.NotFound(value)
@ -134,11 +135,11 @@ func (m *manager) FindUsers(ctx context.Context, query string, skipFetchingGroup
users := []*userpb.User{}
for _, u := range m.users {
if userContains(u, query) {
user := *u
user := proto.Clone(u).(*userpb.User)
if skipFetchingGroups {
user.Groups = nil
}
users = append(users, &user)
users = append(users, user)
}
}
return users, nil

View File

@ -26,6 +26,7 @@ import (
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
"github.com/cs3org/reva/pkg/errtypes"
"google.golang.org/protobuf/proto"
)
var ctx = context.Background()
@ -108,26 +109,26 @@ func TestUserManager(t *testing.T) {
// negative test GetUserGroups
expectedErr := errtypes.NotFound(userFake.OpaqueId)
_, err = manager.GetUserGroups(ctx, userFake)
if !reflect.DeepEqual(err, expectedErr) {
if err != expectedErr {
t.Fatalf("user not found error differ: expected='%v' got='%v'", expectedErr, err)
}
// positive test GetUserByClaim by mail
resUserByEmail, _ := manager.GetUserByClaim(ctx, "mail", "einstein@example.org", false)
if !reflect.DeepEqual(resUserByEmail, userEinstein) {
if !proto.Equal(resUserByEmail, userEinstein) {
t.Fatalf("user differs: expected=%v got=%v", userEinstein, resUserByEmail)
}
// negative test GetUserByClaim by mail
expectedErr = errtypes.NotFound("abc@example.com")
_, err = manager.GetUserByClaim(ctx, "mail", "abc@example.com", false)
if !reflect.DeepEqual(err, expectedErr) {
if err != expectedErr {
t.Fatalf("user not found error differs: expected='%v' got='%v'", expectedErr, err)
}
// positive test GetUserByClaim by mail without groups
resUserByEmailWithoutGroups, _ := manager.GetUserByClaim(ctx, "mail", "einstein@example.org", true)
if !reflect.DeepEqual(resUserByEmailWithoutGroups, userEinsteinWithoutGroups) {
if !proto.Equal(resUserByEmailWithoutGroups, userEinsteinWithoutGroups) {
t.Fatalf("user differs: expected=%v got=%v", userEinsteinWithoutGroups, resUserByEmailWithoutGroups)
}
@ -142,7 +143,8 @@ func TestUserManager(t *testing.T) {
if len(resUser) != 1 {
t.Fatalf("too many users found: expected=%d got=%d", 1, len(resUser))
}
if !reflect.DeepEqual(resUser[0].Username, "einstein") {
if resUser[0].Username != "einstein" {
t.Fatalf("user differ: expected=%v got=%v", "einstein", resUser[0].Username)
}
}
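Aside (illustrative): the switch from reflect.DeepEqual to plain == / != above works because the compared values are simple comparable types; reva's errtypes.NotFound behaves like a string-based error type (its messages render as "error: not found: ..."), so two values built from the same text compare equal even through the error interface. A minimal sketch that mimics, rather than imports, such a type:

package main

import "fmt"

// notFound mimics a string-based error type in the spirit of errtypes.NotFound.
type notFound string

func (e notFound) Error() string { return "error: not found: " + string(e) }

func main() {
	var err error = notFound("abc@example.com")
	expected := notFound("abc@example.com")

	// Interface-to-value comparison: equal when dynamic type and value match.
	fmt.Println(err == expected) // true
}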

View File

@ -67,6 +67,6 @@ func TestUserManager(t *testing.T) {
// positive tests for New
_, err = New(context.Background(), map[string]interface{}{})
if err != nil {
t.Fatalf(err.Error())
t.Fatal(err.Error())
}
}

View File

@ -130,7 +130,7 @@ func (um *Manager) do(ctx context.Context, a Action, username string) (int, []by
body, err := io.ReadAll(resp.Body)
log.Info().Msgf("um.do res %s %s", url, string(body))
if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusCreated {
return 0, nil, fmt.Errorf("Unexpected response code from EFSS API: " + strconv.Itoa(resp.StatusCode))
return 0, nil, fmt.Errorf("Unexpected response code from EFSS API: %s", strconv.Itoa(resp.StatusCode))
}
return resp.StatusCode, body, err
}
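For completeness (illustrative, not the committed change): the new version keeps the format string constant, which is what go vet's printf check wants; a hypothetical further simplification would format the integer directly and drop strconv:

// Hypothetical equivalent of the error return above (fmt only, no strconv):
func efssStatusError(status int) error {
	return fmt.Errorf("unexpected response code from EFSS API: %d", status)
}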

View File

@ -23,12 +23,12 @@ import (
"os"
userpb "github.com/cs3org/go-cs3apis/cs3/identity/user/v1beta1"
"github.com/cs3org/reva/pkg/appctx"
"github.com/cs3org/reva/pkg/auth/scope"
jwt "github.com/cs3org/reva/pkg/token/manager/jwt"
"github.com/cs3org/reva/pkg/user/manager/nextcloud"
"github.com/cs3org/reva/tests/helpers"
"github.com/jt-nti/gproto"
. "github.com/onsi/ginkgo"
. "github.com/onsi/gomega"
"google.golang.org/grpc/metadata"
@ -204,7 +204,8 @@ var _ = Describe("Nextcloud", func() {
users, err := um.FindUsers(ctx, "some-query", false)
Expect(err).ToNot(HaveOccurred())
Expect(len(users)).To(Equal(1))
Expect(*users[0]).To(Equal(userpb.User{
Expect(users[0]).To(gproto.Equal(&userpb.User{
Id: &userpb.UserId{
Idp: "some-idp",
OpaqueId: "some-opaque-user-id",

View File

@ -1,401 +0,0 @@
default:
autoload:
"": "%paths.base%/../../ocis/tests/acceptance/features/bootstrap"
suites:
coreApiMain:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiMain"
context: &common_ldap_suite_context
parameters:
ldapAdminPassword: admin
ldapUsersOU: TestUsers
ldapGroupsOU: TestGroups
ldapInitialUserFilePath: /../../config/ldap-users.ldif
contexts:
- FeatureContext: &common_feature_context_params
baseUrl: http://localhost:8080
adminUsername: admin
adminPassword: admin
regularUserPassword: 123456
ocPath: apps/testing/api/v1/occ
- AppConfigurationContext:
- ChecksumContext:
- FilesVersionsContext:
- TrashbinContext:
coreApiAuth:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiAuth"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- AuthContext:
coreApiAuthOcs:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiAuthOcs"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- AuthContext:
coreApiAuthWebDav:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiAuthWebDav"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- SearchContext:
- PublicWebDavContext:
- WebDavPropertiesContext:
- AuthContext:
coreApiCapabilities:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiCapabilities"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- CapabilitiesContext:
- AppConfigurationContext:
coreApiFavorites:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiFavorites"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- FavoritesContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiShareCreateSpecialToShares1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareCreateSpecialToShares1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiShareCreateSpecialToShares2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareCreateSpecialToShares2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiSharees:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiSharees"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- ShareesContext:
- AppConfigurationContext:
coreApiShareManagementToShares:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareManagementToShares"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
- FilesVersionsContext:
coreApiShareManagementBasicToShares:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareManagementBasicToShares"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AuthContext:
coreApiShareOperationsToShares1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareOperationsToShares1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
coreApiShareOperationsToShares2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareOperationsToShares2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
coreApiSharePublicLink1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiSharePublicLink1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiSharePublicLink2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiSharePublicLink2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiSharePublicLink3:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiSharePublicLink3"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiShareReshareToShares1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareReshareToShares1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
coreApiShareReshareToShares2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareReshareToShares2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiShareReshareToShares3:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareReshareToShares3"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiShareUpdateToShares:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiShareUpdateToShares"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TrashbinContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiTrashbin:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiTrashbin"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- TrashbinContext:
- AppConfigurationContext:
- WebDavPropertiesContext:
coreApiTrashbinRestore:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiTrashbinRestore"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- TrashbinContext:
- AppConfigurationContext:
- WebDavPropertiesContext:
coreApiVersions:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiVersions"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- ChecksumContext:
- FilesVersionsContext:
- WebDavPropertiesContext:
- AppConfigurationContext:
- TrashbinContext:
coreApiWebdavDelete:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavDelete"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- SearchContext:
- PublicWebDavContext:
- WebDavPropertiesContext:
- TrashbinContext:
coreApiWebdavLocks:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavLocks"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- WebDavLockingContext:
- WebDavPropertiesContext:
coreApiWebdavLocks2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavLocks2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- WebDavLockingContext:
- WebDavPropertiesContext:
coreApiWebdavLocksUnlock:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavLocksUnlock"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- WebDavLockingContext:
- WebDavPropertiesContext:
coreApiWebdavMove1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavMove1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- WebDavPropertiesContext:
coreApiWebdavMove2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavMove2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- WebDavPropertiesContext:
coreApiWebdavOperations:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavOperations"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- SearchContext:
- PublicWebDavContext:
- WebDavPropertiesContext:
- TrashbinContext:
coreApiWebdavPreviews:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavPreviews"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- WebDavPropertiesContext:
coreApiWebdavProperties1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavProperties1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiWebdavProperties2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavProperties2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- WebDavPropertiesContext:
- AppConfigurationContext:
coreApiWebdavUpload1:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavUpload1"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- WebDavPropertiesContext:
coreApiWebdavUpload2:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavUpload2"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
coreApiWebdavUploadTUS:
paths:
- "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavUploadTUS"
context: *common_ldap_suite_context
contexts:
- FeatureContext: *common_feature_context_params
- PublicWebDavContext:
- TUSContext:
- FilesVersionsContext:
- ChecksumContext:
# coreApiWebdavEtagPropagation1:
# paths:
# - "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavEtagPropagation1"
# context: *common_ldap_suite_context
# contexts:
# - FeatureContext: *common_feature_context_params
# - TrashbinContext:
# - PublicWebDavContext:
# - FilesVersionsContext:
# - WebDavPropertiesContext:
# - AppConfigurationContext:
# coreApiWebdavEtagPropagation2:
# paths:
# - "%paths.base%/../../ocis/tests/acceptance/features/coreApiWebdavEtagPropagation2"
# context: *common_ldap_suite_context
# contexts:
# - FeatureContext: *common_feature_context_params
# - TrashbinContext:
# - PublicWebDavContext:
# - FilesVersionsContext:
# - WebDavPropertiesContext:
# - AppConfigurationContext:
extensions:
rdx\behatvars\BehatVariablesExtension: ~
Cjm\Behat\StepThroughExtension: ~

View File

@ -1,20 +0,0 @@
default:
autoload:
'': '%paths.base%/../features/bootstrap'
suites:
apiVirtualViews:
paths:
- '%paths.base%/../features/apiVirtualViews'
contexts:
- RevaContext:
- FeatureContext: &common_feature_context_params
baseUrl: http://frontend:20180
adminUsername: admin
adminPassword: admin
regularUserPassword: relativity
ocPath: apps/testing/api/v1/occ
- WebDavPropertiesContext:
extensions:
Cjm\Behat\StepThroughExtension: ~

File diff suppressed because it is too large

View File

@ -1,65 +0,0 @@
@api @virtual-views-required
Feature: virtual views
As admin
I want to be able to shard large folders over multiple storage providers
So that I can scale large numbers of users better.
Background:
Given user "einstein" deletes everything from folder "virtual/" using the WebDAV API
And user "einstein" has created the following folders
| path |
| virtual/a |
| virtual/a/alice |
| virtual/b |
| virtual/c |
| virtual/k |
| virtual/l |
| virtual/z |
Scenario: list large folder
Given using old DAV path
When user "einstein" lists the resources in "/virtual" with depth "0" using the WebDAV API
Then the HTTP status code should be "207"
And the last DAV response for user "einstein" should not contain these nodes
| name |
| virtual/a |
| virtual/b |
| virtual/c |
| virtual/k |
| virtual/l |
| virtual/z |
When user "einstein" lists the resources in "/virtual" with depth 1 using the WebDAV API
Then the HTTP status code should be "207"
And the last DAV response for user "einstein" should contain these nodes
| name |
| virtual/a |
| virtual/b |
| virtual/c |
| virtual/k |
| virtual/l |
| virtual/z |
And the last DAV response for user "einstein" should not contain these nodes
| name |
| virtual/a/alice |
When user "einstein" lists the resources in "/virtual" with depth "infinity" using the WebDAV API
Then the HTTP status code should be "207"
And the last DAV response for user "einstein" should contain these nodes
| name |
| virtual/a |
| virtual/a/alice |
| virtual/b |
| virtual/c |
| virtual/k |
| virtual/l |
| virtual/z |
Scenario: etag changes when adding a folder
Given user "einstein" has stored etag of element "/"
And user "einstein" has stored etag of element "/virtual"
And user "einstein" has stored etag of element "/virtual/b"
When user "einstein" creates folder "/virtual/b/bar" using the WebDAV API
Then these etags should have changed:
| user | path |
| einstein | / |
| einstein | /virtual |
| einstein | /virtual/b |

View File

@ -1,40 +0,0 @@
<?php
use Behat\Behat\Context\Context;
use Behat\Behat\Hook\Scope\BeforeScenarioScope;
use TestHelpers\AppConfigHelper;
use TestHelpers\SetupHelper;
require_once 'bootstrap.php';
/**
* Context for Reva specific steps
*/
class RevaContext implements Context {
/**
* @var FeatureContext
*/
private $featureContext;
/**
* @BeforeScenario
*
* @param BeforeScenarioScope $scope
*
* @return void
* @throws Exception
*/
public function setUpScenario(BeforeScenarioScope $scope) {
// Get the environment
$environment = $scope->getEnvironment();
// Get all the contexts you need in this context
$this->featureContext = $environment->getContext('FeatureContext');
SetupHelper::init(
$this->featureContext->getAdminUsername(),
$this->featureContext->getAdminPassword(),
$this->featureContext->getBaseUrl(),
$this->featureContext->getOcPath()
);
}
}

View File

@ -1,14 +0,0 @@
<?php
$pathToApiTests = \getenv('PATH_TO_APITESTS');
if ($pathToApiTests === false) {
$pathToApiTests = "../ocis";
}
require_once $pathToApiTests . '/tests/acceptance/features/bootstrap/bootstrap.php';
$classLoader = new \Composer\Autoload\ClassLoader();
$classLoader->addPsr4(
"", $pathToApiTests . "/tests/acceptance/features/bootstrap", true
);
$classLoader->register();

View File

@ -1,2 +0,0 @@
coreApiWebdavEtagPropagation1
coreApiWebdavEtagPropagation2

View File

@ -1,145 +0,0 @@
#!/usr/bin/env bash
log_error() {
if [ -n "${PLAIN_OUTPUT}" ]
then
echo -e "$1"
else
echo -e "\e[31m$1\e[0m"
fi
}
log_info() {
if [ -n "${PLAIN_OUTPUT}" ]
then
echo -e "$1"
else
echo -e "\e[34m$1\e[0m"
fi
}
log_success() {
if [ -n "${PLAIN_OUTPUT}" ]
then
echo -e "$1"
else
echo -e "\e[32m$1\e[0m"
fi
}
declare -A scenarioLines
if [ -n "${EXPECTED_FAILURES_FILE}" ]
then
if [ -f "${EXPECTED_FAILURES_FILE}" ]
then
log_info "Checking expected failures in ${EXPECTED_FAILURES_FILE}"
else
log_error "Expected failures file ${EXPECTED_FAILURES_FILE} not found"
log_error "Check the setting of EXPECTED_FAILURES_FILE environment variable"
exit 1
fi
FINAL_EXIT_STATUS=0
# If the last line of the expected-failures file ends without a newline character
# then that line may not get processed by some of the bash code in this script
# So check that the last character in the file is a newline
if [ "$(tail -c1 "${EXPECTED_FAILURES_FILE}" | wc -l)" -eq 0 ]
then
log_error "Expected failures file ${EXPECTED_FAILURES_FILE} must end with a newline"
log_error "Put a newline at the end of the last line and try again"
FINAL_EXIT_STATUS=1
fi
# Check the expected-failures file to ensure that the lines are self-consistent
# In most cases the features that are being run are in owncloud/core,
# so assume that by default.
FEATURE_FILE_REPO="owncloud/core"
FEATURE_FILE_PATH="tests/acceptance/features"
LINE_NUMBER=0
while read -r INPUT_LINE
do
LINE_NUMBER=$(("$LINE_NUMBER" + 1))
# Ignore comment lines (starting with hash)
if [[ "${INPUT_LINE}" =~ ^# ]]
then
continue
fi
# A line of text in the feature file can be used to indicate that the
# features being run are actually from some other repo. For example:
# "The expected failures in this file are from features in the owncloud/ocis repo."
# Write a line near the top of the expected-failures file to "declare" this,
# overriding the default "owncloud/core"
FEATURE_FILE_SPEC_LINE_FOUND="false"
if [[ "${INPUT_LINE}" =~ features[[:blank:]]in[[:blank:]]the[[:blank:]]([a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)[[:blank:]]repo ]]; then
FEATURE_FILE_REPO="${BASH_REMATCH[1]}"
log_info "Features are expected to be in the ${FEATURE_FILE_REPO} repo\n"
FEATURE_FILE_SPEC_LINE_FOUND="true"
fi
if [[ "${INPUT_LINE}" =~ repo[[:blank:]]in[[:blank:]]the[[:blank:]]([a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+)[[:blank:]]folder[[:blank:]]tree ]]; then
FEATURE_FILE_PATH="${BASH_REMATCH[1]}"
log_info "Features are expected to be in the ${FEATURE_FILE_PATH} folder tree\n"
FEATURE_FILE_SPEC_LINE_FOUND="true"
fi
if [[ $FEATURE_FILE_SPEC_LINE_FOUND == "true" ]]; then
continue
fi
# Match lines that have "- [someSuite/someName.feature:n]" pattern on start
# the part inside the brackets is the suite, feature and line number of the expected failure.
if [[ "${INPUT_LINE}" =~ ^-[[:space:]]\[([a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+\.feature:[0-9]+)] ]]; then
SUITE_SCENARIO_LINE="${BASH_REMATCH[1]}"
elif [[
# report for lines like: " - someSuite/someName.feature:n"
"${INPUT_LINE}" =~ ^[[:space:]]*-[[:space:]][a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+\.feature:[0-9]+[[:space:]]*$ ||
# report for lines starting with: "[someSuite/someName.feature:n]"
"${INPUT_LINE}" =~ ^[[:space:]]*\[([a-zA-Z0-9_-]+/[a-zA-Z0-9_-]+\.feature:[0-9]+)]
]]; then
log_error "> Line ${LINE_NUMBER}: Not in the correct format."
log_error " + Actual Line : '${INPUT_LINE}'"
log_error " - Expected Format : '- [suite/scenario.feature:line_number](scenario_line_url)'"
FINAL_EXIT_STATUS=1
continue
else
# otherwise, ignore the line
continue
fi
# Find the link in round-brackets that should be after the SUITE_SCENARIO_LINE
if [[ "${INPUT_LINE}" =~ \(([a-zA-Z0-9:/.#_-]+)\) ]]; then
ACTUAL_LINK="${BASH_REMATCH[1]}"
else
log_error "Line ${LINE_NUMBER}: ${INPUT_LINE} : Link is empty"
FINAL_EXIT_STATUS=1
continue
fi
if [[ -n "${scenarioLines[${SUITE_SCENARIO_LINE}]:-}" ]];
then
log_error "> Line ${LINE_NUMBER}: Scenario line ${SUITE_SCENARIO_LINE} is duplicated"
FINAL_EXIT_STATUS=1
fi
scenarioLines[${SUITE_SCENARIO_LINE}]="exists"
OLD_IFS=${IFS}
IFS=':'
read -ra FEATURE_PARTS <<< "${SUITE_SCENARIO_LINE}"
IFS=${OLD_IFS}
SUITE_FEATURE="${FEATURE_PARTS[0]}"
FEATURE_LINE="${FEATURE_PARTS[1]}"
EXPECTED_LINK="https://github.com/${FEATURE_FILE_REPO}/blob/master/${FEATURE_FILE_PATH}/${SUITE_FEATURE}#L${FEATURE_LINE}"
if [[ "${ACTUAL_LINK}" != "${EXPECTED_LINK}" ]]; then
log_error "> Line ${LINE_NUMBER}: Link is not correct for ${SUITE_SCENARIO_LINE}"
log_error " + Actual link : ${ACTUAL_LINK}"
log_error " - Expected link : ${EXPECTED_LINK}"
FINAL_EXIT_STATUS=1
fi
done < "${EXPECTED_FAILURES_FILE}"
else
log_error "Environment variable EXPECTED_FAILURES_FILE must be defined to be the file to check"
exit 1
fi
if [ ${FINAL_EXIT_STATUS} == 1 ]
then
log_error "\nErrors were found in the expected failures file - see the messages above!"
else
log_success "\nNo problems were found in the expected failures file."
fi
exit ${FINAL_EXIT_STATUS}

View File

@ -1,712 +0,0 @@
#!/bin/bash
[[ "${DEBUG}" == "true" ]] && set -x
# from http://stackoverflow.com/a/630387
SCRIPT_PATH="`dirname \"$0\"`" # relative
SCRIPT_PATH="`( cd \"${SCRIPT_PATH}\" && pwd )`" # absolutized and normalized
echo 'Script path: '${SCRIPT_PATH}
# Allow optionally passing in the path to the behat program.
# This gives flexibility for callers that have installed their own behat
if [ -z "${BEHAT_BIN}" ]
then
BEHAT=${SCRIPT_PATH}/../../vendor-bin/behat/vendor/bin/behat
else
BEHAT=${BEHAT_BIN}
fi
BEHAT_TAGS_OPTION_FOUND=false
if [ -n "${STEP_THROUGH}" ]
then
STEP_THROUGH_OPTION="--step-through"
fi
if [ -n "${STOP_ON_FAILURE}" ]
then
STOP_OPTION="--stop-on-failure"
fi
if [ -n "${PLAIN_OUTPUT}" ]
then
# explicitly tell Behat to not do colored output
COLORS_OPTION="--no-colors"
# Use the Bash "null" command to do nothing, rather than use tput to set a color
RED_COLOR=":"
GREEN_COLOR=":"
YELLOW_COLOR=":"
else
COLORS_OPTION="--colors"
RED_COLOR="tput setaf 1"
GREEN_COLOR="tput setaf 2"
YELLOW_COLOR="tput setaf 3"
fi
# The following environment variables can be specified:
#
# ACCEPTANCE_TEST_TYPE - see "--type" description
# BEHAT_FEATURE - see "--feature" description
# BEHAT_FILTER_TAGS - see "--tags" description
# BEHAT_SUITE - see "--suite" description
# BEHAT_YML - see "--config" description
# RUN_PART and DIVIDE_INTO_NUM_PARTS - see "--part" description
# SHOW_OC_LOGS - see "--show-oc-logs" description
# TESTING_REMOTE_SYSTEM - see "--remote" description
# EXPECTED_FAILURES_FILE - a file that contains a list of the scenarios that are expected to fail
if [ -n "${EXPECTED_FAILURES_FILE}" ]
then
# Check the expected-failures file
${SCRIPT_PATH}/lint-expected-failures.sh
LINT_STATUS=$?
if [ ${LINT_STATUS} -ne 0 ]
then
echo "Error: expected failures file ${EXPECTED_FAILURES_FILE} is invalid"
exit ${LINT_STATUS}
fi
fi
# Default to API tests
# Note: if a specific feature or suite is also specified, then the acceptance
# test type is deduced from the suite name, and this environment variable
# ACCEPTANCE_TEST_TYPE is overridden.
if [ -z "${ACCEPTANCE_TEST_TYPE}" ]
then
ACCEPTANCE_TEST_TYPE="api"
fi
# Look for command line options for:
# -c or --config - specify a behat.yml to use
# --feature - specify a single feature to run
# --suite - specify a single suite to run
# --type - api or core-api - if no individual feature or suite is specified, then
# specify the type of acceptance tests to run. Default api.
# --tags - specify tags for scenarios to run (or not)
# --remote - the server under test is remote, so we cannot locally enable the
# testing app. We have to assume it is already enabled.
# --show-oc-logs - tail the ownCloud log after the test run
# --loop - loop tests for given number of times. Only use it for debugging purposes
# --part - run a subset of scenarios, need two numbers.
# first number: which part to run
# second number: in how many parts to divide the set of scenarios
# --step-through - pause after each test step
# Command line options processed here will override environment variables that
# might have been set by the caller, or in the code above.
while [[ $# -gt 0 ]]
do
key="$1"
case ${key} in
-c|--config)
BEHAT_YML="$2"
shift
;;
--feature)
BEHAT_FEATURE="$2"
shift
;;
--suite)
BEHAT_SUITE="$2"
shift
;;
--loop)
BEHAT_RERUN_TIMES="$2"
shift
;;
--type)
# Lowercase the parameter value, so the user can provide "API", "CORE-API", etc
ACCEPTANCE_TEST_TYPE="${2,,}"
shift
;;
--tags)
BEHAT_FILTER_TAGS="$2"
BEHAT_TAGS_OPTION_FOUND=true
shift
;;
--part)
RUN_PART="$2"
DIVIDE_INTO_NUM_PARTS="$3"
if [ ${RUN_PART} -gt ${DIVIDE_INTO_NUM_PARTS} ]
then
echo "cannot run part ${RUN_PART} of ${DIVIDE_INTO_NUM_PARTS}"
exit 1
fi
shift 2
;;
--step-through)
STEP_THROUGH_OPTION="--step-through"
;;
*)
# A "random" parameter is presumed to be a feature file to run.
# Typically that will be specified at the end, or as the only
# parameter.
BEHAT_FEATURE="$1"
;;
esac
shift
done
# Set the language to "C"
# We want to have it all in english to be able to parse outputs
export LANG=C
# Provide a default admin username and password.
# But let the caller pass them if they wish
if [ -z "${ADMIN_USERNAME}" ]
then
ADMIN_USERNAME="admin"
fi
if [ -z "${ADMIN_PASSWORD}" ]
then
ADMIN_PASSWORD="admin"
fi
export ADMIN_USERNAME
export ADMIN_PASSWORD
if [ -z "${BEHAT_RERUN_TIMES}" ]
then
BEHAT_RERUN_TIMES=1
fi
# expected variables
# --------------------
# $SUITE_FEATURE_TEXT - human readable which test to run
# $BEHAT_SUITE_OPTION - suite setting with "--suite" or empty if all suites have to be run
# $BEHAT_FEATURE - feature file, or empty
# $BEHAT_FILTER_TAGS - list of tags
# $BEHAT_TAGS_OPTION_FOUND
# $TEST_LOG_FILE
# $BEHAT - behat executable
# $BEHAT_YML
#
# set arrays
# ---------------
# $UNEXPECTED_FAILED_SCENARIOS array of scenarios that failed unexpectedly
# $UNEXPECTED_PASSED_SCENARIOS array of scenarios that passed unexpectedly (while running with expected-failures.txt)
# $STOP_ON_FAILURE - aborts the test run after the first failure
declare -a UNEXPECTED_FAILED_SCENARIOS
declare -a UNEXPECTED_PASSED_SCENARIOS
declare -a UNEXPECTED_BEHAT_EXIT_STATUSES
function run_behat_tests() {
echo "Running ${SUITE_FEATURE_TEXT} tests tagged ${BEHAT_FILTER_TAGS}" | tee ${TEST_LOG_FILE}
if [ "${REPLACE_USERNAMES}" == "true" ]
then
echo "Usernames and attributes in tests are being replaced:"
cat ${SCRIPT_PATH}/usernames.json
fi
echo "Using behat config '${BEHAT_YML}'"
${BEHAT} ${COLORS_OPTION} ${STOP_OPTION} --strict ${STEP_THROUGH_OPTION} -c ${BEHAT_YML} -f pretty ${BEHAT_SUITE_OPTION} --tags ${BEHAT_FILTER_TAGS} ${BEHAT_FEATURE} -v 2>&1 | tee -a ${TEST_LOG_FILE}
BEHAT_EXIT_STATUS=${PIPESTATUS[0]}
# remove nullbytes from the test log
TEMP_CONTENT=$(tr < ${TEST_LOG_FILE} -d '\000')
OLD_IFS="${IFS}"
IFS=""
echo ${TEMP_CONTENT} > ${TEST_LOG_FILE}
IFS="${OLD_IFS}"
# Find the count of scenarios that passed
SCENARIO_RESULTS_COLORED=`grep -Ea '^[0-9]+[[:space:]]scenario(|s)[[:space:]]\(' ${TEST_LOG_FILE}`
SCENARIO_RESULTS=$(echo "${SCENARIO_RESULTS_COLORED}" | sed "s/\x1b[^m]*m//g")
if [ ${BEHAT_EXIT_STATUS} -eq 0 ]
then
# They (SCENARIO_RESULTS) all passed, so just get the first number.
# The text looks like "1 scenario (1 passed)" or "123 scenarios (123 passed)"
[[ ${SCENARIO_RESULTS} =~ ([0-9]+) ]]
SCENARIOS_THAT_PASSED=$((SCENARIOS_THAT_PASSED + BASH_REMATCH[1]))
else
# "Something went wrong" with the Behat run (non-zero exit status).
# If there were "ordinary" test fails, then we process that later. Maybe they are all "expected failures".
# But if there were steps in a feature file that are undefined, we want to fail immediately.
# So exit the tests and do not lint expected failures when undefined steps exist.
if [[ ${SCENARIO_RESULTS} == *"undefined"* ]]
then
${RED_COLOR}; echo -e "Undefined steps: There were some undefined steps found."
exit 1
fi
# If there were no scenarios in the requested suite or feature that match
# the requested combination of tags, then Behat exits with an error status
# and reports "No scenarios" in its output.
# This can happen, for example, when running core suites from an app and
# requesting some tag combination that does not happen frequently. Then
# sometimes there may not be any matching scenarios in one of the suites.
# In this case, consider the test has passed.
MATCHING_COUNT=`grep -ca '^No scenarios$' ${TEST_LOG_FILE}`
if [ ${MATCHING_COUNT} -eq 1 ]
then
echo "Information: no matching scenarios were found."
BEHAT_EXIT_STATUS=0
else
# Find the count of scenarios that passed and failed
SCENARIO_RESULTS_COLORED=`grep -Ea '^[0-9]+[[:space:]]scenario(|s)[[:space:]]\(' ${TEST_LOG_FILE}`
SCENARIO_RESULTS=$(echo "${SCENARIO_RESULTS_COLORED}" | sed "s/\x1b[^m]*m//g")
if [[ ${SCENARIO_RESULTS} =~ [0-9]+[^0-9]+([0-9]+)[^0-9]+([0-9]+)[^0-9]+ ]]
then
# Some passed and some failed, we got the second and third numbers.
# The text looked like "15 scenarios (6 passed, 9 failed)"
SCENARIOS_THAT_PASSED=$((SCENARIOS_THAT_PASSED + BASH_REMATCH[1]))
SCENARIOS_THAT_FAILED=$((SCENARIOS_THAT_FAILED + BASH_REMATCH[2]))
elif [[ ${SCENARIO_RESULTS} =~ [0-9]+[^0-9]+([0-9]+)[^0-9]+ ]]
then
# All failed, we got the second number.
# The text looked like "4 scenarios (4 failed)"
SCENARIOS_THAT_FAILED=$((SCENARIOS_THAT_FAILED + BASH_REMATCH[1]))
fi
fi
fi
FAILED_SCENARIO_PATHS_COLORED=`awk '/Failed scenarios:/',0 ${TEST_LOG_FILE} | grep -a feature`
# There will be some ANSI escape codes for color in the FEATURE_COLORED var.
# Strip them out so we can pass just the ordinary feature details to Behat.
# Thanks to https://en.wikipedia.org/wiki/Tee_(command) and
# https://stackoverflow.com/questions/23416278/how-to-strip-ansi-escape-sequences-from-a-variable
# for ideas.
FAILED_SCENARIO_PATHS=$(echo "${FAILED_SCENARIO_PATHS_COLORED}" | sed "s/\x1b[^m]*m//g")
# If something else went wrong, and there were no failed scenarios,
# then the awk, grep, sed command sequence above ends up with an empty string.
# Unset FAILED_SCENARIO_PATHS to avoid later code thinking that there might be
# one failed scenario.
if [ -z "${FAILED_SCENARIO_PATHS}" ]
then
unset FAILED_SCENARIO_PATHS
fi
if [ -n "${EXPECTED_FAILURES_FILE}" ]
then
if [ -n "${BEHAT_SUITE_TO_RUN}" ]
then
echo "Checking expected failures for suite ${BEHAT_SUITE_TO_RUN}"
else
echo "Checking expected failures"
fi
# Check that every failed scenario is in the list of expected failures
for FAILED_SCENARIO_PATH in ${FAILED_SCENARIO_PATHS}
do
SUITE_PATH=`dirname ${FAILED_SCENARIO_PATH}`
SUITE=`basename ${SUITE_PATH}`
SCENARIO=`basename ${FAILED_SCENARIO_PATH}`
SUITE_SCENARIO="${SUITE}/${SCENARIO}"
grep "\[${SUITE_SCENARIO}\]" "${EXPECTED_FAILURES_FILE}" > /dev/null
if [ $? -ne 0 ]
then
echo "Error: Scenario ${SUITE_SCENARIO} failed but was not expected to fail."
UNEXPECTED_FAILED_SCENARIOS+=("${SUITE_SCENARIO}")
fi
done
# Check that every scenario in the list of expected failures did fail
while read SUITE_SCENARIO
do
# Ignore comment lines (starting with hash)
if [[ "${SUITE_SCENARIO}" =~ ^# ]]
then
continue
fi
# Match lines that have [someSuite/someName.feature:n] - the part inside the
# brackets is the suite, feature and line number of the expected failure.
# Else ignore the line.
if [[ "${SUITE_SCENARIO}" =~ \[([a-zA-Z0-9-]+/[a-zA-Z0-9-]+\.feature:[0-9]+)] ]]; then
SUITE_SCENARIO="${BASH_REMATCH[1]}"
else
continue
fi
if [ -n "${BEHAT_SUITE_TO_RUN}" ]
then
# If the expected failure is not in the suite that is currently being run,
# then do not try and check that it failed.
REGEX_TO_MATCH="^${BEHAT_SUITE_TO_RUN}/"
if ! [[ "${SUITE_SCENARIO}" =~ ${REGEX_TO_MATCH} ]]
then
continue
fi
fi
# look for the expected suite-scenario at the end of a line in the
# FAILED_SCENARIO_PATHS - for example looking for apiComments/comments.feature:9
# we want to match lines like:
# tests/acceptance/features/apiComments/comments.feature:9
# but not lines like::
# tests/acceptance/features/apiComments/comments.feature:902
echo "${FAILED_SCENARIO_PATHS}" | grep ${SUITE_SCENARIO}$ > /dev/null
if [ $? -ne 0 ]
then
echo "Info: Scenario ${SUITE_SCENARIO} was expected to fail but did not fail."
UNEXPECTED_PASSED_SCENARIOS+=("${SUITE_SCENARIO}")
fi
done < ${EXPECTED_FAILURES_FILE}
else
for FAILED_SCENARIO_PATH in ${FAILED_SCENARIO_PATHS}
do
SUITE_PATH=$(dirname "${FAILED_SCENARIO_PATH}")
SUITE=$(basename "${SUITE_PATH}")
SCENARIO=$(basename "${FAILED_SCENARIO_PATH}")
SUITE_SCENARIO="${SUITE}/${SCENARIO}"
UNEXPECTED_FAILED_SCENARIOS+=("${SUITE_SCENARIO}")
done
fi
if [ ${BEHAT_EXIT_STATUS} -ne 0 ] && [ ${#FAILED_SCENARIO_PATHS[@]} -eq 0 ]
then
# Behat had some problem and there were no failed scenarios reported
# So the problem is something else.
# Possibly there were missing step definitions. Or Behat crashed badly, or...
UNEXPECTED_BEHAT_EXIT_STATUSES+=("${SUITE_FEATURE_TEXT} had behat exit status ${BEHAT_EXIT_STATUS}")
fi
if [ "${BEHAT_TAGS_OPTION_FOUND}" != true ]
then
# The behat run specified to skip scenarios tagged @skip
# Report them in a dry-run so they can be seen
# Big red error output is displayed if there are no matching scenarios - send it to null
DRY_RUN_FILE=$(mktemp)
SKIP_TAGS="@skip"
${BEHAT} --dry-run {$COLORS_OPTION} -c ${BEHAT_YML} -f pretty ${BEHAT_SUITE_OPTION} --tags "${SKIP_TAGS}" ${BEHAT_FEATURE} 1>${DRY_RUN_FILE} 2>/dev/null
if grep -q -m 1 'No scenarios' "${DRY_RUN_FILE}"
then
# If there are no skip scenarios, then no need to report that
:
else
echo ""
echo "The following tests were skipped because they are tagged @skip:"
cat "${DRY_RUN_FILE}" | tee -a ${TEST_LOG_FILE}
fi
rm -f "${DRY_RUN_FILE}"
fi
}
declare -x TEST_SERVER_URL
if [ -z "${IPV4_URL}" ]
then
IPV4_URL="${TEST_SERVER_URL}"
fi
if [ -z "${IPV6_URL}" ]
then
IPV6_URL="${TEST_SERVER_URL}"
fi
# If a feature file has been specified but no suite, then deduce the suite
if [ -n "${BEHAT_FEATURE}" ] && [ -z "${BEHAT_SUITE}" ]
then
SUITE_PATH=`dirname ${BEHAT_FEATURE}`
BEHAT_SUITE=`basename ${SUITE_PATH}`
fi
if [ -z "${BEHAT_YML}" ]
then
# Look for a behat.yml somewhere below the current working directory
# This saves app acceptance tests being forced to specify BEHAT_YML
BEHAT_YML="config/behat.yml"
if [ ! -f "${BEHAT_YML}" ]
then
BEHAT_YML="acceptance/config/behat.yml"
fi
if [ ! -f "${BEHAT_YML}" ]
then
BEHAT_YML="tests/acceptance/config/behat.yml"
fi
# If no luck above, then use the core behat.yml that should live below this script
if [ ! -f "${BEHAT_YML}" ]
then
BEHAT_YML="${SCRIPT_PATH}/config/behat.yml"
fi
fi
BEHAT_CONFIG_DIR=$(dirname "${BEHAT_YML}")
ACCEPTANCE_DIR=$(dirname "${BEHAT_CONFIG_DIR}")
if [[ -z "$BEHAT_FEATURES_DIR" ]]
then
BEHAT_FEATURES_DIR="${ACCEPTANCE_DIR}/features"
fi
declare -a BEHAT_SUITES
function get_behat_suites() {
# $1 type of suites to get "api" or "core-api"
# defaults to "api"
TYPE="$1"
if [[ -z "$TYPE" ]]
then
TYPE="api"
fi
ALL_SUITES=""
for suite in `find ${BEHAT_FEATURES_DIR}/ -type d -iname ${TYPE}* | sort | rev | cut -d"/" -f1 | rev`
do
if [[ -f "${BEHAT_FILTER_SUITE_FILE}" ]]
then
if [[ ! `grep $suite "${BEHAT_FILTER_SUITE_FILE}"` ]]
then
ALL_SUITES+="$suite"$'\n'
fi
else
ALL_SUITES+="$suite"$'\n'
fi
done
COUNT_ALL_SUITES=`echo "${ALL_SUITES}" | tr " " "\n" | wc -l`
#divide the suites letting it round down (could be zero)
MIN_SUITES_PER_RUN=$((${COUNT_ALL_SUITES} / ${DIVIDE_INTO_NUM_PARTS}))
#some jobs might need an extra suite
MAX_SUITES_PER_RUN=$((${MIN_SUITES_PER_RUN} + 1))
# the remaining number of suites that need to be distributed (could be zero)
REMAINING_SUITES=$((${COUNT_ALL_SUITES} - (${DIVIDE_INTO_NUM_PARTS} * ${MIN_SUITES_PER_RUN})))
if [[ ${RUN_PART} -le ${REMAINING_SUITES} ]]
then
SUITES_THIS_RUN=${MAX_SUITES_PER_RUN}
SUITES_IN_PREVIOUS_RUNS=$((${MAX_SUITES_PER_RUN} * (${RUN_PART} - 1)))
else
SUITES_THIS_RUN=${MIN_SUITES_PER_RUN}
SUITES_IN_PREVIOUS_RUNS=$((((${MAX_SUITES_PER_RUN} * ${REMAINING_SUITES}) + (${MIN_SUITES_PER_RUN} * (${RUN_PART} - ${REMAINING_SUITES} - 1)))))
fi
if [ ${SUITES_THIS_RUN} -eq 0 ]
then
echo "there are only ${COUNT_ALL_SUITES} suites, nothing to do in part ${RUN_PART}"
exit 0
fi
COUNT_FINISH_AND_TODO_SUITES=$((${SUITES_IN_PREVIOUS_RUNS} + ${SUITES_THIS_RUN}))
BEHAT_SUITES+=(`echo "${ALL_SUITES}" | tr " " "\n" | head -n ${COUNT_FINISH_AND_TODO_SUITES} | tail -n ${SUITES_THIS_RUN}`)
}
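# Worked example (illustrative; not part of the original script): with 10
# matching suites and DIVIDE_INTO_NUM_PARTS=4, the arithmetic above yields
# MIN_SUITES_PER_RUN=2, MAX_SUITES_PER_RUN=3 and REMAINING_SUITES=2, so parts
# 1 and 2 run three suites each (suites 1-3 and 4-6) while parts 3 and 4 run
# two each (suites 7-8 and 9-10).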
if [[ -n "${BEHAT_SUITE}" ]]
then
BEHAT_SUITES+=("${BEHAT_SUITE}")
else
if [[ -n "${RUN_PART}" ]]; then
if [[ "${ACCEPTANCE_TEST_TYPE}" == "core-api" ]]; then
get_behat_suites "core"
else
get_behat_suites "${ACCEPTANCE_TEST_TYPE}"
fi
fi
fi
TEST_TYPE_TEXT="API"
# Always have "@api"
if [ ! -z "${BEHAT_FILTER_TAGS}" ]
then
# Be nice to the caller
# Remove any extra "&&" at the end of their tags list
BEHAT_FILTER_TAGS="${BEHAT_FILTER_TAGS%&&}"
# Remove any extra "&&" at the beginning of their tags list
BEHAT_FILTER_TAGS="${BEHAT_FILTER_TAGS#&&}"
fi
# EMAIL_HOST defines where the system-under-test can find the email server (inbucket)
# for sending email.
if [ -z "${EMAIL_HOST}" ]
then
EMAIL_HOST="127.0.0.1"
fi
# LOCAL_EMAIL_HOST defines where this test script can find the Inbucket server
# for sending email. When testing a remote system, the Inbucket server somewhere
# "in the middle" might have a different host name from the point of view of
# the test script.
if [ -z "${LOCAL_EMAIL_HOST}" ]
then
LOCAL_EMAIL_HOST="${EMAIL_HOST}"
fi
if [ -z "${EMAIL_SMTP_PORT}" ]
then
EMAIL_SMTP_PORT="2500"
fi
# If the caller did not mention specific tags, skip the skipped tests by default
if [ "${BEHAT_TAGS_OPTION_FOUND}" = false ]
then
if [[ -z $BEHAT_FILTER_TAGS ]]
then
BEHAT_FILTER_TAGS="~@skip"
# If the caller has already specified specifically to run "@skip" scenarios
# then do not append "not @skip"
elif [[ ! ${BEHAT_FILTER_TAGS} =~ (^|&)@skip(&|$) ]]
then
BEHAT_FILTER_TAGS="${BEHAT_FILTER_TAGS}&&~@skip"
fi
fi
export IPV4_URL
export IPV6_URL
export FILES_FOR_UPLOAD="${SCRIPT_PATH}/filesForUpload/"
if [ "${TEST_OCIS}" != "true" ] && [ "${TEST_REVA}" != "true" ]
then
# We are testing on an ownCloud core server.
# Tell the tests to wait 1 second between each upload/delete action
# to avoid problems with actions that depend on timestamps in seconds.
export UPLOAD_DELETE_WAIT_TIME=1
fi
TEST_LOG_FILE=$(mktemp)
SCENARIOS_THAT_PASSED=0
SCENARIOS_THAT_FAILED=0
if [ ${#BEHAT_SUITES[@]} -eq 0 ] && [ -z "${BEHAT_FEATURE}" ]
then
SUITE_FEATURE_TEXT="all ${TEST_TYPE_TEXT}"
run_behat_tests
else
if [ -n "${BEHAT_SUITE}" ]
then
SUITE_FEATURE_TEXT="${BEHAT_SUITE}"
fi
if [ -n "${BEHAT_FEATURE}" ]
then
# If running a whole feature, it will be something like login.feature
# If running just a single scenario, it will also have the line number
# like login.feature:36 - which will be parsed correctly like a "file"
# by basename.
BEHAT_FEATURE_FILE=`basename ${BEHAT_FEATURE}`
SUITE_FEATURE_TEXT="${SUITE_FEATURE_TEXT} ${BEHAT_FEATURE_FILE}"
fi
fi
for i in "${!BEHAT_SUITES[@]}"
do
BEHAT_SUITE_TO_RUN="${BEHAT_SUITES[$i]}"
BEHAT_SUITE_OPTION="--suite=${BEHAT_SUITE_TO_RUN}"
SUITE_FEATURE_TEXT="${BEHAT_SUITES[$i]}"
for rerun_number in $(seq 1 ${BEHAT_RERUN_TIMES})
do
if ((${BEHAT_RERUN_TIMES} > 1))
then
echo -e "\nTest repeat $rerun_number of ${BEHAT_RERUN_TIMES}"
fi
run_behat_tests
done
done
TOTAL_SCENARIOS=$((SCENARIOS_THAT_PASSED + SCENARIOS_THAT_FAILED))
echo "runsh: Total ${TOTAL_SCENARIOS} scenarios (${SCENARIOS_THAT_PASSED} passed, ${SCENARIOS_THAT_FAILED} failed)"
# 3 types of things can have gone wrong:
# - some scenario failed (and it was not expected to fail)
# - some scenario passed (but it was expected to fail)
# - Behat exited with non-zero status because of some other error
# If any of these happened then report about it and exit with status 1 (error)
if [ ${#UNEXPECTED_FAILED_SCENARIOS[@]} -gt 0 ]
then
UNEXPECTED_FAILURE=true
else
UNEXPECTED_FAILURE=false
fi
if [ ${#UNEXPECTED_PASSED_SCENARIOS[@]} -gt 0 ]
then
UNEXPECTED_SUCCESS=true
else
UNEXPECTED_SUCCESS=false
fi
if [ ${#UNEXPECTED_BEHAT_EXIT_STATUSES[@]} -gt 0 ]
then
UNEXPECTED_BEHAT_EXIT_STATUS=true
else
UNEXPECTED_BEHAT_EXIT_STATUS=false
fi
# If we got some unexpected success, and we only ran a single feature or scenario
# then the fact that some expected failures did not happen might be because those
# scenarios were never even run.
# Filter the UNEXPECTED_PASSED_SCENARIOS to remove scenarios that were not run.
if [ "${UNEXPECTED_SUCCESS}" = true ]
then
ACTUAL_UNEXPECTED_PASS=()
# if running a single feature or a single scenario
if [[ -n "${BEHAT_FEATURE}" ]]
then
for unexpected_passed_value in "${UNEXPECTED_PASSED_SCENARIOS[@]}"
do
# check only for the running feature
if [[ $BEHAT_FEATURE == *":"* ]]
then
BEHAT_FEATURE_WITH_LINE_NUM=$BEHAT_FEATURE
else
LINE_NUM=$(echo ${unexpected_passed_value} | cut -d":" -f2)
BEHAT_FEATURE_WITH_LINE_NUM=$BEHAT_FEATURE:$LINE_NUM
fi
if [[ $BEHAT_FEATURE_WITH_LINE_NUM == *"${unexpected_passed_value}" ]]
then
ACTUAL_UNEXPECTED_PASS+=("${unexpected_passed_value}")
fi
done
else
ACTUAL_UNEXPECTED_PASS=("${UNEXPECTED_PASSED_SCENARIOS[@]}")
fi
if [ ${#ACTUAL_UNEXPECTED_PASS[@]} -eq 0 ]
then
UNEXPECTED_SUCCESS=false
fi
fi
if [ "${UNEXPECTED_FAILURE}" = false ] && [ "${UNEXPECTED_SUCCESS}" = false ] && [ "${UNEXPECTED_BEHAT_EXIT_STATUS}" = false ]
then
FINAL_EXIT_STATUS=0
else
FINAL_EXIT_STATUS=1
fi
if [ -n "${EXPECTED_FAILURES_FILE}" ]
then
echo "runsh: Exit code after checking expected failures: ${FINAL_EXIT_STATUS}"
fi
if [ "${UNEXPECTED_FAILURE}" = true ]
then
${YELLOW_COLOR}; echo "runsh: Total unexpected failed scenarios throughout the test run:"
${RED_COLOR}; printf "%s\n" "${UNEXPECTED_FAILED_SCENARIOS[@]}"
else
${GREEN_COLOR}; echo "runsh: There were no unexpected failures."
fi
if [ "${UNEXPECTED_SUCCESS}" = true ]
then
${YELLOW_COLOR}; echo "runsh: Total unexpected passed scenarios throughout the test run:"
${RED_COLOR}; printf "%s\n" "${ACTUAL_UNEXPECTED_PASS[@]}"
else
${GREEN_COLOR}; echo "runsh: There were no unexpected success."
fi
if [ "${UNEXPECTED_BEHAT_EXIT_STATUS}" = true ]
then
${YELLOW_COLOR}; echo "runsh: The following Behat test runs exited with non-zero status:"
${RED_COLOR}; printf "%s\n" "${UNEXPECTED_BEHAT_EXIT_STATUSES[@]}"
fi
# sync the file-system so all output will be flushed to storage.
# In drone we sometimes see that the last lines of output are missing from the
# drone log.
sync
# If we are running in drone CI, then sleep for a bit to (hopefully) let the
# drone agent send all the output to the drone server.
if [ -n "${CI_REPO}" ]
then
echo "sleeping for 30 seconds at end of test run"
sleep 30
fi
exit ${FINAL_EXIT_STATUS}

View File

@ -1,4 +1,3 @@
version: "3.4"
services:
revad:
# image: ${REVAD_IMAGE}
@ -9,11 +8,11 @@ services:
build:
context: ../../
dockerfile: docker/Dockerfile.revad-eos
volumes:
- ../revad:/etc/revad
working_dir: /etc/revad/
healthcheck:
test: sleep 5
volumes:
- ../revad:/etc/revad
eos-storage:
image: ${EOS_FULL_IMAGE}
security_opt:
@ -31,50 +30,56 @@ services:
interval: 10s
timeout: 5s
retries: 5
depends_on:
ldap:
condition: service_healthy
litmus:
image: registry.cern.ch/docker.io/owncloud/litmus:latest
environment:
LITMUS_USERNAME: einstein
LITMUS_PASSWORD: relativity
TESTS: basic http copymove props
acceptance:
image: cs3org/behat:latest
entrypoint: /mnt/acceptance/run.sh
environment:
OCIS_REVA_DATA_ROOT: /var/tmp/reva/data/
DELETE_USER_DATA_CMD: rm -rf /var/tmp/reva/data/nodes/root/* /var/tmp/reva/data/nodes/*-*-*-* /var/tmp/reva/data/blobs/*
SKELETON_DIR: /mnt/testing/data/apiSkeleton
PLAIN_OUTPUT: 'true'
volumes:
- ../testing:/mnt/testing
- ../ocis:/mnt/ocis
- ../acceptance:/mnt/acceptance
TESTS: basic http copymove
gateway:
extends: revad
command: -c /etc/revad/gateway.toml
volumes:
- shared-volume:/var/tmp
depends_on:
ldap:
condition: service_healthy
frontend:
extends: revad
command: -c /etc/revad/frontend.toml
volumes:
- shared-volume:/var/tmp
depends_on:
ldap:
condition: service_healthy
storage-home:
extends: revad
hostname: storage-home
command: -c /etc/revad/storage-home.toml
volumes:
- shared-volume:/var/tmp
depends_on:
ldap:
condition: service_healthy
users:
extends: revad
command: -c /etc/revad/users.toml
volumes:
- shared-volume:/var/tmp
depends_on:
ldap:
condition: service_healthy
shares:
extends: revad
command: -c /etc/revad/shares.toml
volumes:
- shared-volume:/var/tmp
depends_on:
ldap:
condition: service_healthy
litmus-1:
extends: litmus
environment:
@ -92,6 +97,8 @@ services:
condition: service_healthy
shares:
condition: service_healthy
ldap:
condition: service_healthy
litmus-2:
extends: litmus
environment:
@ -120,10 +127,12 @@ services:
condition: service_healthy
frontend-global:
extends: revad
hostname: frontend
command: -c /etc/revad/frontend-global.toml
volumes:
- shared-volume:/var/tmp
depends_on:
eos-storage:
condition: service_healthy
storage-local-1:
extends: revad
command: -c /etc/revad/storage-local-1.toml
@ -140,35 +149,6 @@ services:
depends_on:
eos-storage:
condition: service_healthy
acceptance-1:
extends: acceptance
environment:
PATH_TO_APITESTS: /mnt/ocis
TEST_SERVER_URL: http://frontend:20180
TEST_REVA: 'true'
EXPECTED_FAILURES_FILE: /mnt/acceptance/expected-failures-on-EOS-storage.md
REGULAR_USER_PASSWORD: relativity
SEND_SCENARIO_LINE_REFERENCES: 'true'
BEHAT_SUITE: apiVirtualViews
ACCEPTANCE_TEST_TYPE: api
volumes:
- shared-volume:/var/tmp
working_dir: /mnt/acceptance
depends_on:
gateway:
condition: service_healthy
frontend-global:
condition: service_healthy
storage-home:
condition: service_healthy
users:
condition: service_healthy
shares:
condition: service_healthy
storage-local-1:
condition: service_healthy
storage-local-2:
condition: service_healthy
ldap:
image: osixia/openldap:1.5.0
environment:
@ -198,58 +178,5 @@ services:
depends_on:
ldap:
condition: service_healthy
acceptance-2:
extends: acceptance
environment:
TEST_SERVER_URL: http://frontend:20080
TEST_WITH_LDAP: 'true'
REVA_LDAP_HOSTNAME: ldap
TEST_REVA: 'true'
SEND_SCENARIO_LINE_REFERENCES: 'true'
BEHAT_FILTER_TAGS: ~@provisioning_api-app-required&&~@skipOnOcis-OCIS-Storage&&~@personalSpace&&~@skipOnGraph&&~@carddav&&~@skipOnReva&&~@skipOnRevaMaster
DIVIDE_INTO_NUM_PARTS: ${PARTS:-1}
RUN_PART: ${PART:-1}
EXPECTED_FAILURES_FILE: /mnt/acceptance/expected-failures-on-EOS-storage.md
BEHAT_FEATURES_DIR: /mnt/ocis/tests/acceptance/features
BEHAT_YML: /mnt/acceptance/config/behat-core.yml
ACCEPTANCE_TEST_TYPE: core-api
BEHAT_FILTER_SUITE_FILE: /mnt/acceptance/filtered-suites-acceptance-2-EOS
volumes:
- shared-volume:/var/tmp
working_dir: /mnt/ocis
depends_on:
gateway:
condition: service_healthy
frontend:
condition: service_healthy
storage-home:
condition: service_healthy
shares:
condition: service_healthy
storage-users:
condition: service_healthy
storage-publiclink:
condition: service_healthy
ldap-users:
condition: service_healthy
# ceph:
# image: ceph/daemon
# environment:
# CEPH_DAEMON: demo
# NETWORK_AUTO_DETECT: 4
# MON_IP: 0.0.0.0
# CEPH_PUBLIC_NETWORK: 0.0.0.0/0
# RGW_CIVETWEB_PORT: 4000
# RGW_NAME: ceph
# CEPH_DEMO_UID: test-user
# CEPH_DEMO_ACCESS_KEY: test
# CEPH_DEMO_SECRET_KEY: test
# CEPH_DEMO_BUCKET: test
# healthcheck:
# test: ceph health
# interval: 5s
# timeout: 5s
# retries: 5
volumes:
shared-volume:
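As a rough sketch, one of the litmus jobs defined above could be started locally with Docker Compose along these lines; the service name comes from this file, while the compose file path is an assumption and should be adjusted to wherever the file lives in the repository.

# Hypothetical local run; adjust the -f path to this compose file's location.
docker compose -f docker-compose.yml up --build --exit-code-from litmus-1 litmus-1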

View File

@ -5,7 +5,7 @@
# needs to be passed, otherwise the container will run out of file descriptors.
# docker run myeoscontainer --ulimit nofile=1024000:1024000
FROM gitlab-registry.cern.ch/dss/eos/eos-ci:5.1.25
FROM gitlab-registry.cern.ch/dss/eos/eos-ci:02352772.el9
COPY scripts/eos-run.sh /mnt/scripts/eos-run.sh
COPY sssd/sssd.conf /etc/sssd/sssd.conf
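A minimal sketch of building and running this image with the raised file-descriptor limit mentioned in the comment above; the Dockerfile path and image tag are placeholders, not values defined by this commit.

# Hypothetical build/run; path and tag are placeholders.
docker build -f <path-to-this-Dockerfile> -t eos-test .
docker run --ulimit nofile=1024000:1024000 eos-test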

View File

@ -32,11 +32,21 @@ for letter in {a..z}; do
eos mkdir -p "/eos/user/$letter"
done
# create cbox sudoer user
adduser cbox -u 58679 -g 0 -m -s /bin/sh
eos vid set membership 0 +sudo
eos vid set membership 99 +sudo
eos vid set map -tident "*@storage-home" vuid:0 vgid:0
eos vid set map -tident "*@storage-users" vuid:0 vgid:0
eos vid set map -tident "*@storage-local-1" vuid:0 vgid:0
eos vid set map -tident "*@storage-local-2" vuid:0 vgid:0
eos vid set membership cbox +sudo
eos vid set map -tident "*@storage-home" vuid:58679 vgid:0
eos vid set map -tident "*@storage-users" vuid:58679 vgid:0
eos vid set map -tident "*@storage-local-1" vuid:58679 vgid:0
eos vid set map -tident "*@storage-local-2" vuid:58679 vgid:0
eos vid set map -tident "*@docker-storage-home-1.docker_default" vuid:58679 vgid:0
eos vid set map -tident "unix@storage-home" vuid:58679 vgid:0
eos vid set map -tident "unix@storage-users" vuid:58679 vgid:0
eos vid set map -tident "unix@storage-local-1" vuid:58679 vgid:0
eos vid set map -tident "unix@storage-local-2" vuid:58679 vgid:0
eos vid set map -tident "unix@docker-storage-home-1.docker_default" vuid:58679 vgid:0
tail -f /dev/null
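To check that the sudo memberships and the tident/unix mappings above were applied, the vid rules can be listed from inside the EOS container. A minimal sketch, assuming the container is reachable under the eos-storage service name used in the compose file:

# Hypothetical verification; "eos-storage" is the compose service name assumed here.
docker exec eos-storage eos vid ls | grep -E 'sudo|58679'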

0
tests/revad/revad.toml Normal file
View File