diff --git a/builder/builder-next/adapters/containerimage/pull.go b/builder/builder-next/adapters/containerimage/pull.go
index b25312673d..d089325e6e 100644
--- a/builder/builder-next/adapters/containerimage/pull.go
+++ b/builder/builder-next/adapters/containerimage/pull.go
@@ -32,6 +32,7 @@ import (
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/source"
+	srctypes "github.com/moby/buildkit/source/types"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/imageutil"
 	"github.com/moby/buildkit/util/leaseutil"
@@ -72,7 +73,7 @@ func NewSource(opt SourceOpt) (*Source, error) {
 
 // ID returns image scheme identifier
 func (is *Source) ID() string {
-	return source.DockerImageScheme
+	return srctypes.DockerImageScheme
 }
 
 func (is *Source) resolveLocal(refStr string) (*image.Image, error) {
@@ -300,51 +301,51 @@ func (p *puller) resolve(ctx context.Context, g session.Group) error {
 	return err
 }
 
-func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) {
+func (p *puller) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) {
 	p.resolveLocal()
 
 	if p.desc.Digest != "" && index == 0 {
 		dgst, err := p.mainManifestKey(p.platform)
 		if err != nil {
-			return "", nil, false, err
+			return "", "", nil, false, err
 		}
-		return dgst.String(), nil, false, nil
+		return dgst.String(), dgst.String(), nil, false, nil
 	}
 
 	if p.config != nil {
 		k := cacheKeyFromConfig(p.config).String()
 		if k == "" {
-			return digest.FromBytes(p.config).String(), nil, true, nil
+			return digest.FromBytes(p.config).String(), digest.FromBytes(p.config).String(), nil, true, nil
 		}
-		return k, nil, true, nil
+		return k, k, nil, true, nil
 	}
 
 	if err := p.resolve(ctx, g); err != nil {
-		return "", nil, false, err
+		return "", "", nil, false, err
 	}
 
 	if p.desc.Digest != "" && index == 0 {
 		dgst, err := p.mainManifestKey(p.platform)
 		if err != nil {
-			return "", nil, false, err
+			return "", "", nil, false, err
 		}
-		return dgst.String(), nil, false, nil
+		return dgst.String(), dgst.String(), nil, false, nil
 	}
 
 	if len(p.config) == 0 && p.desc.MediaType != images.MediaTypeDockerSchema1Manifest {
-		return "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
+		return "", "", nil, false, errors.Errorf("invalid empty config file resolved for %s", p.src.Reference.String())
 	}
 
 	k := cacheKeyFromConfig(p.config).String()
 	if k == "" || p.desc.MediaType == images.MediaTypeDockerSchema1Manifest {
 		dgst, err := p.mainManifestKey(p.platform)
 		if err != nil {
-			return "", nil, false, err
+			return "", "", nil, false, err
 		}
-		return dgst.String(), nil, true, nil
+		return dgst.String(), dgst.String(), nil, true, nil
 	}
 
-	return k, nil, true, nil
+	return k, "", nil, true, nil
 }
 
 func (p *puller) getRef(ctx context.Context, diffIDs []layer.DiffID, opts ...cache.RefOption) (cache.ImmutableRef, error) {
@@ -405,7 +406,7 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
 
 	pctx, stopProgress := context.WithCancel(ctx)
 
-	pw, _, ctx := progress.FromContext(ctx)
+	pw, _, ctx := progress.NewFromContext(ctx)
 	defer pw.Close()
 
 	progressDone := make(chan struct{})
@@ -586,8 +587,8 @@ func (p *puller) Snapshot(ctx context.Context, g session.Group) (cache.Immutable
 
 	// TODO: handle windows layers for cross platform builds
 
-	if p.src.RecordType != "" && cache.GetRecordType(ref) == "" {
-		if err := cache.SetRecordType(ref, p.src.RecordType); err != nil {
+	if p.src.RecordType != "" && ref.GetRecordType() == "" {
+		if err := ref.SetRecordType(p.src.RecordType); err != nil {
 			ref.Release(context.TODO())
 			return nil, err
 		}
@@ -806,7 +807,7 @@ type statusInfo struct {
 }
 
 func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.FromContext(ctx)
+	pw, _, _ := progress.NewFromContext(ctx)
 	now := time.Now()
 	st := progress.Status{
 		Started: &now,
diff --git a/builder/builder-next/adapters/localinlinecache/inlinecache.go b/builder/builder-next/adapters/localinlinecache/inlinecache.go
index 406ff83466..beba28273b 100644
--- a/builder/builder-next/adapters/localinlinecache/inlinecache.go
+++ b/builder/builder-next/adapters/localinlinecache/inlinecache.go
@@ -22,11 +22,6 @@ import (
 	"github.com/pkg/errors"
 )
 
-func init() {
-	// See https://github.com/moby/buildkit/pull/1993.
-	v1.EmptyLayerRemovalSupported = false
-}
-
 // ResolveCacheImporterFunc returns a resolver function for local inline cache
 func ResolveCacheImporterFunc(sm *session.Manager, resolverFunc docker.RegistryHosts, cs content.Store, rs reference.Store, is imagestore.Store) remotecache.ResolveCacheImporterFunc {
@@ -75,7 +70,7 @@ func (li *localImporter) Resolve(ctx context.Context, _ specs.Descriptor, id str
 	if err != nil {
 		return nil, err
 	}
-	return solver.NewCacheManager(id, keysStorage, resultStorage), nil
+	return solver.NewCacheManager(ctx, id, keysStorage, resultStorage), nil
 }
 
 func (li *localImporter) importInlineCache(ctx context.Context, dt []byte, cc solver.CacheExporterTarget) error {
diff --git a/builder/builder-next/controller.go b/builder/builder-next/controller.go
index fd086ca90e..f546c8f98f 100644
--- a/builder/builder-next/controller.go
+++ b/builder/builder-next/controller.go
@@ -8,7 +8,6 @@ import (
 
 	"github.com/containerd/containerd/content/local"
 	ctdmetadata "github.com/containerd/containerd/metadata"
-	"github.com/containerd/containerd/platforms"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/docker/docker/api/types"
 	"github.com/docker/docker/api/types/filters"
@@ -38,7 +37,6 @@ import (
 	"github.com/moby/buildkit/util/entitlements"
 	"github.com/moby/buildkit/util/leaseutil"
 	"github.com/moby/buildkit/worker"
-	specs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	bolt "go.etcd.io/bbolt"
 )
@@ -169,11 +167,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		return nil, errors.Errorf("snapshotter doesn't support differ")
 	}
 
-	p, err := parsePlatforms(archutil.SupportedPlatforms(true))
-	if err != nil {
-		return nil, err
-	}
-
 	leases, err := lm.List(context.TODO(), "labels.\"buildkit/lease.temporary\"")
 	if err != nil {
 		return nil, err
@@ -184,7 +177,6 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 
 	wopt := mobyworker.Opt{
 		ID:            "moby",
-		MetadataStore: md,
 		ContentStore:  store,
 		CacheManager:  cm,
 		GCPolicy:      gcPolicy,
@@ -196,7 +188,7 @@ func newController(rt http.RoundTripper, opt Opt) (*control.Controller, error) {
 		Exporter:      exp,
 		Transport:     rt,
 		Layers:        layers,
-		Platforms:     p,
+		Platforms:     archutil.SupportedPlatforms(true),
 	}
 
 	wc := &worker.Controller{}
@@ -268,18 +260,6 @@ func getGCPolicy(conf config.BuilderConfig, root string) ([]client.PruneInfo, er
 	return gcPolicy, nil
 }
 
-func parsePlatforms(platformsStr []string) ([]specs.Platform, error) {
-	out := make([]specs.Platform, 0, len(platformsStr))
-	for _, s := range platformsStr {
-		p, err := platforms.Parse(s)
-		if err != nil {
-			return nil, err
-		}
-		out = append(out, platforms.Normalize(p))
-	}
-	return out, nil
-}
-
 func getEntitlements(conf config.BuilderConfig) []string {
 	var ents []string
 	// Incase of no config settings, NetworkHost should be enabled & SecurityInsecure must be disabled.
diff --git a/builder/builder-next/exporter/export.go b/builder/builder-next/exporter/export.go
index 9e60b7f6ad..9c09c33e3f 100644
--- a/builder/builder-next/exporter/export.go
+++ b/builder/builder-next/exporter/export.go
@@ -14,7 +14,6 @@ import (
 	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -48,20 +47,12 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 	for k, v := range opt {
 		switch k {
 		case keyImageName:
-			for _, v := range strings.Split(v, ",") {
-				ref, err := distref.ParseNormalizedNamed(v)
-				if err != nil {
-					return nil, err
-				}
-				i.targetNames = append(i.targetNames, ref)
-			}
-		case exptypes.ExporterImageConfigKey:
+			i.targetName = v
+		default:
 			if i.meta == nil {
 				i.meta = make(map[string][]byte)
 			}
 			i.meta[k] = []byte(v)
-		default:
-			logrus.Warnf("image exporter: unknown option %s", k)
 		}
 	}
 	return i, nil
@@ -69,14 +60,18 @@ func (e *imageExporter) Resolve(ctx context.Context, opt map[string]string) (exp
 
 type imageExporterInstance struct {
 	*imageExporter
-	targetNames []distref.Named
-	meta        map[string][]byte
+	targetName string
+	meta       map[string][]byte
 }
 
 func (e *imageExporterInstance) Name() string {
 	return "exporting to image"
 }
 
+func (e *imageExporterInstance) Config() exporter.Config {
+	return exporter.Config{}
+}
+
 func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) {
 	if len(inp.Refs) > 1 {
 		return nil, fmt.Errorf("exporting multiple references to image store is currently unsupported")
@@ -115,7 +110,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	if ref != nil {
 		layersDone := oneOffProgress(ctx, "exporting layers")
 
-		if err := ref.Finalize(ctx, true); err != nil {
+		if err := ref.Finalize(ctx); err != nil {
 			return nil, layersDone(err)
 		}
 
@@ -162,10 +157,14 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	_ = configDone(nil)
 
 	if e.opt.ReferenceStore != nil {
-		for _, targetName := range e.targetNames {
-			tagDone := oneOffProgress(ctx, "naming to "+targetName.String())
-
-			if err := e.opt.ReferenceStore.AddTag(targetName, digest.Digest(id), true); err != nil {
+		targetNames := strings.Split(e.targetName, ",")
+		for _, targetName := range targetNames {
+			tagDone := oneOffProgress(ctx, "naming to "+targetName)
+			tref, err := distref.ParseNormalizedNamed(targetName)
+			if err != nil {
+				return nil, err
+			}
+			if err := e.opt.ReferenceStore.AddTag(tref, digest.Digest(id), true); err != nil {
 				return nil, tagDone(err)
 			}
 			_ = tagDone(nil)
@@ -173,6 +172,7 @@ func (e *imageExporterInstance) Export(ctx context.Context, inp exporter.Source,
 	}
 
 	return map[string]string{
-		"containerimage.digest": id.String(),
+		exptypes.ExporterImageConfigDigestKey: configDigest.String(),
+		exptypes.ExporterImageDigestKey:       id.String(),
 	}, nil
 }
diff --git a/builder/builder-next/exporter/writer.go b/builder/builder-next/exporter/writer.go
index d48fb4419d..c453d455f8 100644
--- a/builder/builder-next/exporter/writer.go
+++ b/builder/builder-next/exporter/writer.go
@@ -3,9 +3,9 @@ package containerimage
 import (
 	"context"
 	"encoding/json"
-	"runtime"
 	"time"
 
+	"github.com/containerd/containerd/platforms"
 	"github.com/moby/buildkit/cache"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/system"
@@ -20,13 +20,15 @@ import (
 // )
 
 func emptyImageConfig() ([]byte, error) {
+	pl := platforms.Normalize(platforms.DefaultSpec())
 	img := ocispec.Image{
-		Architecture: runtime.GOARCH,
-		OS:           runtime.GOOS,
+		Architecture: pl.Architecture,
+		OS:           pl.OS,
+		Variant:      pl.Variant,
 	}
 	img.RootFS.Type = "layers"
 	img.Config.WorkingDir = "/"
-	img.Config.Env = []string{"PATH=" + system.DefaultPathEnvUnix}
+	img.Config.Env = []string{"PATH=" + system.DefaultPathEnv(pl.OS)}
 	dt, err := json.Marshal(img)
 	return dt, errors.Wrap(err, "failed to create empty image config")
}
@@ -119,7 +121,7 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
 	// some history items are missing. add them based on the ref metadata
 	for _, md := range refMeta[historyLayers:] {
 		history = append(history, ocispec.History{
-			Created:   &md.createdAt,
+			Created:   md.createdAt,
 			CreatedBy: md.description,
 			Comment:   "buildkit.exporter.image.v0",
 		})
@@ -130,7 +132,7 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
 	for i, h := range history {
 		if !h.EmptyLayer {
 			if h.Created == nil {
-				h.Created = &refMeta[layerIndex].createdAt
+				h.Created = refMeta[layerIndex].createdAt
 			}
 			layerIndex++
 		}
@@ -173,33 +175,39 @@ func normalizeLayersAndHistory(diffs []digest.Digest, history []ocispec.History,
 
 type refMetadata struct {
 	description string
-	createdAt   time.Time
+	createdAt   *time.Time
 }
 
 func getRefMetadata(ref cache.ImmutableRef, limit int) []refMetadata {
-	if limit <= 0 {
-		return nil
-	}
-	meta := refMetadata{
-		description: "created by buildkit", // shouldn't be shown but don't fail build
-		createdAt:   time.Now(),
-	}
 	if ref == nil {
-		return append(getRefMetadata(nil, limit-1), meta)
+		return make([]refMetadata, limit)
 	}
-	if descr := cache.GetDescription(ref.Metadata()); descr != "" {
-		meta.description = descr
+
+	layerChain := ref.LayerChain()
+	defer layerChain.Release(context.TODO())
+
+	if limit < len(layerChain) {
+		layerChain = layerChain[len(layerChain)-limit:]
 	}
-	meta.createdAt = cache.GetCreatedAt(ref.Metadata())
-	p := ref.Parent()
-	if p != nil {
-		defer p.Release(context.TODO())
+
+	metas := make([]refMetadata, len(layerChain))
+	for i, layer := range layerChain {
+		meta := &metas[i]
+
+		if description := layer.GetDescription(); description != "" {
+			meta.description = description
+		} else {
+			meta.description = "created by buildkit" // shouldn't be shown but don't fail build
+		}
+
+		createdAt := layer.GetCreatedAt()
+		meta.createdAt = &createdAt
 	}
-	return append(getRefMetadata(p, limit-1), meta)
+	return metas
 }
 
 func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.FromContext(ctx)
+	pw, _, _ := progress.NewFromContext(ctx)
 	now := time.Now()
 	st := progress.Status{
 		Started: &now,
diff --git a/builder/builder-next/worker/worker.go b/builder/builder-next/worker/worker.go
index a595da7ba7..e65d0e51ba 100644
--- a/builder/builder-next/worker/worker.go
+++ b/builder/builder-next/worker/worker.go
@@ -5,7 +5,6 @@ import (
 	"fmt"
 	"io"
 	nethttp "net/http"
-	"strings"
 	"time"
 
 	"github.com/containerd/containerd/content"
@@ -19,7 +18,6 @@ import (
 	"github.com/docker/docker/layer"
 	pkgprogress "github.com/docker/docker/pkg/progress"
 	"github.com/moby/buildkit/cache"
-	"github.com/moby/buildkit/cache/metadata"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/executor" @@ -45,7 +43,6 @@ import ( ocispec "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" "golang.org/x/sync/semaphore" ) @@ -62,7 +59,6 @@ type Opt struct { ID string Labels map[string]string GCPolicy []client.PruneInfo - MetadataStore *metadata.Store Executor executor.Executor Snapshotter snapshot.Snapshotter ContentStore content.Store @@ -95,7 +91,6 @@ func NewWorker(opt Opt) (*Worker, error) { gs, err := git.NewSource(git.Opt{ CacheAccessor: cm, - MetadataStore: opt.MetadataStore, }) if err == nil { sm.Register(gs) @@ -105,7 +100,6 @@ func NewWorker(opt Opt) (*Worker, error) { hs, err := http.NewSource(http.Opt{ CacheAccessor: cm, - MetadataStore: opt.MetadataStore, Transport: opt.Transport, }) if err == nil { @@ -116,7 +110,6 @@ func NewWorker(opt Opt) (*Worker, error) { ss, err := local.NewSource(local.Opt{ CacheAccessor: cm, - MetadataStore: opt.MetadataStore, }) if err == nil { sm.Register(ss) @@ -148,9 +141,8 @@ func (w *Worker) Platforms(noCache bool) []ocispec.Platform { pm[platforms.Format(p)] = struct{}{} } for _, p := range archutil.SupportedPlatforms(noCache) { - if _, ok := pm[p]; !ok { - pp, _ := platforms.Parse(p) - w.Opt.Platforms = append(w.Opt.Platforms, pp) + if _, ok := pm[platforms.Format(p)]; !ok { + w.Opt.Platforms = append(w.Opt.Platforms, p) } } } @@ -170,18 +162,13 @@ func (w *Worker) ContentStore() content.Store { return w.Opt.ContentStore } -// MetadataStore returns the metadata store -func (w *Worker) MetadataStore() *metadata.Store { - return w.Opt.MetadataStore -} - // LoadRef loads a reference by ID func (w *Worker) LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) { var opts []cache.RefOption if hidden { opts = append(opts, cache.NoUpdateLastUsed) } - return w.CacheManager().Get(ctx, id, opts...) + return w.CacheManager().Get(ctx, id, nil, opts...) 
 }
 
 // ResolveOp converts a LLB vertex into a LLB operation
@@ -193,9 +180,9 @@ func (w *Worker) ResolveOp(v solver.Vertex, s frontend.FrontendLLBBridge, sm *se
 		case *pb.Op_Source:
 			return ops.NewSourceOp(v, op, baseOp.Platform, w.SourceManager, parallelism, sm, w)
 		case *pb.Op_Exec:
-			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Opt.MetadataStore, w.Executor(), w)
+			return ops.NewExecOp(v, op, baseOp.Platform, w.CacheManager(), parallelism, sm, w.Executor(), w)
 		case *pb.Op_File:
-			return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w.Opt.MetadataStore, w)
+			return ops.NewFileOp(v, op, w.CacheManager(), parallelism, w)
 		case *pb.Op_Build:
 			return ops.NewBuildOp(v, op, s, w)
 		}
@@ -246,7 +233,7 @@ func (w *Worker) GetRemote(ctx context.Context, ref cache.ImmutableRef, createIf
 			return nil, err
 		}
 	} else {
-		if err := ref.Finalize(ctx, true); err != nil {
+		if err := ref.Finalize(ctx); err != nil {
 			return nil, err
 		}
 		diffIDs, err = w.Layers.EnsureLayer(ctx, ref.ID())
@@ -277,32 +264,20 @@ func (w *Worker) PruneCacheMounts(ctx context.Context, ids []string) error {
 	defer mu.Unlock()
 
 	for _, id := range ids {
-		id = "cache-dir:" + id
-		sis, err := w.Opt.MetadataStore.Search(id)
+		mds, err := mounts.SearchCacheDir(ctx, w.CacheManager(), id)
 		if err != nil {
 			return err
 		}
-		for _, si := range sis {
-			for _, k := range si.Indexes() {
-				if k == id || strings.HasPrefix(k, id+":") {
-					if siCached := w.CacheManager().Metadata(si.ID()); siCached != nil {
-						si = siCached
-					}
-					if err := cache.CachePolicyDefault(si); err != nil {
-						return err
-					}
-					si.Queue(func(b *bolt.Bucket) error {
-						return si.SetValue(b, k, nil)
-					})
-					if err := si.Commit(); err != nil {
-						return err
-					}
-					// if ref is unused try to clean it up right away by releasing it
-					if mref, err := w.CacheManager().GetMutable(ctx, si.ID()); err == nil {
-						go mref.Release(context.TODO())
-					}
-					break
-				}
+		for _, md := range mds {
+			if err := md.SetCachePolicyDefault(); err != nil {
+				return err
+			}
+			if err := md.ClearCacheDirIndex(); err != nil {
+				return err
+			}
+			// if ref is unused try to clean it up right away by releasing it
+			if mref, err := w.CacheManager().GetMutable(ctx, md.ID()); err == nil {
+				go mref.Release(context.TODO())
 			}
 		}
 	}
@@ -432,7 +407,7 @@ func (ld *layerDescriptor) Download(ctx context.Context, progressOutput pkgprogr
 	// TODO should this write output to progressOutput? Or use something similar to loggerFromContext()? see https://github.com/moby/buildkit/commit/aa29e7729464f3c2a773e27795e584023c751cb8
 	discardLogs := func(_ []byte) {}
-	if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, discardLogs); err != nil {
+	if err := contentutil.Copy(ctx, ld.w.ContentStore(), ld.provider, ld.desc, "", discardLogs); err != nil {
 		return nil, 0, done(err)
 	}
 	_ = done(nil)
@@ -479,7 +454,7 @@ func getLayers(ctx context.Context, descs []ocispec.Descriptor) ([]rootfs.Layer,
 }
 
 func oneOffProgress(ctx context.Context, id string) func(err error) error {
-	pw, _, _ := progress.FromContext(ctx)
+	pw, _, _ := progress.NewFromContext(ctx)
 	now := time.Now()
 	st := progress.Status{
 		Started: &now,
diff --git a/builder/dockerfile/builder.go b/builder/dockerfile/builder.go
index f42bb1bc63..ecfe8eaa1d 100644
--- a/builder/dockerfile/builder.go
+++ b/builder/dockerfile/builder.go
@@ -185,7 +185,7 @@ func (b *Builder) build(source builder.Source, dockerfile *parser.Result) (*buil
 
 	stages, metaArgs, err := instructions.Parse(dockerfile.AST)
 	if err != nil {
-		var uiErr *instructions.UnknownInstruction
+		var uiErr *instructions.UnknownInstructionError
 		if errors.As(err, &uiErr) {
 			buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
 		}
@@ -336,7 +336,7 @@ func BuildFromConfig(config *container.Config, changes []string, os string) (*co
 
 	// ensure that the commands are valid
 	for _, n := range dockerfile.AST.Children {
-		if !validCommitCommands[n.Value] {
+		if !validCommitCommands[strings.ToLower(n.Value)] {
 			return nil, errdefs.InvalidParameter(errors.Errorf("%s is not a valid change command", n.Value))
 		}
 	}
diff --git a/builder/dockerfile/dispatchers.go b/builder/dockerfile/dispatchers.go
index 1fafdc488c..65a3a8540c 100644
--- a/builder/dockerfile/dispatchers.go
+++ b/builder/dockerfile/dispatchers.go
@@ -208,7 +208,7 @@ func dispatchTriggeredOnBuild(d dispatchRequest, triggers []string) error {
 		}
 		cmd, err := instructions.ParseCommand(ast.AST.Children[0])
 		if err != nil {
-			var uiErr *instructions.UnknownInstruction
+			var uiErr *instructions.UnknownInstructionError
 			if errors.As(err, &uiErr) {
 				buildsFailed.WithValues(metricsUnknownInstructionError).Inc()
 			}
diff --git a/daemon/daemon.go b/daemon/daemon.go
index 5642785319..0c22deffda 100644
--- a/daemon/daemon.go
+++ b/daemon/daemon.go
@@ -57,6 +57,7 @@ import (
 	"github.com/docker/docker/runconfig"
 	volumesservice "github.com/docker/docker/volume/service"
 	"github.com/moby/buildkit/util/resolver"
+	resolverconfig "github.com/moby/buildkit/util/resolver/config"
 	"github.com/moby/locker"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
@@ -155,7 +156,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 	var (
 		registryKey = "docker.io"
 		mirrors     = make([]string, len(daemon.configStore.Mirrors))
-		m           = map[string]resolver.RegistryConfig{}
+		m           = map[string]resolverconfig.RegistryConfig{}
 	)
 	// must trim "https://" or "http://" prefix
 	for i, v := range daemon.configStore.Mirrors {
@@ -165,11 +166,11 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 		mirrors[i] = v
 	}
 	// set mirrors for default registry
-	m[registryKey] = resolver.RegistryConfig{Mirrors: mirrors}
+	m[registryKey] = resolverconfig.RegistryConfig{Mirrors: mirrors}
 
 	for _, v := range daemon.configStore.InsecureRegistries {
 		u, err := url.Parse(v)
-		c := resolver.RegistryConfig{}
+		c := resolverconfig.RegistryConfig{}
 		if err == nil {
 			v = u.Host
 			t := true
@@ -191,7 +192,7 @@ func (daemon *Daemon) RegistryHosts() docker.RegistryHosts {
 	if fis, err := os.ReadDir(certsDir); err == nil {
 		for _, fi := range fis {
 			if _, ok := m[fi.Name()]; !ok {
-				m[fi.Name()] = resolver.RegistryConfig{
+				m[fi.Name()] = resolverconfig.RegistryConfig{
 					TLSConfigDir: []string{filepath.Join(certsDir, fi.Name())},
 				}
 			}
diff --git a/integration-cli/docker_cli_build_test.go b/integration-cli/docker_cli_build_test.go
index 7bcb488af0..db3e0fd1e7 100644
--- a/integration-cli/docker_cli_build_test.go
+++ b/integration-cli/docker_cli_build_test.go
@@ -6073,7 +6073,7 @@ func (s *DockerSuite) TestBuildLineErrorOnBuild(c *testing.T) {
 		ONBUILD
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err:      "parse error line 2: ONBUILD requires at least one argument",
+		Err:      "parse error on line 2: ONBUILD requires at least one argument",
 	})
 }
 
@@ -6087,7 +6087,7 @@ func (s *DockerSuite) TestBuildLineErrorUnknownInstruction(c *testing.T) {
 		ERROR
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err:      "parse error line 3: unknown instruction: NOINSTRUCTION",
+		Err:      "parse error on line 3: unknown instruction: NOINSTRUCTION",
 	})
 }
 
@@ -6104,7 +6104,7 @@ func (s *DockerSuite) TestBuildLineErrorWithEmptyLines(c *testing.T) {
 		CMD ["/bin/init"]
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err:      "parse error line 6: unknown instruction: NOINSTRUCTION",
+		Err:      "parse error on line 6: unknown instruction: NOINSTRUCTION",
 	})
 }
 
@@ -6118,7 +6118,7 @@ func (s *DockerSuite) TestBuildLineErrorWithComments(c *testing.T) {
 		NOINSTRUCTION echo ba
 	`)).Assert(c, icmd.Expected{
 		ExitCode: 1,
-		Err:      "parse error line 5: unknown instruction: NOINSTRUCTION",
+		Err:      "parse error on line 5: unknown instruction: NOINSTRUCTION",
 	})
 }
diff --git a/vendor.mod b/vendor.mod
index 9c4af544fb..bc3e33f76d 100644
--- a/vendor.mod
+++ b/vendor.mod
@@ -42,16 +42,16 @@ require (
 	github.com/google/go-cmp v0.5.7
 	github.com/google/uuid v1.3.0
 	github.com/gorilla/mux v1.8.0
-	github.com/hashicorp/go-immutable-radix v1.0.0
+	github.com/hashicorp/go-immutable-radix v1.3.1
 	github.com/hashicorp/go-memdb v0.0.0-20161216180745-cb9a474f84cc
 	github.com/hashicorp/memberlist v0.2.4
 	github.com/hashicorp/serf v0.8.2
 	github.com/imdario/mergo v0.3.12
 	github.com/ishidawataru/sctp v0.0.0-20210226210310-f2269e66cdee
-	github.com/klauspost/compress v1.14.3
+	github.com/klauspost/compress v1.15.0
 	github.com/miekg/dns v1.1.27
 	github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible
-	github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a
+	github.com/moby/buildkit v0.10.0
 	github.com/moby/ipvs v1.0.1
 	github.com/moby/locker v1.0.1
 	github.com/moby/sys/mount v0.3.1
@@ -61,13 +61,13 @@ require (
 	github.com/moby/term v0.0.0-20210619224110-3f7ff695adc6
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
-	github.com/opencontainers/image-spec v1.0.2
+	github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
 	github.com/opencontainers/runc v1.1.0
 	github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417
 	github.com/opencontainers/selinux v1.10.0
 	github.com/pelletier/go-toml v1.9.4
 	github.com/pkg/errors v0.9.1
-	github.com/prometheus/client_golang v1.11.0
+	github.com/prometheus/client_golang v1.12.1
 	github.com/rootless-containers/rootlesskit v0.14.6
 	github.com/sirupsen/logrus v1.8.1
 	github.com/spf13/cobra v1.1.3
@@ -83,14 +83,16 @@ require (
 	golang.org/x/sys v0.0.0-20220114195835-da31bd327af9
 	golang.org/x/time v0.0.0-20211116232009-f0f3c7e86c11
 	google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa
-	google.golang.org/grpc v1.43.0
+	google.golang.org/grpc v1.44.0
 	gotest.tools/v3 v3.1.0
 )
 
 require (
 	code.cloudfoundry.org/clock v1.0.0 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
+	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/akutz/memconn v0.1.0 // indirect
+	github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 // indirect
 	github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.1.2 // indirect
@@ -98,12 +100,17 @@ require (
 	github.com/container-storage-interface/spec v1.5.0 // indirect
 	github.com/containerd/console v1.0.3 // indirect
 	github.com/containerd/go-runc v1.0.0 // indirect
+	github.com/containerd/stargz-snapshotter v0.11.2 // indirect
+	github.com/containerd/stargz-snapshotter/estargz v0.11.2 // indirect
 	github.com/containerd/ttrpc v1.1.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/dustin/go-humanize v1.0.0 // indirect
+	github.com/felixge/httpsnoop v1.0.2 // indirect
 	github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e // indirect
+	github.com/go-logr/logr v1.2.2 // indirect
+	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/gofrs/flock v0.8.1 // indirect
-	github.com/gogo/googleapis v1.4.0 // indirect
+	github.com/gogo/googleapis v1.4.1 // indirect
 	github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
 	github.com/golang/protobuf v1.5.2 // indirect
 	github.com/google/btree v1.0.1 // indirect
@@ -112,7 +119,7 @@ require (
 	github.com/googleapis/gax-go/v2 v2.0.5 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 // indirect
-	github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 // indirect
+	github.com/grpc-ecosystem/grpc-gateway v1.16.0 // indirect
 	github.com/hashicorp/errwrap v1.1.0 // indirect
 	github.com/hashicorp/go-msgpack v0.5.3 // indirect
 	github.com/hashicorp/go-multierror v1.1.1 // indirect
@@ -121,17 +128,16 @@ require (
 	github.com/inconshreveable/mousetrap v1.0.0 // indirect
 	github.com/jmespath/go-jmespath v0.3.0 // indirect
 	github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369 // indirect
-	github.com/mitchellh/hashstructure v1.0.0 // indirect
-	github.com/opentracing-contrib/go-stdlib v1.0.0 // indirect
-	github.com/opentracing/opentracing-go v1.2.0 // indirect
+	github.com/mitchellh/hashstructure/v2 v2.0.2 // indirect
 	github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee // indirect
 	github.com/philhofer/fwd v1.0.0 // indirect
 	github.com/prometheus/client_model v0.2.0 // indirect
-	github.com/prometheus/common v0.30.0 // indirect
+	github.com/prometheus/common v0.32.1 // indirect
 	github.com/prometheus/procfs v0.7.3 // indirect
 	github.com/rexray/gocsi v1.2.2 // indirect
 	github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529 // indirect
 	github.com/tinylib/msgp v1.1.0 // indirect
+	github.com/tonistiigi/go-archvariant v1.0.0 // indirect
 	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
 	github.com/vmihailenco/msgpack v4.0.4+incompatible // indirect
 	go.etcd.io/etcd/client/pkg/v3 v3.5.2 // indirect
@@ -139,6 +145,16 @@ require (
 	go.etcd.io/etcd/raft/v3 v3.5.2 // indirect
 	go.etcd.io/etcd/server/v3 v3.5.2 // indirect
 	go.opencensus.io v0.23.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect
+	go.opentelemetry.io/otel v1.4.1 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 // indirect
+	go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect
+	go.opentelemetry.io/otel/metric v0.27.0 // indirect
+	go.opentelemetry.io/otel/sdk v1.4.1 // indirect
+	go.opentelemetry.io/otel/trace v1.4.1 // indirect
+	go.opentelemetry.io/proto/otlp v0.12.0 // indirect
 	go.uber.org/atomic v1.7.0 // indirect
 	go.uber.org/multierr v1.6.0 // indirect
 	go.uber.org/zap v1.17.0 // indirect
@@ -157,8 +173,6 @@ replace (
 	github.com/armon/go-radix => github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8
 	github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7
 	github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea
-	github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2
-	github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe
 	github.com/hashicorp/go-msgpack => github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67
 	github.com/hashicorp/go-multierror => github.com/hashicorp/go-multierror v1.0.0
 	github.com/hashicorp/serf => github.com/hashicorp/serf v0.7.1-0.20160317193612-598c54895cc5
diff --git a/vendor/github.com/agext/levenshtein/.gitignore b/vendor/github.com/agext/levenshtein/.gitignore
new file mode 100644
index 0000000000..4473da19b2
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/.gitignore
@@ -0,0 +1,53 @@
+# Ignore docs files
+_gh_pages
+_site
+
+# Ignore temporary files
+README.html
+coverage.out
+.tmp
+
+# Numerous always-ignore extensions
+*.diff
+*.err
+*.log
+*.orig
+*.rej
+*.swo
+*.swp
+*.vi
+*.zip
+*~
+
+# OS or Editor folders
+._*
+.cache
+.DS_Store
+.idea
+.project
+.settings
+.tmproj
+*.esproj
+*.sublime-project
+*.sublime-workspace
+nbproject
+Thumbs.db
+
+# Komodo
+.komodotools
+*.komodoproject
+
+# SCSS-Lint
+scss-lint-report.xml
+
+# grunt-contrib-sass cache
+.sass-cache
+
+# Jekyll metadata
+docs/.jekyll-metadata
+
+# Folders to ignore
+.build
+.test
+bower_components
+node_modules
diff --git a/vendor/github.com/agext/levenshtein/.travis.yml b/vendor/github.com/agext/levenshtein/.travis.yml
new file mode 100644
index 0000000000..68d38816fc
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/.travis.yml
@@ -0,0 +1,30 @@
+language: go
+sudo: false
+matrix:
+  fast_finish: true
+  include:
+    - go: 1.14.x
+      env: TEST_METHOD=goveralls
+    - go: 1.13.x
+    - go: 1.12.x
+    - go: 1.11.x
+    - go: 1.10.x
+    - go: tip
+    - go: 1.9.x
+    - go: 1.8.x
+    - go: 1.7.x
+    - go: 1.6.x
+    - go: 1.5.x
+  allow_failures:
+    - go: tip
+    - go: 1.11.x
+    - go: 1.10.x
+    - go: 1.9.x
+    - go: 1.8.x
+    - go: 1.7.x
+    - go: 1.6.x
+    - go: 1.5.x
+script: ./test.sh $TEST_METHOD
+notifications:
+  email:
+    on_success: never
diff --git a/vendor/github.com/agext/levenshtein/DCO b/vendor/github.com/agext/levenshtein/DCO
new file mode 100644
index 0000000000..716561d5d2
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/DCO
@@ -0,0 +1,36 @@
+Developer Certificate of Origin
+Version 1.1
+
+Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
+660 York Street, Suite 102,
+San Francisco, CA 94110 USA
+
+Everyone is permitted to copy and distribute verbatim copies of this
+license document, but changing it is not allowed.
+
+
+Developer's Certificate of Origin 1.1
+
+By making a contribution to this project, I certify that:
+
+(a) The contribution was created in whole or in part by me and I
+    have the right to submit it under the open source license
+    indicated in the file; or
+
+(b) The contribution is based upon previous work that, to the best
+    of my knowledge, is covered under an appropriate open source
+    license and I have the right under that license to submit that
+    work with modifications, whether created in whole or in part
+    by me, under the same open source license (unless I am
+    permitted to submit under a different license), as indicated
+    in the file; or
+
+(c) The contribution was provided directly to me by some other
+    person who certified (a), (b) or (c) and I have not modified
+    it.
+
+(d) I understand and agree that this project and the contribution
+    are public and that a record of the contribution (including all
+    personal information I submit with it, including my sign-off) is
+    maintained indefinitely and may be redistributed consistent with
+    this project or the open source license(s) involved.
diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/LICENSE b/vendor/github.com/agext/levenshtein/LICENSE
similarity index 100%
rename from vendor/github.com/opentracing-contrib/go-stdlib/LICENSE
rename to vendor/github.com/agext/levenshtein/LICENSE
diff --git a/vendor/github.com/agext/levenshtein/MAINTAINERS b/vendor/github.com/agext/levenshtein/MAINTAINERS
new file mode 100644
index 0000000000..726c2afb32
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/MAINTAINERS
@@ -0,0 +1 @@
+Alex Bucataru (@AlexBucataru)
diff --git a/vendor/github.com/agext/levenshtein/NOTICE b/vendor/github.com/agext/levenshtein/NOTICE
new file mode 100644
index 0000000000..eaffaab94c
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/NOTICE
@@ -0,0 +1,5 @@
+Alrux Go EXTensions (AGExt) - package levenshtein
+Copyright 2016 ALRUX Inc.
+
+This product includes software developed at ALRUX Inc.
+(http://www.alrux.com/).
diff --git a/vendor/github.com/agext/levenshtein/README.md b/vendor/github.com/agext/levenshtein/README.md
new file mode 100644
index 0000000000..d9a8ce16da
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/README.md
@@ -0,0 +1,38 @@
+# A Go package for calculating the Levenshtein distance between two strings
+
+[![Release](https://img.shields.io/github/release/agext/levenshtein.svg?style=flat)](https://github.com/agext/levenshtein/releases/latest)
+[![GoDoc](https://img.shields.io/badge/godoc-reference-blue.svg?style=flat)](https://godoc.org/github.com/agext/levenshtein)
+[![Build Status](https://travis-ci.org/agext/levenshtein.svg?branch=master&style=flat)](https://travis-ci.org/agext/levenshtein)
+[![Coverage Status](https://coveralls.io/repos/github/agext/levenshtein/badge.svg?style=flat)](https://coveralls.io/github/agext/levenshtein)
+[![Go Report Card](https://goreportcard.com/badge/github.com/agext/levenshtein?style=flat)](https://goreportcard.com/report/github.com/agext/levenshtein)
+
+
+This package implements distance and similarity metrics for strings, based on the Levenshtein measure, in [Go](http://golang.org).
+
+## Project Status
+
+v1.2.3 Stable: Guaranteed no breaking changes to the API in future v1.x releases. Probably safe to use in production, though provided on "AS IS" basis.
+
+This package is being actively maintained. If you encounter any problems or have any suggestions for improvement, please [open an issue](https://github.com/agext/levenshtein/issues). Pull requests are welcome.
+
+## Overview
+
+The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
+
+A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
+
+The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
+
+The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
+
+The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
+
+## Installation
+
+```
+go get github.com/agext/levenshtein
+```
+
+## License
+
+Package levenshtein is released under the Apache 2.0 license. See the [LICENSE](LICENSE) file for details.
diff --git a/vendor/github.com/agext/levenshtein/levenshtein.go b/vendor/github.com/agext/levenshtein/levenshtein.go
new file mode 100644
index 0000000000..56d719b83d
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/levenshtein.go
@@ -0,0 +1,290 @@
+// Copyright 2016 ALRUX Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package levenshtein implements distance and similarity metrics for strings, based on the Levenshtein measure.
+
+The Levenshtein `Distance` between two strings is the minimum total cost of edits that would convert the first string into the second. The allowed edit operations are insertions, deletions, and substitutions, all at character (one UTF-8 code point) level. Each operation has a default cost of 1, but each can be assigned its own cost equal to or greater than 0.
+
+A `Distance` of 0 means the two strings are identical, and the higher the value the more different the strings. Since in practice we are interested in finding if the two strings are "close enough", it often does not make sense to continue the calculation once the result is mathematically guaranteed to exceed a desired threshold. Providing this value to the `Distance` function allows it to take a shortcut and return a lower bound instead of an exact cost when the threshold is exceeded.
+
+The `Similarity` function calculates the distance, then converts it into a normalized metric within the range 0..1, with 1 meaning the strings are identical, and 0 that they have nothing in common. A minimum similarity threshold can be provided to speed up the calculation of the metric for strings that are far too dissimilar for the purpose at hand. All values under this threshold are rounded down to 0.
+
+The `Match` function provides a similarity metric, with the same range and meaning as `Similarity`, but with a bonus for string pairs that share a common prefix and have a similarity above a "bonus threshold". It uses the same method as proposed by Winkler for the Jaro distance, and the reasoning behind it is that these string pairs are very likely spelling variations or errors, and they are more closely linked than the edit distance alone would suggest.
+
+The underlying `Calculate` function is also exported, to allow the building of other derivative metrics, if needed.
+*/
+package levenshtein
+
+// Calculate determines the Levenshtein distance between two strings, using
+// the given costs for each edit operation. It returns the distance along with
+// the lengths of the longest common prefix and suffix.
+//
+// If maxCost is non-zero, the calculation stops as soon as the distance is determined
+// to be greater than maxCost. Therefore, any return value higher than maxCost is a
+// lower bound for the actual distance.
+func Calculate(str1, str2 []rune, maxCost, insCost, subCost, delCost int) (dist, prefixLen, suffixLen int) {
+	l1, l2 := len(str1), len(str2)
+	// trim common prefix, if any, as it doesn't affect the distance
+	for ; prefixLen < l1 && prefixLen < l2; prefixLen++ {
+		if str1[prefixLen] != str2[prefixLen] {
+			break
+		}
+	}
+	str1, str2 = str1[prefixLen:], str2[prefixLen:]
+	l1 -= prefixLen
+	l2 -= prefixLen
+	// trim common suffix, if any, as it doesn't affect the distance
+	for 0 < l1 && 0 < l2 {
+		if str1[l1-1] != str2[l2-1] {
+			str1, str2 = str1[:l1], str2[:l2]
+			break
+		}
+		l1--
+		l2--
+		suffixLen++
+	}
+	// if the first string is empty, the distance is the length of the second string times the cost of insertion
+	if l1 == 0 {
+		dist = l2 * insCost
+		return
+	}
+	// if the second string is empty, the distance is the length of the first string times the cost of deletion
+	if l2 == 0 {
+		dist = l1 * delCost
+		return
+	}
+
+	// variables used in inner "for" loops
+	var y, dy, c, l int
+
+	// if maxCost is greater than or equal to the maximum possible distance, it's equivalent to 'unlimited'
+	if maxCost > 0 {
+		if subCost < delCost+insCost {
+			if maxCost >= l1*subCost+(l2-l1)*insCost {
+				maxCost = 0
+			}
+		} else {
+			if maxCost >= l1*delCost+l2*insCost {
+				maxCost = 0
+			}
+		}
+	}
+
+	if maxCost > 0 {
+		// prefer the longer string first, to minimize time;
+		// a swap also transposes the meanings of insertion and deletion.
+		if l1 < l2 {
+			str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
+		}
+
+		// the length differential times cost of deletion is a lower bound for the cost;
+		// if it is higher than the maxCost, there is no point going into the main calculation.
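+		// (editor's note, illustrative: with unit costs, turning a 9-rune string into a
+		// 6-rune one needs at least 9-6 = 3 deletions, so after the swap above the bound
+		// (l1-l2)*delCost is 3; a caller that passed maxCost = 2 can be answered from
+		// this bound alone, without filling in the DP row below.)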
+		if dist = (l1 - l2) * delCost; dist > maxCost {
+			return
+		}
+
+		d := make([]int, l1+1)
+
+		// offset and length of d in the current row
+		doff, dlen := 0, 1
+		for y, dy = 1, delCost; y <= l1 && dy <= maxCost; dlen++ {
+			d[y] = dy
+			y++
+			dy = y * delCost
+		}
+		// fmt.Printf("%q -> %q: init doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
+
+		for x := 0; x < l2; x++ {
+			dy, d[doff] = d[doff], d[doff]+insCost
+			for doff < l1 && d[doff] > maxCost && dlen > 0 {
+				if str1[doff] != str2[x] {
+					dy += subCost
+				}
+				doff++
+				dlen--
+				if c = d[doff] + insCost; c < dy {
+					dy = c
+				}
+				dy, d[doff] = d[doff], dy
+			}
+			for y, l = doff, doff+dlen-1; y < l; dy, d[y] = d[y], dy {
+				if str1[y] != str2[x] {
+					dy += subCost
+				}
+				if c = d[y] + delCost; c < dy {
+					dy = c
+				}
+				y++
+				if c = d[y] + insCost; c < dy {
+					dy = c
+				}
+			}
+			if y < l1 {
+				if str1[y] != str2[x] {
+					dy += subCost
+				}
+				if c = d[y] + delCost; c < dy {
+					dy = c
+				}
+				for ; dy <= maxCost && y < l1; dy, d[y] = dy+delCost, dy {
+					y++
+					dlen++
+				}
+			}
+			// fmt.Printf("%q -> %q: x=%d doff=%d dlen=%d d[%d:%d]=%v\n", str1, str2, x, doff, dlen, doff, doff+dlen, d[doff:doff+dlen])
+			if dlen == 0 {
+				dist = maxCost + 1
+				return
+			}
+		}
+		if doff+dlen-1 < l1 {
+			dist = maxCost + 1
+			return
+		}
+		dist = d[l1]
+	} else {
+		// ToDo: This is O(l1*l2) time and O(min(l1,l2)) space; investigate if it is
+		// worth to implement diagonal approach - O(l1*(1+dist)) time, up to O(l1*l2) space
+		// http://www.csse.monash.edu.au/~lloyd/tildeStrings/Alignment/92.IPL.html
+
+		// prefer the shorter string first, to minimize space; time is O(l1*l2) anyway;
+		// a swap also transposes the meanings of insertion and deletion.
+		if l1 > l2 {
+			str1, str2, l1, l2, insCost, delCost = str2, str1, l2, l1, delCost, insCost
+		}
+		d := make([]int, l1+1)
+
+		for y = 1; y <= l1; y++ {
+			d[y] = y * delCost
+		}
+		for x := 0; x < l2; x++ {
+			dy, d[0] = d[0], d[0]+insCost
+			for y = 0; y < l1; dy, d[y] = d[y], dy {
+				if str1[y] != str2[x] {
+					dy += subCost
+				}
+				if c = d[y] + delCost; c < dy {
+					dy = c
+				}
+				y++
+				if c = d[y] + insCost; c < dy {
+					dy = c
+				}
+			}
+		}
+		dist = d[l1]
+	}
+
+	return
+}
+
+// Distance returns the Levenshtein distance between str1 and str2, using the
+// default or provided cost values. Pass nil for the third argument to use the
+// default cost of 1 for all three operations, with no maximum.
+func Distance(str1, str2 string, p *Params) int {
+	if p == nil {
+		p = defaultParams
+	}
+	dist, _, _ := Calculate([]rune(str1), []rune(str2), p.maxCost, p.insCost, p.subCost, p.delCost)
+	return dist
+}
+
+// Similarity returns a score in the range of 0..1 for how similar the two strings are.
+// A score of 1 means the strings are identical, and 0 means they have nothing in common.
+//
+// A nil third argument uses the default cost of 1 for all three operations.
+//
+// If a non-zero MinScore value is provided in the parameters, scores lower than it
+// will be returned as 0.
+func Similarity(str1, str2 string, p *Params) float64 {
+	return Match(str1, str2, p.Clone().BonusThreshold(1.1)) // guaranteed no bonus
+}
+
+// Match returns a similarity score adjusted by the same method as proposed by Winkler for
+// the Jaro distance - giving a bonus to string pairs that share a common prefix, only if their
+// similarity score is already over a threshold.
+//
+// The score is in the range of 0..1, with 1 meaning the strings are identical,
+// and 0 meaning they have nothing in common.
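+//
+// (editor's note, worked example under the defaults described below: Match("context",
+// "contact", nil) has edit distance 2 over a maximum distance of 7, so the base score
+// is 1 - 2/7, about 0.714; the shared 4-rune prefix "cont" then adds a bonus of
+// 4 * 0.1 * (1 - 0.714), about 0.114, for a final score of roughly 0.83.)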
+//
+// A nil third argument uses the default cost of 1 for all three operations, maximum length of
+// common prefix to consider for bonus of 4, scaling factor of 0.1, and bonus threshold of 0.7.
+//
+// If a non-zero MinScore value is provided in the parameters, scores lower than it
+// will be returned as 0.
+func Match(str1, str2 string, p *Params) float64 {
+	s1, s2 := []rune(str1), []rune(str2)
+	l1, l2 := len(s1), len(s2)
+	// two empty strings are identical; shortcut also avoids divByZero issues later on.
+	if l1 == 0 && l2 == 0 {
+		return 1
+	}
+
+	if p == nil {
+		p = defaultParams
+	}
+
+	// a min over 1 can never be satisfied, so the score is 0.
+	if p.minScore > 1 {
+		return 0
+	}
+
+	insCost, delCost, maxDist, max := p.insCost, p.delCost, 0, 0
+	if l1 > l2 {
+		l1, l2, insCost, delCost = l2, l1, delCost, insCost
+	}
+
+	if p.subCost < delCost+insCost {
+		maxDist = l1*p.subCost + (l2-l1)*insCost
+	} else {
+		maxDist = l1*delCost + l2*insCost
+	}
+
+	// a zero min is always satisfied, so no need to set a max cost.
+	if p.minScore > 0 {
+		// if p.minScore is lower than p.bonusThreshold, we can use a simplified formula
+		// for the max cost, because a sim score below min cannot receive a bonus.
+		if p.minScore < p.bonusThreshold {
+			// round down the max - a cost equal to a rounded up max would already be under min.
+			max = int((1 - p.minScore) * float64(maxDist))
+		} else {
+			// p.minScore <= sim + p.bonusPrefix*p.bonusScale*(1-sim)
+			// p.minScore <= (1-dist/maxDist) + p.bonusPrefix*p.bonusScale*(1-(1-dist/maxDist))
+			// p.minScore <= 1 - dist/maxDist + p.bonusPrefix*p.bonusScale*dist/maxDist
+			// 1 - p.minScore >= dist/maxDist - p.bonusPrefix*p.bonusScale*dist/maxDist
+			// (1-p.minScore)*maxDist/(1-p.bonusPrefix*p.bonusScale) >= dist
+			max = int((1 - p.minScore) * float64(maxDist) / (1 - float64(p.bonusPrefix)*p.bonusScale))
+		}
+	}
+
+	dist, pl, _ := Calculate(s1, s2, max, p.insCost, p.subCost, p.delCost)
+	if max > 0 && dist > max {
+		return 0
+	}
+	sim := 1 - float64(dist)/float64(maxDist)
+
+	if sim >= p.bonusThreshold && sim < 1 && p.bonusPrefix > 0 && p.bonusScale > 0 {
+		if pl > p.bonusPrefix {
+			pl = p.bonusPrefix
+		}
+		sim += float64(pl) * p.bonusScale * (1 - sim)
+	}
+
+	if sim < p.minScore {
+		return 0
+	}
+
+	return sim
+}
diff --git a/vendor/github.com/agext/levenshtein/params.go b/vendor/github.com/agext/levenshtein/params.go
new file mode 100644
index 0000000000..a85727b3ef
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/params.go
@@ -0,0 +1,152 @@
+// Copyright 2016 ALRUX Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package levenshtein
+
+// Params represents a set of parameter values for the various formulas involved
+// in the calculation of the Levenshtein string metrics.
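+//
+// (editor's note: the setter methods below all return the receiver, so a parameter
+// set can be built up fluently, e.g. NewParams().InsCost(2).MaxCost(10); both
+// setters appear later in this file.)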
+type Params struct {
+	insCost        int
+	subCost        int
+	delCost        int
+	maxCost        int
+	minScore       float64
+	bonusPrefix    int
+	bonusScale     float64
+	bonusThreshold float64
+}
+
+var (
+	defaultParams = NewParams()
+)
+
+// NewParams creates a new set of parameters and initializes it with the default values.
+func NewParams() *Params {
+	return &Params{
+		insCost:        1,
+		subCost:        1,
+		delCost:        1,
+		maxCost:        0,
+		minScore:       0,
+		bonusPrefix:    4,
+		bonusScale:     .1,
+		bonusThreshold: .7,
+	}
+}
+
+// Clone returns a pointer to a copy of the receiver parameter set, or of a new
+// default parameter set if the receiver is nil.
+func (p *Params) Clone() *Params {
+	if p == nil {
+		return NewParams()
+	}
+	return &Params{
+		insCost:        p.insCost,
+		subCost:        p.subCost,
+		delCost:        p.delCost,
+		maxCost:        p.maxCost,
+		minScore:       p.minScore,
+		bonusPrefix:    p.bonusPrefix,
+		bonusScale:     p.bonusScale,
+		bonusThreshold: p.bonusThreshold,
+	}
+}
+
+// InsCost overrides the default value of 1 for the cost of insertion.
+// The new value must be zero or positive.
+func (p *Params) InsCost(v int) *Params {
+	if v >= 0 {
+		p.insCost = v
+	}
+	return p
+}
+
+// SubCost overrides the default value of 1 for the cost of substitution.
+// The new value must be zero or positive.
+func (p *Params) SubCost(v int) *Params {
+	if v >= 0 {
+		p.subCost = v
+	}
+	return p
+}
+
+// DelCost overrides the default value of 1 for the cost of deletion.
+// The new value must be zero or positive.
+func (p *Params) DelCost(v int) *Params {
+	if v >= 0 {
+		p.delCost = v
+	}
+	return p
+}
+
+// MaxCost overrides the default value of 0 (meaning unlimited) for the maximum cost.
+// The calculation of Distance() stops when the result is guaranteed to exceed
+// this maximum, returning a lower-bound rather than exact value.
+// The new value must be zero or positive.
+func (p *Params) MaxCost(v int) *Params {
+	if v >= 0 {
+		p.maxCost = v
+	}
+	return p
+}
+
+// MinScore overrides the default value of 0 for the minimum similarity score.
+// Scores below this threshold are returned as 0 by Similarity() and Match().
+// The new value must be zero or positive. Note that a minimum greater than 1
+// can never be satisfied, resulting in a score of 0 for any pair of strings.
+func (p *Params) MinScore(v float64) *Params {
+	if v >= 0 {
+		p.minScore = v
+	}
+	return p
+}
+
+// BonusPrefix overrides the default value for the maximum length of
+// common prefix to be considered for bonus by Match().
+// The new value must be zero or positive.
+func (p *Params) BonusPrefix(v int) *Params {
+	if v >= 0 {
+		p.bonusPrefix = v
+	}
+	return p
+}
+
+// BonusScale overrides the default value for the scaling factor used by Match()
+// in calculating the bonus.
+// The new value must be zero or positive. To guarantee that the similarity score
+// remains in the interval 0..1, this scaling factor is not allowed to exceed
+// 1 / BonusPrefix.
+func (p *Params) BonusScale(v float64) *Params {
+	if v >= 0 {
+		p.bonusScale = v
+	}
+
+	// the bonus cannot exceed (1-sim), or the score may become greater than 1.
+	if float64(p.bonusPrefix)*p.bonusScale > 1 {
+		p.bonusScale = 1 / float64(p.bonusPrefix)
+	}
+
+	return p
+}
+
+// BonusThreshold overrides the default value for the minimum similarity score
+// for which Match() can assign a bonus.
+// The new value must be zero or positive. Note that a threshold greater than 1
+// effectively makes Match() become the equivalent of Similarity().
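+// (editor's note: Similarity() in levenshtein.go leans on exactly this property,
+// calling Match with p.Clone().BonusThreshold(1.1) so the prefix bonus can never apply.)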
+func (p *Params) BonusThreshold(v float64) *Params {
+	if v >= 0 {
+		p.bonusThreshold = v
+	}
+	return p
+}
diff --git a/vendor/github.com/agext/levenshtein/test.sh b/vendor/github.com/agext/levenshtein/test.sh
new file mode 100644
index 0000000000..c5ed72466f
--- /dev/null
+++ b/vendor/github.com/agext/levenshtein/test.sh
@@ -0,0 +1,10 @@
+set -ev
+
+if [[ "$1" == "goveralls" ]]; then
+	echo "Testing with goveralls..."
+	go get github.com/mattn/goveralls
+	$HOME/gopath/bin/goveralls -service=travis-ci
+else
+	echo "Testing with go test..."
+	go test -v ./...
+fi
diff --git a/vendor/github.com/armon/circbuf/.gitignore b/vendor/github.com/armon/circbuf/.gitignore
new file mode 100644
index 0000000000..00268614f0
--- /dev/null
+++ b/vendor/github.com/armon/circbuf/.gitignore
@@ -0,0 +1,22 @@
+# Compiled Object files, Static and Dynamic libs (Shared Objects)
+*.o
+*.a
+*.so
+
+# Folders
+_obj
+_test
+
+# Architecture specific extensions/prefixes
+*.[568vq]
+[568vq].out
+
+*.cgo1.go
+*.cgo2.c
+_cgo_defun.c
+_cgo_gotypes.go
+_cgo_export.*
+
+_testmain.go
+
+*.exe
diff --git a/vendor/github.com/armon/circbuf/LICENSE b/vendor/github.com/armon/circbuf/LICENSE
new file mode 100644
index 0000000000..106569e542
--- /dev/null
+++ b/vendor/github.com/armon/circbuf/LICENSE
@@ -0,0 +1,20 @@
+The MIT License (MIT)
+
+Copyright (c) 2013 Armon Dadgar
+
+Permission is hereby granted, free of charge, to any person obtaining a copy of
+this software and associated documentation files (the "Software"), to deal in
+the Software without restriction, including without limitation the rights to
+use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+the Software, and to permit persons to whom the Software is furnished to do so,
+subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
diff --git a/vendor/github.com/armon/circbuf/README.md b/vendor/github.com/armon/circbuf/README.md
new file mode 100644
index 0000000000..f2e356b8d7
--- /dev/null
+++ b/vendor/github.com/armon/circbuf/README.md
@@ -0,0 +1,28 @@
+circbuf
+=======
+
+This repository provides the `circbuf` package. This provides a `Buffer` object
+which is a circular (or ring) buffer. It has a fixed size, but can be written
+to infinitely. Only the last `size` bytes are ever retained. The buffer implements
+the `io.Writer` interface.
+
+Documentation
+=============
+
+Full documentation can be found on [Godoc](http://godoc.org/github.com/armon/circbuf)
+
+Usage
+=====
+
+The `circbuf` package is very easy to use:
+
+```go
+buf, _ := NewBuffer(6)
+buf.Write([]byte("hello world"))
+
+if string(buf.Bytes()) != " world" {
+	panic("should only have last 6 bytes!")
+}
+
+```
+
diff --git a/vendor/github.com/armon/circbuf/circbuf.go b/vendor/github.com/armon/circbuf/circbuf.go
new file mode 100644
index 0000000000..de3cb94a39
--- /dev/null
+++ b/vendor/github.com/armon/circbuf/circbuf.go
@@ -0,0 +1,92 @@
+package circbuf
+
+import (
+	"fmt"
+)
+
+// Buffer implements a circular buffer. It is a fixed size,
+// and new writes overwrite older data, such that for a buffer
+// of size N, for any amount of writes, only the last N bytes
+// are retained.
+type Buffer struct {
+	data        []byte
+	size        int64
+	writeCursor int64
+	written     int64
+}
+
+// NewBuffer creates a new buffer of a given size. The size
+// must be greater than 0.
+func NewBuffer(size int64) (*Buffer, error) {
+	if size <= 0 {
+		return nil, fmt.Errorf("Size must be positive")
+	}
+
+	b := &Buffer{
+		size: size,
+		data: make([]byte, size),
+	}
+	return b, nil
+}
+
+// Write writes up to len(buf) bytes to the internal ring,
+// overriding older data if necessary.
+func (b *Buffer) Write(buf []byte) (int, error) {
+	// Account for total bytes written
+	n := len(buf)
+	b.written += int64(n)
+
+	// If the buffer is larger than ours, then we only care
+	// about the last size bytes anyways
+	if int64(n) > b.size {
+		buf = buf[int64(n)-b.size:]
+	}
+
+	// Copy in place
+	remain := b.size - b.writeCursor
+	copy(b.data[b.writeCursor:], buf)
+	if int64(len(buf)) > remain {
+		copy(b.data, buf[remain:])
+	}
+
+	// Update location of the cursor
+	b.writeCursor = ((b.writeCursor + int64(len(buf))) % b.size)
+	return n, nil
+}
+
+// Size returns the size of the buffer
+func (b *Buffer) Size() int64 {
+	return b.size
+}
+
+// TotalWritten provides the total number of bytes written
+func (b *Buffer) TotalWritten() int64 {
+	return b.written
+}
+
+// Bytes provides a slice of the bytes written. This
+// slice should not be written to.
+func (b *Buffer) Bytes() []byte {
+	switch {
+	case b.written >= b.size && b.writeCursor == 0:
+		return b.data
+	case b.written > b.size:
+		out := make([]byte, b.size)
+		copy(out, b.data[b.writeCursor:])
+		copy(out[b.size-b.writeCursor:], b.data[:b.writeCursor])
+		return out
+	default:
+		return b.data[:b.writeCursor]
+	}
+}
+
+// Reset resets the buffer so it has no content.
+func (b *Buffer) Reset() {
+	b.writeCursor = 0
+	b.written = 0
+}
+
+// String returns the contents of the buffer as a string
+func (b *Buffer) String() string {
+	return string(b.Bytes())
+}
diff --git a/vendor/github.com/containerd/containerd/diff/walking/differ.go b/vendor/github.com/containerd/containerd/diff/walking/differ.go
new file mode 100644
index 0000000000..a24c72273c
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/diff/walking/differ.go
@@ -0,0 +1,203 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and + limitations under the License. +*/ + +package walking + +import ( + "context" + "encoding/base64" + "errors" + "fmt" + "io" + "math/rand" + "time" + + "github.com/containerd/containerd/archive" + "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + digest "github.com/opencontainers/go-digest" + ocispec "github.com/opencontainers/image-spec/specs-go/v1" +) + +type walkingDiff struct { + store content.Store +} + +var emptyDesc = ocispec.Descriptor{} +var uncompressed = "containerd.io/uncompressed" + +// NewWalkingDiff is a generic implementation of diff.Comparer. The diff is +// calculated by mounting both the upper and lower mount sets and walking the +// mounted directories concurrently. Changes are calculated by comparing files +// against each other or by comparing file existence between directories. +// NewWalkingDiff uses no special characteristics of the mount sets and is +// expected to work with any filesystem. +func NewWalkingDiff(store content.Store) diff.Comparer { + return &walkingDiff{ + store: store, + } +} + +// Compare creates a diff between the given mounts and uploads the result +// to the content store. +func (s *walkingDiff) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { + var config diff.Config + for _, opt := range opts { + if err := opt(&config); err != nil { + return emptyDesc, err + } + } + + var isCompressed bool + if config.Compressor != nil { + if config.MediaType == "" { + return emptyDesc, errors.New("media type must be explicitly specified when using custom compressor") + } + isCompressed = true + } else { + if config.MediaType == "" { + config.MediaType = ocispec.MediaTypeImageLayerGzip + } + + switch config.MediaType { + case ocispec.MediaTypeImageLayer: + case ocispec.MediaTypeImageLayerGzip: + isCompressed = true + default: + return emptyDesc, fmt.Errorf("unsupported diff media type: %v: %w", config.MediaType, errdefs.ErrNotImplemented) + } + } + + var ocidesc ocispec.Descriptor + if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { + return mount.WithTempMount(ctx, upper, func(upperRoot string) error { + var newReference bool + if config.Reference == "" { + newReference = true + config.Reference = uniqueRef() + } + + cw, err := s.store.Writer(ctx, + content.WithRef(config.Reference), + content.WithDescriptor(ocispec.Descriptor{ + MediaType: config.MediaType, // most contentstore implementations just ignore this + })) + if err != nil { + return fmt.Errorf("failed to open writer: %w", err) + } + + // errOpen is set when an error occurs while the content writer has not been + // committed or closed yet to force a cleanup + var errOpen error + defer func() { + if errOpen != nil { + cw.Close() + if newReference { + if abortErr := s.store.Abort(ctx, config.Reference); abortErr != nil { + log.G(ctx).WithError(abortErr).WithField("ref", config.Reference).Warnf("failed to delete diff upload") + } + } + } + }() + if !newReference { + if errOpen = cw.Truncate(0); errOpen != nil { + return errOpen + } + } + + if isCompressed { + dgstr := digest.SHA256.Digester() + var compressed io.WriteCloser + if config.Compressor != nil { + compressed, errOpen = config.Compressor(cw, config.MediaType) + if 
errOpen != nil { + return fmt.Errorf("failed to get compressed stream: %w", errOpen) + } + } else { + compressed, errOpen = compression.CompressStream(cw, compression.Gzip) + if errOpen != nil { + return fmt.Errorf("failed to get compressed stream: %w", errOpen) + } + } + errOpen = archive.WriteDiff(ctx, io.MultiWriter(compressed, dgstr.Hash()), lowerRoot, upperRoot) + compressed.Close() + if errOpen != nil { + return fmt.Errorf("failed to write compressed diff: %w", errOpen) + } + + if config.Labels == nil { + config.Labels = map[string]string{} + } + config.Labels[uncompressed] = dgstr.Digest().String() + } else { + if errOpen = archive.WriteDiff(ctx, cw, lowerRoot, upperRoot); errOpen != nil { + return fmt.Errorf("failed to write diff: %w", errOpen) + } + } + + var commitopts []content.Opt + if config.Labels != nil { + commitopts = append(commitopts, content.WithLabels(config.Labels)) + } + + dgst := cw.Digest() + if errOpen = cw.Commit(ctx, 0, dgst, commitopts...); errOpen != nil { + if !errdefs.IsAlreadyExists(errOpen) { + return fmt.Errorf("failed to commit: %w", errOpen) + } + errOpen = nil + } + + info, err := s.store.Info(ctx, dgst) + if err != nil { + return fmt.Errorf("failed to get info from content store: %w", err) + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + // Set uncompressed label if digest already existed without label + if _, ok := info.Labels[uncompressed]; !ok { + info.Labels[uncompressed] = config.Labels[uncompressed] + if _, err := s.store.Update(ctx, info, "labels."+uncompressed); err != nil { + return fmt.Errorf("error setting uncompressed label: %w", err) + } + } + + ocidesc = ocispec.Descriptor{ + MediaType: config.MediaType, + Size: info.Size, + Digest: info.Digest, + } + return nil + }) + }); err != nil { + return emptyDesc, err + } + + return ocidesc, nil +} + +func uniqueRef() string { + t := time.Now() + var b [3]byte + // Ignore read failures, just decreases uniqueness + rand.Read(b[:]) + return fmt.Sprintf("%d-%s", t.UnixNano(), base64.URLEncoding.EncodeToString(b[:])) +} diff --git a/vendor/github.com/containerd/containerd/images/converter/converter.go b/vendor/github.com/containerd/containerd/images/converter/converter.go new file mode 100644 index 0000000000..441e0169ef --- /dev/null +++ b/vendor/github.com/containerd/containerd/images/converter/converter.go @@ -0,0 +1,126 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// Package converter provides image converter +package converter + +import ( + "context" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/platforms" +) + +type convertOpts struct { + layerConvertFunc ConvertFunc + docker2oci bool + indexConvertFunc ConvertFunc + platformMC platforms.MatchComparer +} + +// Opt is an option for Convert() +type Opt func(*convertOpts) error + +// WithLayerConvertFunc specifies the function that converts layers. 
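+// Returning a nil descriptor from fn indicates that the layer did not need
+// conversion (see the ConvertFunc documentation in default.go).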
+func WithLayerConvertFunc(fn ConvertFunc) Opt {
+	return func(copts *convertOpts) error {
+		copts.layerConvertFunc = fn
+		return nil
+	}
+}
+
+// WithDockerToOCI converts Docker media types into OCI ones.
+func WithDockerToOCI(v bool) Opt {
+	return func(copts *convertOpts) error {
+		copts.docker2oci = v
+		return nil
+	}
+}
+
+// WithPlatform specifies the platform.
+// Defaults to all platforms.
+func WithPlatform(p platforms.MatchComparer) Opt {
+	return func(copts *convertOpts) error {
+		copts.platformMC = p
+		return nil
+	}
+}
+
+// WithIndexConvertFunc specifies the function that converts manifests and index (manifest lists).
+// Defaults to DefaultIndexConvertFunc.
+func WithIndexConvertFunc(fn ConvertFunc) Opt {
+	return func(copts *convertOpts) error {
+		copts.indexConvertFunc = fn
+		return nil
+	}
+}
+
+// Client is implemented by *containerd.Client.
+type Client interface {
+	WithLease(ctx context.Context, opts ...leases.Opt) (context.Context, func(context.Context) error, error)
+	ContentStore() content.Store
+	ImageService() images.Store
+}
+
+// Convert converts an image.
+func Convert(ctx context.Context, client Client, dstRef, srcRef string, opts ...Opt) (*images.Image, error) {
+	var copts convertOpts
+	for _, o := range opts {
+		if err := o(&copts); err != nil {
+			return nil, err
+		}
+	}
+	if copts.platformMC == nil {
+		copts.platformMC = platforms.All
+	}
+	if copts.indexConvertFunc == nil {
+		copts.indexConvertFunc = DefaultIndexConvertFunc(copts.layerConvertFunc, copts.docker2oci, copts.platformMC)
+	}
+
+	ctx, done, err := client.WithLease(ctx)
+	if err != nil {
+		return nil, err
+	}
+	defer done(ctx)
+
+	cs := client.ContentStore()
+	is := client.ImageService()
+	srcImg, err := is.Get(ctx, srcRef)
+	if err != nil {
+		return nil, err
+	}
+
+	dstDesc, err := copts.indexConvertFunc(ctx, cs, srcImg.Target)
+	if err != nil {
+		return nil, err
+	}
+
+	dstImg := srcImg
+	dstImg.Name = dstRef
+	if dstDesc != nil {
+		dstImg.Target = *dstDesc
+	}
+	var res images.Image
+	if dstRef != srcRef {
+		_ = is.Delete(ctx, dstRef)
+		res, err = is.Create(ctx, dstImg)
+	} else {
+		res, err = is.Update(ctx, dstImg)
+	}
+	return &res, err
+}
diff --git a/vendor/github.com/containerd/containerd/images/converter/default.go b/vendor/github.com/containerd/containerd/images/converter/default.go
new file mode 100644
index 0000000000..f4e944bc54
--- /dev/null
+++ b/vendor/github.com/containerd/containerd/images/converter/default.go
@@ -0,0 +1,453 @@
+/*
+   Copyright The containerd Authors.
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
+*/
+
+package converter
+
+import (
+	"bytes"
+	"context"
+	"encoding/json"
+	"fmt"
+	"strings"
+	"sync"
+
+	"github.com/containerd/containerd/content"
+	"github.com/containerd/containerd/images"
+	"github.com/containerd/containerd/platforms"
+	"github.com/opencontainers/go-digest"
+	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	"github.com/sirupsen/logrus"
+	"golang.org/x/sync/errgroup"
+)
+
+// ConvertFunc returns a converted content descriptor.
+// When the content was not converted, ConvertFunc returns nil. +type ConvertFunc func(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) + +// DefaultIndexConvertFunc is the default convert func used by Convert. +func DefaultIndexConvertFunc(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer) ConvertFunc { + c := &defaultConverter{ + layerConvertFunc: layerConvertFunc, + docker2oci: docker2oci, + platformMC: platformMC, + diffIDMap: make(map[digest.Digest]digest.Digest), + } + return c.convert +} + +// ConvertHookFunc is a callback function called during conversion of a blob. +// orgDesc is the target descriptor to convert. newDesc is passed if conversion happens. +type ConvertHookFunc func(ctx context.Context, cs content.Store, orgDesc ocispec.Descriptor, newDesc *ocispec.Descriptor) (*ocispec.Descriptor, error) + +// ConvertHooks is a configuration for hook callbacks called during blob conversion. +type ConvertHooks struct { + // PostConvertHook is a callback function called for each blob after conversion is done. + PostConvertHook ConvertHookFunc +} + +// IndexConvertFuncWithHook is the convert func used by Convert with hook functions support. +func IndexConvertFuncWithHook(layerConvertFunc ConvertFunc, docker2oci bool, platformMC platforms.MatchComparer, hooks ConvertHooks) ConvertFunc { + c := &defaultConverter{ + layerConvertFunc: layerConvertFunc, + docker2oci: docker2oci, + platformMC: platformMC, + diffIDMap: make(map[digest.Digest]digest.Digest), + hooks: hooks, + } + return c.convert +} + +type defaultConverter struct { + layerConvertFunc ConvertFunc + docker2oci bool + platformMC platforms.MatchComparer + diffIDMap map[digest.Digest]digest.Digest // key: old diffID, value: new diffID + diffIDMapMu sync.RWMutex + hooks ConvertHooks +} + +// convert dispatches desc.MediaType and calls c.convert{Layer,Manifest,Index,Config}. +// +// Also converts media type if c.docker2oci is set. +func (c *defaultConverter) convert(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + var ( + newDesc *ocispec.Descriptor + err error + ) + if images.IsLayerType(desc.MediaType) { + newDesc, err = c.convertLayer(ctx, cs, desc) + } else if images.IsManifestType(desc.MediaType) { + newDesc, err = c.convertManifest(ctx, cs, desc) + } else if images.IsIndexType(desc.MediaType) { + newDesc, err = c.convertIndex(ctx, cs, desc) + } else if images.IsConfigType(desc.MediaType) { + newDesc, err = c.convertConfig(ctx, cs, desc) + } + if err != nil { + return nil, err + } + + if c.hooks.PostConvertHook != nil { + if newDescPost, err := c.hooks.PostConvertHook(ctx, cs, desc, newDesc); err != nil { + return nil, err + } else if newDescPost != nil { + newDesc = newDescPost + } + } + + if images.IsDockerType(desc.MediaType) { + if c.docker2oci { + if newDesc == nil { + newDesc = copyDesc(desc) + } + newDesc.MediaType = ConvertDockerMediaTypeToOCI(newDesc.MediaType) + } else if (newDesc == nil && len(desc.Annotations) != 0) || (newDesc != nil && len(newDesc.Annotations) != 0) { + // Annotations is supported only on OCI manifest. + // We need to remove annotations for Docker media types. 
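+			// (The Docker schema 2 media types define no annotations field, so
+			// the annotations are dropped rather than carried over.)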
+			if newDesc == nil {
+				newDesc = copyDesc(desc)
+			}
+			newDesc.Annotations = nil
+		}
+	}
+	logrus.WithField("old", desc).WithField("new", newDesc).Debugf("converted")
+	return newDesc, nil
+}
+
+func copyDesc(desc ocispec.Descriptor) *ocispec.Descriptor {
+	descCopy := desc
+	return &descCopy
+}
+
+// convertLayer converts image layers if c.layerConvertFunc is set.
+//
+// c.layerConvertFunc can be nil, e.g., for converting Docker media types to OCI ones.
+func (c *defaultConverter) convertLayer(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+	if c.layerConvertFunc != nil {
+		return c.layerConvertFunc(ctx, cs, desc)
+	}
+	return nil, nil
+}
+
+// convertManifest converts image manifests.
+//
+// - converts `.mediaType` if the target format is OCI
+// - records diff ID changes in c.diffIDMap
+func (c *defaultConverter) convertManifest(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) {
+	var (
+		manifest ocispec.Manifest
+		modified bool
+	)
+	labels, err := readJSON(ctx, cs, &manifest, desc)
+	if err != nil {
+		return nil, err
+	}
+	if labels == nil {
+		labels = make(map[string]string)
+	}
+	if images.IsDockerType(manifest.MediaType) && c.docker2oci {
+		manifest.MediaType = ConvertDockerMediaTypeToOCI(manifest.MediaType)
+		modified = true
+	}
+	var mu sync.Mutex
+	eg, ctx2 := errgroup.WithContext(ctx)
+	for i, l := range manifest.Layers {
+		i := i
+		l := l
+		oldDiffID, err := images.GetDiffID(ctx, cs, l)
+		if err != nil {
+			return nil, err
+		}
+		eg.Go(func() error {
+			newL, err := c.convert(ctx2, cs, l)
+			if err != nil {
+				return err
+			}
+			if newL != nil {
+				mu.Lock()
+				// update GC labels
+				ClearGCLabels(labels, l.Digest)
+				labelKey := fmt.Sprintf("containerd.io/gc.ref.content.l.%d", i)
+				labels[labelKey] = newL.Digest.String()
+				manifest.Layers[i] = *newL
+				modified = true
+				mu.Unlock()
+
+				// diffID changes if the tar entries were modified.
+				// diffID stays the same if only the compression type was changed.
+				// When diffID changed, add a map entry so that we can update image config.
+				newDiffID, err := images.GetDiffID(ctx, cs, *newL)
+				if err != nil {
+					return err
+				}
+				if newDiffID != oldDiffID {
+					c.diffIDMapMu.Lock()
+					c.diffIDMap[oldDiffID] = newDiffID
+					c.diffIDMapMu.Unlock()
+				}
+			}
+			return nil
+		})
+	}
+	if err := eg.Wait(); err != nil {
+		return nil, err
+	}
+
+	newConfig, err := c.convert(ctx, cs, manifest.Config)
+	if err != nil {
+		return nil, err
+	}
+	if newConfig != nil {
+		ClearGCLabels(labels, manifest.Config.Digest)
+		labels["containerd.io/gc.ref.content.config"] = newConfig.Digest.String()
+		manifest.Config = *newConfig
+		modified = true
+	}
+
+	if modified {
+		return writeJSON(ctx, cs, &manifest, desc, labels)
+	}
+	return nil, nil
+}
+
+// convertIndex converts image index.
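+// (an OCI image index or Docker manifest list).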
+// +// - converts `.mediaType` if the target format is OCI +// - clears manifest entries that do not match c.platformMC +func (c *defaultConverter) convertIndex(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + var ( + index ocispec.Index + modified bool + ) + labels, err := readJSON(ctx, cs, &index, desc) + if err != nil { + return nil, err + } + if labels == nil { + labels = make(map[string]string) + } + if images.IsDockerType(index.MediaType) && c.docker2oci { + index.MediaType = ConvertDockerMediaTypeToOCI(index.MediaType) + modified = true + } + + newManifests := make([]ocispec.Descriptor, len(index.Manifests)) + newManifestsToBeRemoved := make(map[int]struct{}) // slice index + var mu sync.Mutex + eg, ctx2 := errgroup.WithContext(ctx) + for i, mani := range index.Manifests { + i := i + mani := mani + labelKey := fmt.Sprintf("containerd.io/gc.ref.content.m.%d", i) + eg.Go(func() error { + if mani.Platform != nil && !c.platformMC.Match(*mani.Platform) { + mu.Lock() + ClearGCLabels(labels, mani.Digest) + newManifestsToBeRemoved[i] = struct{}{} + modified = true + mu.Unlock() + return nil + } + newMani, err := c.convert(ctx2, cs, mani) + if err != nil { + return err + } + mu.Lock() + if newMani != nil { + ClearGCLabels(labels, mani.Digest) + labels[labelKey] = newMani.Digest.String() + // NOTE: for keeping manifest order, we specify `i` index explicitly + newManifests[i] = *newMani + modified = true + } else { + newManifests[i] = mani + } + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + return nil, err + } + if modified { + var newManifestsClean []ocispec.Descriptor + for i, m := range newManifests { + if _, ok := newManifestsToBeRemoved[i]; !ok { + newManifestsClean = append(newManifestsClean, m) + } + } + index.Manifests = newManifestsClean + return writeJSON(ctx, cs, &index, desc, labels) + } + return nil, nil +} + +// convertConfig converts image config contents. +// +// - updates `.rootfs.diff_ids` using c.diffIDMap . +// +// - clears legacy `.config.Image` and `.container_config.Image` fields if `.rootfs.diff_ids` was updated. +func (c *defaultConverter) convertConfig(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (*ocispec.Descriptor, error) { + var ( + cfg DualConfig + cfgAsOCI ocispec.Image // read only, used for parsing cfg + modified bool + ) + + labels, err := readJSON(ctx, cs, &cfg, desc) + if err != nil { + return nil, err + } + if labels == nil { + labels = make(map[string]string) + } + if _, err := readJSON(ctx, cs, &cfgAsOCI, desc); err != nil { + return nil, err + } + + if rootfs := cfgAsOCI.RootFS; rootfs.Type == "layers" { + rootfsModified := false + c.diffIDMapMu.RLock() + for i, oldDiffID := range rootfs.DiffIDs { + if newDiffID, ok := c.diffIDMap[oldDiffID]; ok && newDiffID != oldDiffID { + rootfs.DiffIDs[i] = newDiffID + rootfsModified = true + } + } + c.diffIDMapMu.RUnlock() + if rootfsModified { + rootfsB, err := json.Marshal(rootfs) + if err != nil { + return nil, err + } + cfg["rootfs"] = (*json.RawMessage)(&rootfsB) + modified = true + } + } + + if modified { + // cfg may have dummy value for legacy `.config.Image` and `.container_config.Image` + // We should clear the ID if we changed the diff IDs. + if _, err := clearDockerV1DummyID(cfg); err != nil { + return nil, err + } + return writeJSON(ctx, cs, &cfg, desc, labels) + } + return nil, nil +} + +// clearDockerV1DummyID clears the dummy values for legacy `.config.Image` and `.container_config.Image`. 
+// Returns true if the cfg was modified. +func clearDockerV1DummyID(cfg DualConfig) (bool, error) { + var modified bool + f := func(k string) error { + if configX, ok := cfg[k]; ok && configX != nil { + var configField map[string]*json.RawMessage + if err := json.Unmarshal(*configX, &configField); err != nil { + return err + } + delete(configField, "Image") + b, err := json.Marshal(configField) + if err != nil { + return err + } + cfg[k] = (*json.RawMessage)(&b) + modified = true + } + return nil + } + if err := f("config"); err != nil { + return modified, err + } + if err := f("container_config"); err != nil { + return modified, err + } + return modified, nil +} + +// DualConfig covers Docker config (v1.0, v1.1, v1.2) and OCI config. +// Unmarshalled as map[string]*json.RawMessage to retain unknown fields on remarshalling. +type DualConfig map[string]*json.RawMessage + +func readJSON(ctx context.Context, cs content.Store, x interface{}, desc ocispec.Descriptor) (map[string]string, error) { + info, err := cs.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + labels := info.Labels + b, err := content.ReadBlob(ctx, cs, desc) + if err != nil { + return nil, err + } + if err := json.Unmarshal(b, x); err != nil { + return nil, err + } + return labels, nil +} + +func writeJSON(ctx context.Context, cs content.Store, x interface{}, oldDesc ocispec.Descriptor, labels map[string]string) (*ocispec.Descriptor, error) { + b, err := json.Marshal(x) + if err != nil { + return nil, err + } + dgst := digest.SHA256.FromBytes(b) + ref := fmt.Sprintf("converter-write-json-%s", dgst.String()) + w, err := content.OpenWriter(ctx, cs, content.WithRef(ref)) + if err != nil { + return nil, err + } + if err := content.Copy(ctx, w, bytes.NewReader(b), int64(len(b)), dgst, content.WithLabels(labels)); err != nil { + return nil, err + } + if err := w.Close(); err != nil { + return nil, err + } + newDesc := oldDesc + newDesc.Size = int64(len(b)) + newDesc.Digest = dgst + return &newDesc, nil +} + +// ConvertDockerMediaTypeToOCI converts a media type string +func ConvertDockerMediaTypeToOCI(mt string) string { + switch mt { + case images.MediaTypeDockerSchema2ManifestList: + return ocispec.MediaTypeImageIndex + case images.MediaTypeDockerSchema2Manifest: + return ocispec.MediaTypeImageManifest + case images.MediaTypeDockerSchema2LayerGzip: + return ocispec.MediaTypeImageLayerGzip + case images.MediaTypeDockerSchema2LayerForeignGzip: + return ocispec.MediaTypeImageLayerNonDistributableGzip + case images.MediaTypeDockerSchema2Layer: + return ocispec.MediaTypeImageLayer + case images.MediaTypeDockerSchema2LayerForeign: + return ocispec.MediaTypeImageLayerNonDistributable + case images.MediaTypeDockerSchema2Config: + return ocispec.MediaTypeImageConfig + default: + return mt + } +} + +// ClearGCLabels clears GC labels for the given digest. +func ClearGCLabels(labels map[string]string, dgst digest.Digest) { + for k, v := range labels { + if v == dgst.String() && strings.HasPrefix(k, "containerd.io/gc.ref.content") { + delete(labels, k) + } + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md b/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md new file mode 100644 index 0000000000..c907e4216c --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/NOTICE.md @@ -0,0 +1,67 @@ +The source code developed under the Stargz Snapshotter Project is licensed under Apache License 2.0. + +However, the Stargz Snapshotter project contains modified subcomponents from Container Registry Filesystem Project with separate copyright notices and license terms. Your use of the source code for the subcomponent is subject to the terms and conditions as defined by the source project. Files in these subcomponents contain following file header. + +``` +Copyright 2019 The Go Authors. All rights reserved. +Use of this source code is governed by a BSD-style +license that can be found in the NOTICE.md file. +``` + +These source code is governed by a 3-Clause BSD license. The copyright notice, list of conditions and disclaimer are the following. + +``` +Copyright (c) 2019 Google LLC. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +``` + +The Stargz Snapshotter project also contains modified benchmarking code from HelloBench Project with separate copyright notices and license terms. Your use of the source code for the benchmarking code is subject to the terms and conditions as defined by the source project. These source code is governed by a MIT license. The copyright notice, condition and disclaimer are the following. The file in the benchmarking code contains it as the file header. 
+ +``` +The MIT License (MIT) + +Copyright (c) 2015 Tintri + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. +``` diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE new file mode 100644 index 0000000000..d645695673 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/LICENSE @@ -0,0 +1,202 @@ + + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go new file mode 100644 index 0000000000..9ee97fc911 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/build.go @@ -0,0 +1,628 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. 
+   Use of this source code is governed by a BSD-style
+   license that can be found in the LICENSE file.
+*/
+
+package estargz
+
+import (
+	"archive/tar"
+	"bytes"
+	"compress/gzip"
+	"errors"
+	"fmt"
+	"io"
+	"io/ioutil"
+	"os"
+	"path"
+	"runtime"
+	"strings"
+	"sync"
+
+	"github.com/containerd/stargz-snapshotter/estargz/errorutil"
+	"github.com/klauspost/compress/zstd"
+	digest "github.com/opencontainers/go-digest"
+	"golang.org/x/sync/errgroup"
+)
+
+type options struct {
+	chunkSize              int
+	compressionLevel       int
+	prioritizedFiles       []string
+	missedPrioritizedFiles *[]string
+	compression            Compression
+}
+
+type Option func(o *options) error
+
+// WithChunkSize option specifies the chunk size of eStargz blob to build.
+func WithChunkSize(chunkSize int) Option {
+	return func(o *options) error {
+		o.chunkSize = chunkSize
+		return nil
+	}
+}
+
+// WithCompressionLevel option specifies the gzip compression level.
+// The default is gzip.BestCompression.
+// See also: https://godoc.org/compress/gzip#pkg-constants
+func WithCompressionLevel(level int) Option {
+	return func(o *options) error {
+		o.compressionLevel = level
+		return nil
+	}
+}
+
+// WithPrioritizedFiles option specifies the list of prioritized files.
+// These files must be complete paths that are absolute or relative to "/"
+// For example, all of "foo/bar", "/foo/bar", "./foo/bar" and "../foo/bar"
+// are treated as "/foo/bar".
+func WithPrioritizedFiles(files []string) Option {
+	return func(o *options) error {
+		o.prioritizedFiles = files
+		return nil
+	}
+}
+
+// WithAllowPrioritizeNotFound makes Build continue the execution even if some
+// of prioritized files specified by WithPrioritizedFiles option aren't found
+// in the input tar. Instead, this records all missed file names to the passed
+// slice.
+func WithAllowPrioritizeNotFound(missedFiles *[]string) Option {
+	return func(o *options) error {
+		if missedFiles == nil {
+			return fmt.Errorf("WithAllowPrioritizeNotFound: slice must be passed")
+		}
+		o.missedPrioritizedFiles = missedFiles
+		return nil
+	}
+}
+
+// WithCompression specifies compression algorithm to be used.
+// Default is gzip.
+func WithCompression(compression Compression) Option {
+	return func(o *options) error {
+		o.compression = compression
+		return nil
+	}
+}
+
+// Blob is an eStargz blob.
+type Blob struct {
+	io.ReadCloser
+	diffID    digest.Digester
+	tocDigest digest.Digest
+}
+
+// DiffID returns the digest of uncompressed blob.
+// It is only valid to call DiffID after Close.
+func (b *Blob) DiffID() digest.Digest {
+	return b.diffID.Digest()
+}
+
+// TOCDigest returns the digest of uncompressed TOC JSON.
+func (b *Blob) TOCDigest() digest.Digest {
+	return b.tocDigest
+}
+
+// Build builds an eStargz blob which is an extended version of stargz, from a blob (gzip, zstd
+// or plain tar) passed through the argument. If some prioritized files are listed in
+// the option, these files are grouped as "prioritized" and can be used for runtime optimization
+// (e.g. prefetch). This function builds the blob in parallel, dividing it into several
+// (at least the number of runtime.GOMAXPROCS(0)) sub-blobs.
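+//
+// A minimal usage sketch (illustrative only; the file name and the skipped
+// error handling are assumptions, not part of this package):
+//
+//	f, _ := os.Open("layer.tar")
+//	fi, _ := f.Stat()
+//	blob, err := estargz.Build(io.NewSectionReader(f, 0, fi.Size()),
+//		estargz.WithPrioritizedFiles([]string{"etc/hosts"}))
+//	if err == nil {
+//		defer blob.Close()
+//		// consume blob (an io.ReadCloser); DiffID is valid after Close
+//	}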
+func Build(tarBlob *io.SectionReader, opt ...Option) (_ *Blob, rErr error) { + var opts options + opts.compressionLevel = gzip.BestCompression // BestCompression by default + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + if opts.compression == nil { + opts.compression = newGzipCompressionWithLevel(opts.compressionLevel) + } + layerFiles := newTempFiles() + defer func() { + if rErr != nil { + if err := layerFiles.CleanupAll(); err != nil { + rErr = fmt.Errorf("failed to cleanup tmp files: %v: %w", err, rErr) + } + } + }() + tarBlob, err := decompressBlob(tarBlob, layerFiles) + if err != nil { + return nil, err + } + entries, err := sortEntries(tarBlob, opts.prioritizedFiles, opts.missedPrioritizedFiles) + if err != nil { + return nil, err + } + tarParts := divideEntries(entries, runtime.GOMAXPROCS(0)) + writers := make([]*Writer, len(tarParts)) + payloads := make([]*os.File, len(tarParts)) + var mu sync.Mutex + var eg errgroup.Group + for i, parts := range tarParts { + i, parts := i, parts + // builds verifiable stargz sub-blobs + eg.Go(func() error { + esgzFile, err := layerFiles.TempFile("", "esgzdata") + if err != nil { + return err + } + sw := NewWriterWithCompressor(esgzFile, opts.compression) + sw.ChunkSize = opts.chunkSize + if err := sw.AppendTar(readerFromEntries(parts...)); err != nil { + return err + } + mu.Lock() + writers[i] = sw + payloads[i] = esgzFile + mu.Unlock() + return nil + }) + } + if err := eg.Wait(); err != nil { + rErr = err + return nil, err + } + tocAndFooter, tocDgst, err := closeWithCombine(opts.compressionLevel, writers...) + if err != nil { + rErr = err + return nil, err + } + var rs []io.Reader + for _, p := range payloads { + fs, err := fileSectionReader(p) + if err != nil { + return nil, err + } + rs = append(rs, fs) + } + diffID := digest.Canonical.Digester() + pr, pw := io.Pipe() + go func() { + r, err := opts.compression.Reader(io.TeeReader(io.MultiReader(append(rs, tocAndFooter)...), pw)) + if err != nil { + pw.CloseWithError(err) + return + } + defer r.Close() + if _, err := io.Copy(diffID.Hash(), r); err != nil { + pw.CloseWithError(err) + return + } + pw.Close() + }() + return &Blob{ + ReadCloser: readCloser{ + Reader: pr, + closeFunc: layerFiles.CleanupAll, + }, + tocDigest: tocDgst, + diffID: diffID, + }, nil +} + +// closeWithCombine takes unclosed Writers and close them. This also returns the +// toc that combined all Writers into. +// Writers doesn't write TOC and footer to the underlying writers so they can be +// combined into a single eStargz and tocAndFooter returned by this function can +// be appended at the tail of that combined blob. 
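+// The combined TOC shifts each sub-blob's entry offsets by the total size of
+// the sub-blobs written before it (tracked via currentOffset below).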
+func closeWithCombine(compressionLevel int, ws ...*Writer) (tocAndFooterR io.Reader, tocDgst digest.Digest, err error) {
+	if len(ws) == 0 {
+		return nil, "", fmt.Errorf("at least one writer must be passed")
+	}
+	for _, w := range ws {
+		if w.closed {
+			return nil, "", fmt.Errorf("writer must be unclosed")
+		}
+		defer func(w *Writer) { w.closed = true }(w)
+		if err := w.closeGz(); err != nil {
+			return nil, "", err
+		}
+		if err := w.bw.Flush(); err != nil {
+			return nil, "", err
+		}
+	}
+	var (
+		mtoc          = new(JTOC)
+		currentOffset int64
+	)
+	mtoc.Version = ws[0].toc.Version
+	for _, w := range ws {
+		for _, e := range w.toc.Entries {
+			// Recalculate Offset of non-empty files/chunks
+			if (e.Type == "reg" && e.Size > 0) || e.Type == "chunk" {
+				e.Offset += currentOffset
+			}
+			mtoc.Entries = append(mtoc.Entries, e)
+		}
+		if w.toc.Version > mtoc.Version {
+			mtoc.Version = w.toc.Version
+		}
+		currentOffset += w.cw.n
+	}
+
+	return tocAndFooter(ws[0].compressor, mtoc, currentOffset)
+}
+
+func tocAndFooter(compressor Compressor, toc *JTOC, offset int64) (io.Reader, digest.Digest, error) {
+	buf := new(bytes.Buffer)
+	tocDigest, err := compressor.WriteTOCAndFooter(buf, offset, toc, nil)
+	if err != nil {
+		return nil, "", err
+	}
+	return buf, tocDigest, nil
+}
+
+// divideEntries divides the passed entries into at least the number of parts
+// specified by the argument.
+func divideEntries(entries []*entry, minPartsNum int) (set [][]*entry) {
+	var estimatedSize int64
+	for _, e := range entries {
+		estimatedSize += e.header.Size
+	}
+	unitSize := estimatedSize / int64(minPartsNum)
+	var (
+		nextEnd = unitSize
+		offset  int64
+	)
+	set = append(set, []*entry{})
+	for _, e := range entries {
+		set[len(set)-1] = append(set[len(set)-1], e)
+		offset += e.header.Size
+		if offset > nextEnd {
+			set = append(set, []*entry{})
+			nextEnd += unitSize
+		}
+	}
+	return
+}
+
+var errNotFound = errors.New("not found")
+
+// sortEntries reads the specified tar blob and returns a list of tar entries.
+// If some of the prioritized files are specified, the list starts from these
+// files, keeping the order specified by the argument.
+func sortEntries(in io.ReaderAt, prioritized []string, missedPrioritized *[]string) ([]*entry, error) {
+
+	// Import tar file.
+	intar, err := importTar(in)
+	if err != nil {
+		return nil, fmt.Errorf("failed to sort: %w", err)
+	}
+
+	// Sort the tar file respecting the prioritized files list.
+	sorted := &tarFile{}
+	for _, l := range prioritized {
+		if err := moveRec(l, intar, sorted); err != nil {
+			if errors.Is(err, errNotFound) && missedPrioritized != nil {
+				*missedPrioritized = append(*missedPrioritized, l)
+				continue // allow not found
+			}
+			return nil, fmt.Errorf("failed to sort tar entries: %w", err)
+		}
+	}
+	if len(prioritized) == 0 {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     NoPrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	} else {
+		sorted.add(&entry{
+			header: &tar.Header{
+				Name:     PrefetchLandmark,
+				Typeflag: tar.TypeReg,
+				Size:     int64(len([]byte{landmarkContents})),
+			},
+			payload: bytes.NewReader([]byte{landmarkContents}),
+		})
+	}
+
+	// Dump all entries and concatenate them.
+	return append(sorted.dump(), intar.dump()...), nil
+}
+
+// readerFromEntries returns a reader of tar archive that contains entries passed
+// through the arguments.
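+// The archive is generated lazily through an io.Pipe: a background goroutine
+// writes the entries as the returned reader is consumed.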
+func readerFromEntries(entries ...*entry) io.Reader { + pr, pw := io.Pipe() + go func() { + tw := tar.NewWriter(pw) + defer tw.Close() + for _, entry := range entries { + if err := tw.WriteHeader(entry.header); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar header: %v", err)) + return + } + if _, err := io.Copy(tw, entry.payload); err != nil { + pw.CloseWithError(fmt.Errorf("Failed to write tar payload: %v", err)) + return + } + } + pw.Close() + }() + return pr +} + +func importTar(in io.ReaderAt) (*tarFile, error) { + tf := &tarFile{} + pw, err := newCountReader(in) + if err != nil { + return nil, fmt.Errorf("failed to make position watcher: %w", err) + } + tr := tar.NewReader(pw) + + // Walk through all nodes. + for { + // Fetch and parse next header. + h, err := tr.Next() + if err != nil { + if err == io.EOF { + break + } else { + return nil, fmt.Errorf("failed to parse tar file, %w", err) + } + } + switch cleanEntryName(h.Name) { + case PrefetchLandmark, NoPrefetchLandmark: + // Ignore existing landmark + continue + } + + // Add entry. If it already exists, replace it. + if _, ok := tf.get(h.Name); ok { + tf.remove(h.Name) + } + tf.add(&entry{ + header: h, + payload: io.NewSectionReader(in, pw.currentPos(), h.Size), + }) + } + + return tf, nil +} + +func moveRec(name string, in *tarFile, out *tarFile) error { + name = cleanEntryName(name) + if name == "" { // root directory. stop recursion. + if e, ok := in.get(name); ok { + // entry of the root directory exists. we should move it as well. + // this case will occur if tar entries are prefixed with "./", "/", etc. + out.add(e) + in.remove(name) + } + return nil + } + + _, okIn := in.get(name) + _, okOut := out.get(name) + if !okIn && !okOut { + return fmt.Errorf("file: %q: %w", name, errNotFound) + } + + parent, _ := path.Split(strings.TrimSuffix(name, "/")) + if err := moveRec(parent, in, out); err != nil { + return err + } + if e, ok := in.get(name); ok && e.header.Typeflag == tar.TypeLink { + if err := moveRec(e.header.Linkname, in, out); err != nil { + return err + } + } + if e, ok := in.get(name); ok { + out.add(e) + in.remove(name) + } + return nil +} + +type entry struct { + header *tar.Header + payload io.ReadSeeker +} + +type tarFile struct { + index map[string]*entry + stream []*entry +} + +func (f *tarFile) add(e *entry) { + if f.index == nil { + f.index = make(map[string]*entry) + } + f.index[cleanEntryName(e.header.Name)] = e + f.stream = append(f.stream, e) +} + +func (f *tarFile) remove(name string) { + name = cleanEntryName(name) + if f.index != nil { + delete(f.index, name) + } + var filtered []*entry + for _, e := range f.stream { + if cleanEntryName(e.header.Name) == name { + continue + } + filtered = append(filtered, e) + } + f.stream = filtered +} + +func (f *tarFile) get(name string) (e *entry, ok bool) { + if f.index == nil { + return nil, false + } + e, ok = f.index[cleanEntryName(name)] + return +} + +func (f *tarFile) dump() []*entry { + return f.stream +} + +type readCloser struct { + io.Reader + closeFunc func() error +} + +func (rc readCloser) Close() error { + return rc.closeFunc() +} + +func fileSectionReader(file *os.File) (*io.SectionReader, error) { + info, err := file.Stat() + if err != nil { + return nil, err + } + return io.NewSectionReader(file, 0, info.Size()), nil +} + +func newTempFiles() *tempFiles { + return &tempFiles{} +} + +type tempFiles struct { + files []*os.File + filesMu sync.Mutex +} + +func (tf *tempFiles) TempFile(dir, pattern string) (*os.File, error) { + f, err 
:= ioutil.TempFile(dir, pattern) + if err != nil { + return nil, err + } + tf.filesMu.Lock() + tf.files = append(tf.files, f) + tf.filesMu.Unlock() + return f, nil +} + +func (tf *tempFiles) CleanupAll() error { + tf.filesMu.Lock() + defer tf.filesMu.Unlock() + var allErr []error + for _, f := range tf.files { + if err := f.Close(); err != nil { + allErr = append(allErr, err) + } + if err := os.Remove(f.Name()); err != nil { + allErr = append(allErr, err) + } + } + tf.files = nil + return errorutil.Aggregate(allErr) +} + +func newCountReader(r io.ReaderAt) (*countReader, error) { + pos := int64(0) + return &countReader{r: r, cPos: &pos}, nil +} + +type countReader struct { + r io.ReaderAt + cPos *int64 + + mu sync.Mutex +} + +func (cr *countReader) Read(p []byte) (int, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + n, err := cr.r.ReadAt(p, *cr.cPos) + if err == nil { + *cr.cPos += int64(n) + } + return n, err +} + +func (cr *countReader) Seek(offset int64, whence int) (int64, error) { + cr.mu.Lock() + defer cr.mu.Unlock() + + switch whence { + default: + return 0, fmt.Errorf("Unknown whence: %v", whence) + case io.SeekStart: + case io.SeekCurrent: + offset += *cr.cPos + case io.SeekEnd: + return 0, fmt.Errorf("Unsupported whence: %v", whence) + } + + if offset < 0 { + return 0, fmt.Errorf("invalid offset") + } + *cr.cPos = offset + return offset, nil +} + +func (cr *countReader) currentPos() int64 { + cr.mu.Lock() + defer cr.mu.Unlock() + + return *cr.cPos +} + +func decompressBlob(org *io.SectionReader, tmp *tempFiles) (*io.SectionReader, error) { + if org.Size() < 4 { + return org, nil + } + src := make([]byte, 4) + if _, err := org.Read(src); err != nil && err != io.EOF { + return nil, err + } + var dR io.Reader + if bytes.Equal([]byte{0x1F, 0x8B, 0x08}, src[:3]) { + // gzip + dgR, err := gzip.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dgR.Close() + dR = io.Reader(dgR) + } else if bytes.Equal([]byte{0x28, 0xb5, 0x2f, 0xfd}, src[:4]) { + // zstd + dzR, err := zstd.NewReader(io.NewSectionReader(org, 0, org.Size())) + if err != nil { + return nil, err + } + defer dzR.Close() + dR = io.Reader(dzR) + } else { + // uncompressed + return io.NewSectionReader(org, 0, org.Size()), nil + } + b, err := tmp.TempFile("", "uncompresseddata") + if err != nil { + return nil, err + } + if _, err := io.Copy(b, dR); err != nil { + return nil, err + } + return fileSectionReader(b) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go new file mode 100644 index 0000000000..6de78b02dc --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/errorutil/errors.go @@ -0,0 +1,40 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +package errorutil + +import ( + "errors" + "fmt" + "strings" +) + +// Aggregate combines a list of errors into a single new error. 
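decompressBlob above dispatches on the blob's leading magic bytes. A standalone sketch of the same detection rule (gzip streams start with 1f 8b 08, zstd frames with 28 b5 2f fd):

	func sniffCompression(head []byte) string {
		switch {
		case len(head) >= 3 && bytes.Equal(head[:3], []byte{0x1F, 0x8B, 0x08}):
			return "gzip"
		case len(head) >= 4 && bytes.Equal(head[:4], []byte{0x28, 0xB5, 0x2F, 0xFD}):
			return "zstd"
		default:
			return "uncompressed"
		}
	}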
+func Aggregate(errs []error) error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + points := make([]string, len(errs)+1) + points[0] = fmt.Sprintf("%d error(s) occurred:", len(errs)) + for i, err := range errs { + points[i+1] = fmt.Sprintf("* %s", err) + } + return errors.New(strings.Join(points, "\n\t")) + } +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go new file mode 100644 index 0000000000..4b655c1453 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/estargz.go @@ -0,0 +1,1042 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "bufio" + "bytes" + "compress/gzip" + "crypto/sha256" + "errors" + "fmt" + "hash" + "io" + "io/ioutil" + "os" + "path" + "sort" + "strings" + "sync" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + digest "github.com/opencontainers/go-digest" + "github.com/vbatts/tar-split/archive/tar" +) + +// A Reader permits random access reads from a stargz file. +type Reader struct { + sr *io.SectionReader + toc *JTOC + tocDigest digest.Digest + + // m stores all non-chunk entries, keyed by name. + m map[string]*TOCEntry + + // chunks stores all TOCEntry values for regular files that + // are split up. For a file with a single chunk, it's only + // stored in m. + chunks map[string][]*TOCEntry + + decompressor Decompressor +} + +type openOpts struct { + tocOffset int64 + decompressors []Decompressor + telemetry *Telemetry +} + +// OpenOption is an option used during opening the layer +type OpenOption func(o *openOpts) error + +// WithTOCOffset option specifies the offset of TOC +func WithTOCOffset(tocOffset int64) OpenOption { + return func(o *openOpts) error { + o.tocOffset = tocOffset + return nil + } +} + +// WithDecompressors option specifies decompressors to use. +// Default is gzip-based decompressor. +func WithDecompressors(decompressors ...Decompressor) OpenOption { + return func(o *openOpts) error { + o.decompressors = decompressors + return nil + } +} + +// WithTelemetry option specifies the telemetry hooks +func WithTelemetry(telemetry *Telemetry) OpenOption { + return func(o *openOpts) error { + o.telemetry = telemetry + return nil + } +} + +// MeasureLatencyHook is a func which takes start time and records the diff +type MeasureLatencyHook func(time.Time) + +// Telemetry is a struct which defines telemetry hooks. By implementing these hooks you should be able to record +// the latency metrics of the respective steps of estargz open operation. To be used with estargz.OpenWithTelemetry(...) 
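The Aggregate helper above joins multiple errors into a single bulleted error. A quick usage sketch:

	errs := []error{errors.New("close failed"), errors.New("remove failed")}
	err := errorutil.Aggregate(errs)
	// err.Error() yields:
	//   2 error(s) occurred:
	//   	* close failed
	//   	* remove failed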
+type Telemetry struct { + GetFooterLatency MeasureLatencyHook // measure time to get stargz footer (in milliseconds) + GetTocLatency MeasureLatencyHook // measure time to GET TOC JSON (in milliseconds) + DeserializeTocLatency MeasureLatencyHook // measure time to deserialize TOC JSON (in milliseconds) +} + +// Open opens a stargz file for reading. +// The behavior is configurable using options. +// +// Note that each entry name is normalized as the path that is relative to root. +func Open(sr *io.SectionReader, opt ...OpenOption) (*Reader, error) { + var opts openOpts + for _, o := range opt { + if err := o(&opts); err != nil { + return nil, err + } + } + + gzipCompressors := []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} + decompressors := append(gzipCompressors, opts.decompressors...) + + // Determine the size to fetch. Try to fetch as many bytes as possible. + fetchSize := maxFooterSize(sr.Size(), decompressors...) + if maybeTocOffset := opts.tocOffset; maybeTocOffset > fetchSize { + if maybeTocOffset > sr.Size() { + return nil, fmt.Errorf("blob size %d is smaller than the toc offset", sr.Size()) + } + fetchSize = sr.Size() - maybeTocOffset + } + + start := time.Now() // before getting layer footer + footer := make([]byte, fetchSize) + if _, err := sr.ReadAt(footer, sr.Size()-fetchSize); err != nil { + return nil, fmt.Errorf("error reading footer: %v", err) + } + if opts.telemetry != nil && opts.telemetry.GetFooterLatency != nil { + opts.telemetry.GetFooterLatency(start) + } + + var allErr []error + var found bool + var r *Reader + for _, d := range decompressors { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + maybeTocBytes := footer[:fOffset] + _, tocOffset, tocSize, err := d.ParseFooter(footer[fOffset:]) + if err != nil { + allErr = append(allErr, err) + continue + } + if tocSize <= 0 { + tocSize = sr.Size() - tocOffset - fSize + } + if tocSize < int64(len(maybeTocBytes)) { + maybeTocBytes = maybeTocBytes[:tocSize] + } + r, err = parseTOC(d, sr, tocOffset, tocSize, maybeTocBytes, opts) + if err == nil { + found = true + break + } + allErr = append(allErr, err) + } + if !found { + return nil, errorutil.Aggregate(allErr) + } + if err := r.initFields(); err != nil { + return nil, fmt.Errorf("failed to initialize fields of entries: %v", err) + } + return r, nil +} + +// OpenFooter extracts and parses footer from the given blob. +// only supports gzip-based eStargz. +func OpenFooter(sr *io.SectionReader) (tocOffset int64, footerSize int64, rErr error) { + if sr.Size() < FooterSize && sr.Size() < legacyFooterSize { + return 0, 0, fmt.Errorf("blob size %d is smaller than the footer size", sr.Size()) + } + var footer [FooterSize]byte + if _, err := sr.ReadAt(footer[:], sr.Size()-FooterSize); err != nil { + return 0, 0, fmt.Errorf("error reading footer: %v", err) + } + var allErr []error + for _, d := range []Decompressor{new(GzipDecompressor), new(LegacyGzipDecompressor)} { + fSize := d.FooterSize() + fOffset := positive(int64(len(footer)) - fSize) + _, tocOffset, _, err := d.ParseFooter(footer[fOffset:]) + if err == nil { + return tocOffset, fSize, err + } + allErr = append(allErr, err) + } + return 0, 0, errorutil.Aggregate(allErr) +} + +// initFields populates the Reader from r.toc after decoding it from +// JSON. +// +// Unexported fields are populated and TOCEntry fields that were +// implicit in the JSON are populated. 
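A hedged sketch of calling Open with the options defined above; the commented-out line assumes some non-gzip Decompressor implementation, which this diff does not define:

	tel := &estargz.Telemetry{
		GetFooterLatency: func(start time.Time) {
			log.Printf("footer fetched in %v", time.Since(start))
		},
	}
	r, err := estargz.Open(sr, // sr: *io.SectionReader over the layer blob
		estargz.WithTelemetry(tel),
		// estargz.WithDecompressors(&someZstdDecompressor{}), // hypothetical
	)
	if err != nil {
		return err
	}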
+func (r *Reader) initFields() error { + r.m = make(map[string]*TOCEntry, len(r.toc.Entries)) + r.chunks = make(map[string][]*TOCEntry) + var lastPath string + uname := map[int]string{} + gname := map[int]string{} + var lastRegEnt *TOCEntry + for _, ent := range r.toc.Entries { + ent.Name = cleanEntryName(ent.Name) + if ent.Type == "reg" { + lastRegEnt = ent + } + if ent.Type == "chunk" { + ent.Name = lastPath + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + if ent.ChunkSize == 0 && lastRegEnt != nil { + ent.ChunkSize = lastRegEnt.Size - ent.ChunkOffset + } + } else { + lastPath = ent.Name + + if ent.Uname != "" { + uname[ent.UID] = ent.Uname + } else { + ent.Uname = uname[ent.UID] + } + if ent.Gname != "" { + gname[ent.GID] = ent.Gname + } else { + ent.Gname = gname[ent.GID] + } + + ent.modTime, _ = time.Parse(time.RFC3339, ent.ModTime3339) + + if ent.Type == "dir" { + ent.NumLink++ // Parent dir links to this directory + } + r.m[ent.Name] = ent + } + if ent.Type == "reg" && ent.ChunkSize > 0 && ent.ChunkSize < ent.Size { + r.chunks[ent.Name] = make([]*TOCEntry, 0, ent.Size/ent.ChunkSize+1) + r.chunks[ent.Name] = append(r.chunks[ent.Name], ent) + } + if ent.ChunkSize == 0 && ent.Size != 0 { + ent.ChunkSize = ent.Size + } + } + + // Populate children, add implicit directories: + for _, ent := range r.toc.Entries { + if ent.Type == "chunk" { + continue + } + // add "foo/": + // add "foo" child to "" (creating "" if necessary) + // + // add "foo/bar/": + // add "bar" child to "foo" (creating "foo" if necessary) + // + // add "foo/bar.txt": + // add "bar.txt" child to "foo" (creating "foo" if necessary) + // + // add "a/b/c/d/e/f.txt": + // create "a/b/c/d/e" node + // add "f.txt" child to "e" + + name := ent.Name + pdirName := parentDir(name) + if name == pdirName { + // This entry and its parent are the same. + // Ignore this to avoid an infinite loop caused by the self-reference. + // The example case where this can occur is when the tar contains the root + // directory itself (e.g. "./", "/"). + continue + } + pdir := r.getOrCreateDir(pdirName) + ent.NumLink++ // at least one name (ent.Name) references this entry. + if ent.Type == "hardlink" { + org, err := r.getSource(ent) + if err != nil { + return err + } + org.NumLink++ // original entry is referenced by this ent.Name. + ent = org + } + pdir.addChild(path.Base(name), ent) + } + + lastOffset := r.sr.Size() + for i := len(r.toc.Entries) - 1; i >= 0; i-- { + e := r.toc.Entries[i] + if e.isDataType() { + e.nextOffset = lastOffset + } + if e.Offset != 0 { + lastOffset = e.Offset + } + } + + return nil +} + +func (r *Reader) getSource(ent *TOCEntry) (_ *TOCEntry, err error) { + if ent.Type == "hardlink" { + org, ok := r.m[cleanEntryName(ent.LinkName)] + if !ok { + return nil, fmt.Errorf("%q is a hardlink but the linkname %q isn't found", ent.Name, ent.LinkName) + } + ent, err = r.getSource(org) + if err != nil { + return nil, err + } + } + return ent, nil +} + +func parentDir(p string) string { + dir, _ := path.Split(p) + return strings.TrimSuffix(dir, "/") +} + +func (r *Reader) getOrCreateDir(d string) *TOCEntry { + e, ok := r.m[d] + if !ok { + e = &TOCEntry{ + Name: d, + Type: "dir", + Mode: 0755, + NumLink: 2, // The directory itself (.) and the parent link to this directory.
+ } + r.m[d] = e + if d != "" { + pdir := r.getOrCreateDir(parentDir(d)) + pdir.addChild(path.Base(d), e) + } + } + return e +} + +func (r *Reader) TOCDigest() digest.Digest { + return r.tocDigest +} + +// VerifyTOC checks that the TOC JSON in the passed blob matches the +// passed digest and that the TOC JSON contains digests for all chunks +// contained in the blob. If the verification succeeds, this function +// returns TOCEntryVerifier which holds all chunk digests in the stargz blob. +func (r *Reader) VerifyTOC(tocDigest digest.Digest) (TOCEntryVerifier, error) { + // Verify the digest of TOC JSON + if r.tocDigest != tocDigest { + return nil, fmt.Errorf("invalid TOC JSON %q; want %q", r.tocDigest, tocDigest) + } + return r.Verifiers() +} + +// Verifiers returns a TOCEntryVerifier for this blob's chunks. Use VerifyTOC instead in most cases +// because this doesn't verify the TOC. +func (r *Reader) Verifiers() (TOCEntryVerifier, error) { + chunkDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the chunk digest + regDigestMap := make(map[int64]digest.Digest) // map from chunk offset to the reg file digest + var chunkDigestMapIncomplete bool + var regDigestMapIncomplete bool + var containsChunk bool + for _, e := range r.toc.Entries { + if e.Type != "reg" && e.Type != "chunk" { + continue + } + + // offset must be unique in stargz blob + _, dOK := chunkDigestMap[e.Offset] + _, rOK := regDigestMap[e.Offset] + if dOK || rOK { + return nil, fmt.Errorf("offset %d found twice", e.Offset) + } + + if e.Type == "reg" { + if e.Size == 0 { + continue // ignores empty file + } + + // record the digest of regular file payload + if e.Digest != "" { + d, err := digest.Parse(e.Digest) + if err != nil { + return nil, fmt.Errorf("failed to parse regular file digest %q: %w", e.Digest, err) + } + regDigestMap[e.Offset] = d + } else { + regDigestMapIncomplete = true + } + } else { + containsChunk = true // this layer contains "chunk" entries. + } + + // "reg" also can contain ChunkDigest (e.g. when "reg" is the first entry of + // a chunked file) + if e.ChunkDigest != "" { + d, err := digest.Parse(e.ChunkDigest) + if err != nil { + return nil, fmt.Errorf("failed to parse chunk digest %q: %w", e.ChunkDigest, err) + } + chunkDigestMap[e.Offset] = d + } else { + chunkDigestMapIncomplete = true + } + } + + if chunkDigestMapIncomplete { + // Though some chunk digests are not found, if this layer doesn't contain + // "chunk"s and all digests of "reg" files are recorded, we can use them instead. + if !containsChunk && !regDigestMapIncomplete { + return &verifier{digestMap: regDigestMap}, nil + } + return nil, fmt.Errorf("some ChunkDigest not found in TOC JSON") + } + + return &verifier{digestMap: chunkDigestMap}, nil +} + +// verifier is an implementation of TOCEntryVerifier which holds verifiers keyed by +// offset of the chunk. +type verifier struct { + digestMap map[int64]digest.Digest + digestMapMu sync.Mutex +} + +// Verifier returns a content verifier specified by the TOCEntry. +func (v *verifier) Verifier(ce *TOCEntry) (digest.Verifier, error) { + v.digestMapMu.Lock() + defer v.digestMapMu.Unlock() + d, ok := v.digestMap[ce.Offset] + if !ok { + return nil, fmt.Errorf("verifier for offset=%d,size=%d hasn't been registered", + ce.Offset, ce.ChunkSize) + } + return d.Verifier(), nil +} + +// ChunkEntryForOffset returns the TOCEntry containing the byte of the +// named file at the given offset within the file. +// Name must be absolute path or one that is relative to root.
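Putting VerifyTOC and Verifier together, a typical verification flow looks roughly like this; tocDigest is assumed to come from a trusted source (e.g. an image manifest annotation) and chunkPayload to have been fetched separately:

	v, err := r.VerifyTOC(tocDigest)
	if err != nil {
		return err // the TOC itself doesn't match the trusted digest
	}
	vr, err := v.Verifier(chunkEntry) // chunkEntry: a "reg" or "chunk" TOCEntry
	if err != nil {
		return err
	}
	if _, err := vr.Write(chunkPayload); err != nil {
		return err
	}
	if !vr.Verified() {
		return fmt.Errorf("chunk at offset %d is corrupted", chunkEntry.Offset)
	}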
+func (r *Reader) ChunkEntryForOffset(name string, offset int64) (e *TOCEntry, ok bool) { + name = cleanEntryName(name) + e, ok = r.Lookup(name) + if !ok || !e.isDataType() { + return nil, false + } + ents := r.chunks[name] + if len(ents) < 2 { + if offset >= e.ChunkSize { + return nil, false + } + return e, true + } + i := sort.Search(len(ents), func(i int) bool { + e := ents[i] + return e.ChunkOffset >= offset || (offset > e.ChunkOffset && offset < e.ChunkOffset+e.ChunkSize) + }) + if i == len(ents) { + return nil, false + } + return ents[i], true +} + +// Lookup returns the Table of Contents entry for the given path. +// +// To get the root directory, use the empty string. +// Path must be absolute path or one that is relative to root. +func (r *Reader) Lookup(path string) (e *TOCEntry, ok bool) { + path = cleanEntryName(path) + if r == nil { + return + } + e, ok = r.m[path] + if ok && e.Type == "hardlink" { + var err error + e, err = r.getSource(e) + if err != nil { + return nil, false + } + } + return +} + +// OpenFile returns the reader of the specified file payload. +// +// Name must be absolute path or one that is relative to root. +func (r *Reader) OpenFile(name string) (*io.SectionReader, error) { + name = cleanEntryName(name) + ent, ok := r.Lookup(name) + if !ok { + // TODO: come up with some error plan. This is lazy: + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: os.ErrNotExist, + } + } + if ent.Type != "reg" { + return nil, &os.PathError{ + Path: name, + Op: "OpenFile", + Err: errors.New("not a regular file"), + } + } + fr := &fileReader{ + r: r, + size: ent.Size, + ents: r.getChunks(ent), + } + return io.NewSectionReader(fr, 0, fr.size), nil +} + +func (r *Reader) getChunks(ent *TOCEntry) []*TOCEntry { + if ents, ok := r.chunks[ent.Name]; ok { + return ents + } + return []*TOCEntry{ent} +} + +type fileReader struct { + r *Reader + size int64 + ents []*TOCEntry // 1 or more reg/chunk entries +} + +func (fr *fileReader) ReadAt(p []byte, off int64) (n int, err error) { + if off >= fr.size { + return 0, io.EOF + } + if off < 0 { + return 0, errors.New("invalid offset") + } + var i int + if len(fr.ents) > 1 { + i = sort.Search(len(fr.ents), func(i int) bool { + return fr.ents[i].ChunkOffset >= off + }) + if i == len(fr.ents) { + i = len(fr.ents) - 1 + } + } + ent := fr.ents[i] + if ent.ChunkOffset > off { + if i == 0 { + return 0, errors.New("internal error; first chunk offset is non-zero") + } + ent = fr.ents[i-1] + } + + // If ent is a chunk of a large file, adjust the ReadAt + // offset by the chunk's offset. + off -= ent.ChunkOffset + + finalEnt := fr.ents[len(fr.ents)-1] + compressedOff := ent.Offset + // compressedBytesRemain is the number of compressed bytes in this + // file remaining, over 1+ chunks. 
+ compressedBytesRemain := finalEnt.NextOffset() - compressedOff + + sr := io.NewSectionReader(fr.r.sr, compressedOff, compressedBytesRemain) + + const maxRead = 2 << 20 + var bufSize = maxRead + if compressedBytesRemain < maxRead { + bufSize = int(compressedBytesRemain) + } + + br := bufio.NewReaderSize(sr, bufSize) + if _, err := br.Peek(bufSize); err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.peek: %v", err) + } + + dr, err := fr.r.decompressor.Reader(br) + if err != nil { + return 0, fmt.Errorf("fileReader.ReadAt.decompressor.Reader: %v", err) + } + defer dr.Close() + if n, err := io.CopyN(ioutil.Discard, dr, off); n != off || err != nil { + return 0, fmt.Errorf("discard of %d bytes = %v, %v", off, n, err) + } + return io.ReadFull(dr, p) +} + +// A Writer writes stargz files. +// +// Use NewWriter to create a new Writer. +type Writer struct { + bw *bufio.Writer + cw *countWriter + toc *JTOC + diffHash hash.Hash // SHA-256 of uncompressed tar + + closed bool + gz io.WriteCloser + lastUsername map[int]string + lastGroupname map[int]string + compressor Compressor + + // ChunkSize optionally controls the maximum number of bytes + // of data of a regular file that can be written in one gzip + // stream before a new gzip stream is started. + // Zero means to use a default, currently 4 MiB. + ChunkSize int +} + +// currentCompressionWriter writes to the current w.gz field, which can +// change throughout writing a tar entry. +// +// Additionally, it updates w's SHA-256 of the uncompressed bytes +// of the tar file. +type currentCompressionWriter struct{ w *Writer } + +func (ccw currentCompressionWriter) Write(p []byte) (int, error) { + ccw.w.diffHash.Write(p) + if ccw.w.gz == nil { + if err := ccw.w.condOpenGz(); err != nil { + return 0, err + } + } + return ccw.w.gz.Write(p) +} + +func (w *Writer) chunkSize() int { + if w.ChunkSize <= 0 { + return 4 << 20 + } + return w.ChunkSize +} + +// Unpack decompresses the given estargz blob and returns a ReadCloser of the tar blob. +// TOC JSON and footer are removed. +func Unpack(sr *io.SectionReader, c Decompressor) (io.ReadCloser, error) { + footerSize := c.FooterSize() + if sr.Size() < footerSize { + return nil, fmt.Errorf("blob is too small; %d < %d", sr.Size(), footerSize) + } + footerOffset := sr.Size() - footerSize + footer := make([]byte, footerSize) + if _, err := sr.ReadAt(footer, footerOffset); err != nil { + return nil, err + } + blobPayloadSize, _, _, err := c.ParseFooter(footer) + if err != nil { + return nil, fmt.Errorf("failed to parse footer: %w", err) + } + return c.Reader(io.LimitReader(sr, blobPayloadSize)) +} + +// NewWriter returns a new stargz writer (gzip-based) writing to w. +// +// The writer must be closed to write its trailing table of contents. +func NewWriter(w io.Writer) *Writer { + return NewWriterLevel(w, gzip.BestCompression) +} + +// NewWriterLevel returns a new stargz writer (gzip-based) writing to w. +// The compression level is configurable. +// +// The writer must be closed to write its trailing table of contents. +func NewWriterLevel(w io.Writer, compressionLevel int) *Writer { + return NewWriterWithCompressor(w, NewGzipCompressorWithLevel(compressionLevel)) +} + +// NewWriterWithCompressor returns a new stargz writer writing to w. +// The compression method is configurable. +// +// The writer must be closed to write its trailing table of contents. 
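For the read path defined above, a short sketch: Lookup resolves metadata, OpenFile returns a random-access reader over the decompressed payload, and ChunkEntryForOffset maps a file offset to the chunk covering it (the entry name here is hypothetical):

	e, ok := r.Lookup("etc/hostname")
	if !ok {
		return os.ErrNotExist
	}
	fr, err := r.OpenFile("etc/hostname")
	if err != nil {
		return err
	}
	buf := make([]byte, e.Size)
	if _, err := fr.ReadAt(buf, 0); err != nil && err != io.EOF {
		return err
	}
	if ce, ok := r.ChunkEntryForOffset("etc/hostname", 0); ok {
		fmt.Println("first chunk lives at blob offset", ce.Offset)
	}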
+func NewWriterWithCompressor(w io.Writer, c Compressor) *Writer { + bw := bufio.NewWriter(w) + cw := &countWriter{w: bw} + return &Writer{ + bw: bw, + cw: cw, + toc: &JTOC{Version: 1}, + diffHash: sha256.New(), + compressor: c, + } +} + +// Close writes the stargz's table of contents and flushes all the +// buffers, returning any error. +func (w *Writer) Close() (digest.Digest, error) { + if w.closed { + return "", nil + } + defer func() { w.closed = true }() + + if err := w.closeGz(); err != nil { + return "", err + } + + // Write the TOC index and footer. + tocDigest, err := w.compressor.WriteTOCAndFooter(w.cw, w.cw.n, w.toc, w.diffHash) + if err != nil { + return "", err + } + if err := w.bw.Flush(); err != nil { + return "", err + } + + return tocDigest, nil +} + +func (w *Writer) closeGz() error { + if w.closed { + return errors.New("write on closed Writer") + } + if w.gz != nil { + if err := w.gz.Close(); err != nil { + return err + } + w.gz = nil + } + return nil +} + +// nameIfChanged returns name, unless it was already the value of (*mp)[id], +// in which case it returns the empty string. +func (w *Writer) nameIfChanged(mp *map[int]string, id int, name string) string { + if name == "" { + return "" + } + if *mp == nil { + *mp = make(map[int]string) + } + if (*mp)[id] == name { + return "" + } + (*mp)[id] = name + return name +} + +func (w *Writer) condOpenGz() (err error) { + if w.gz == nil { + w.gz, err = w.compressor.Writer(w.cw) + } + return +} + +// AppendTar reads the tar or tar.gz file from r and appends +// each of its contents to w. +// +// The input r can optionally be gzip compressed but the output will +// always be compressed by the specified compressor. +func (w *Writer) AppendTar(r io.Reader) error { + return w.appendTar(r, false) +} + +// AppendTarLossLess reads the tar or tar.gz file from r and appends +// each of its contents to w. +// +// The input r can optionally be gzip compressed but the output will +// always be compressed by the specified compressor. +// +// The difference from AppendTar is that this writes +// the input tar stream into w without any modification (e.g. to header bytes). +// +// Note that if the input tar stream already contains TOC JSON, this returns an +// error because w cannot overwrite the TOC JSON with the one generated by w without +// lossy modification. To avoid this error, if the input stream is known to be stargz/estargz, +// you should decompress it and remove the TOC JSON in advance. +func (w *Writer) AppendTarLossLess(r io.Reader) error { + return w.appendTar(r, true) +} + +func (w *Writer) appendTar(r io.Reader, lossless bool) error { + var src io.Reader + br := bufio.NewReader(r) + if isGzip(br) { + zr, _ := gzip.NewReader(br) + src = zr + } else { + src = io.Reader(br) + } + dst := currentCompressionWriter{w} + var tw *tar.Writer + if !lossless { + tw = tar.NewWriter(dst) // use tar writer only when this isn't lossless mode. + } + tr := tar.NewReader(src) + if lossless { + tr.RawAccounting = true + } + for { + h, err := tr.Next() + if err == io.EOF { + if lossless { + if remain := tr.RawBytes(); len(remain) > 0 { + // Collect the remaining null bytes.
+ // https://github.com/vbatts/tar-split/blob/80a436fd6164c557b131f7c59ed69bd81af69761/concept/main.go#L49-L53 + if _, err := dst.Write(remain); err != nil { + return err + } + } + } + break + } + if err != nil { + return fmt.Errorf("error reading from source tar: tar.Reader.Next: %v", err) + } + if cleanEntryName(h.Name) == TOCTarName { + // It is possible for a layer to be "stargzified" twice during the + // distribution lifecycle. So we reserve "TOCTarName" here to avoid + // duplicated entries in the resulting layer. + if lossless { + // We cannot handle this in lossless way. + return fmt.Errorf("existing TOC JSON is not allowed; decompress layer before append") + } + continue + } + + xattrs := make(map[string][]byte) + const xattrPAXRecordsPrefix = "SCHILY.xattr." + if h.PAXRecords != nil { + for k, v := range h.PAXRecords { + if strings.HasPrefix(k, xattrPAXRecordsPrefix) { + xattrs[k[len(xattrPAXRecordsPrefix):]] = []byte(v) + } + } + } + ent := &TOCEntry{ + Name: h.Name, + Mode: h.Mode, + UID: h.Uid, + GID: h.Gid, + Uname: w.nameIfChanged(&w.lastUsername, h.Uid, h.Uname), + Gname: w.nameIfChanged(&w.lastGroupname, h.Gid, h.Gname), + ModTime3339: formatModtime(h.ModTime), + Xattrs: xattrs, + } + if err := w.condOpenGz(); err != nil { + return err + } + if tw != nil { + if err := tw.WriteHeader(h); err != nil { + return err + } + } else { + if _, err := dst.Write(tr.RawBytes()); err != nil { + return err + } + } + switch h.Typeflag { + case tar.TypeLink: + ent.Type = "hardlink" + ent.LinkName = h.Linkname + case tar.TypeSymlink: + ent.Type = "symlink" + ent.LinkName = h.Linkname + case tar.TypeDir: + ent.Type = "dir" + case tar.TypeReg: + ent.Type = "reg" + ent.Size = h.Size + case tar.TypeChar: + ent.Type = "char" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeBlock: + ent.Type = "block" + ent.DevMajor = int(h.Devmajor) + ent.DevMinor = int(h.Devminor) + case tar.TypeFifo: + ent.Type = "fifo" + default: + return fmt.Errorf("unsupported input tar entry %q", h.Typeflag) + } + + // We need to keep a reference to the TOC entry for regular files, so that we + // can fill the digest later. 
+ var regFileEntry *TOCEntry + var payloadDigest digest.Digester + if h.Typeflag == tar.TypeReg { + regFileEntry = ent + payloadDigest = digest.Canonical.Digester() + } + + if h.Typeflag == tar.TypeReg && ent.Size > 0 { + var written int64 + totalSize := ent.Size // save it before we destroy ent + tee := io.TeeReader(tr, payloadDigest.Hash()) + for written < totalSize { + if err := w.closeGz(); err != nil { + return err + } + + chunkSize := int64(w.chunkSize()) + remain := totalSize - written + if remain < chunkSize { + chunkSize = remain + } else { + ent.ChunkSize = chunkSize + } + ent.Offset = w.cw.n + ent.ChunkOffset = written + chunkDigest := digest.Canonical.Digester() + + if err := w.condOpenGz(); err != nil { + return err + } + + teeChunk := io.TeeReader(tee, chunkDigest.Hash()) + var out io.Writer + if tw != nil { + out = tw + } else { + out = dst + } + if _, err := io.CopyN(out, teeChunk, chunkSize); err != nil { + return fmt.Errorf("error copying %q: %v", h.Name, err) + } + ent.ChunkDigest = chunkDigest.Digest().String() + w.toc.Entries = append(w.toc.Entries, ent) + written += chunkSize + ent = &TOCEntry{ + Name: h.Name, + Type: "chunk", + } + } + } else { + w.toc.Entries = append(w.toc.Entries, ent) + } + if payloadDigest != nil { + regFileEntry.Digest = payloadDigest.Digest().String() + } + if tw != nil { + if err := tw.Flush(); err != nil { + return err + } + } + } + remainDest := ioutil.Discard + if lossless { + remainDest = dst // Preserve the remaining bytes in lossless mode + } + _, err := io.Copy(remainDest, src) + return err +} + +// DiffID returns the SHA-256 of the uncompressed tar bytes. +// It is only valid to call DiffID after Close. +func (w *Writer) DiffID() string { + return fmt.Sprintf("sha256:%x", w.diffHash.Sum(nil)) +} + +func maxFooterSize(blobSize int64, decompressors ...Decompressor) (res int64) { + for _, d := range decompressors { + if s := d.FooterSize(); res < s && s <= blobSize { + res = s + } + } + return +} + +func parseTOC(d Decompressor, sr *io.SectionReader, tocOff, tocSize int64, tocBytes []byte, opts openOpts) (*Reader, error) { + if len(tocBytes) > 0 { + start := time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err == nil { + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil + } + } + + start := time.Now() + tocBytes = make([]byte, tocSize) + if _, err := sr.ReadAt(tocBytes, tocOff); err != nil { + return nil, fmt.Errorf("error reading %d byte TOC targz: %v", len(tocBytes), err) + } + if opts.telemetry != nil && opts.telemetry.GetTocLatency != nil { + opts.telemetry.GetTocLatency(start) + } + start = time.Now() + toc, tocDgst, err := d.ParseTOC(bytes.NewReader(tocBytes)) + if err != nil { + return nil, err + } + if opts.telemetry != nil && opts.telemetry.DeserializeTocLatency != nil { + opts.telemetry.DeserializeTocLatency(start) + } + return &Reader{ + sr: sr, + toc: toc, + tocDigest: tocDgst, + decompressor: d, + }, nil +} + +func formatModtime(t time.Time) string { + if t.IsZero() || t.Unix() == 0 { + return "" + } + return t.UTC().Round(time.Second).Format(time.RFC3339) +} + +func cleanEntryName(name string) string { + // Use path.Clean to consistently deal with path separators across platforms. + return strings.TrimPrefix(path.Clean("/"+name), "/") +} + +// countWriter counts how many bytes have been written to its wrapped +// io.Writer. 
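A minimal writer round-trip with the APIs above (tarStream is an assumed io.Reader over a tar or tar.gz archive):

	var buf bytes.Buffer
	w := estargz.NewWriterLevel(&buf, gzip.BestSpeed)
	w.ChunkSize = 4 << 20 // split regular files into 4 MiB gzip members
	if err := w.AppendTar(tarStream); err != nil {
		return err
	}
	tocDigest, err := w.Close() // writes the TOC entry and footer
	if err != nil {
		return err
	}
	fmt.Println("toc:", tocDigest, "diffid:", w.DiffID()) // DiffID is valid only after Close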
+type countWriter struct { + w io.Writer + n int64 +} + +func (cw *countWriter) Write(p []byte) (n int, err error) { + n, err = cw.w.Write(p) + cw.n += int64(n) + return +} + +// isGzip reports whether br is positioned right before an upcoming gzip stream. +// It does not consume any bytes from br. +func isGzip(br *bufio.Reader) bool { + const ( + gzipID1 = 0x1f + gzipID2 = 0x8b + gzipDeflate = 8 + ) + peek, _ := br.Peek(3) + return len(peek) >= 3 && peek[0] == gzipID1 && peek[1] == gzipID2 && peek[2] == gzipDeflate +} + +func positive(n int64) int64 { + if n < 0 { + return 0 + } + return n +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go new file mode 100644 index 0000000000..591d7a62e1 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/gzip.go @@ -0,0 +1,237 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "encoding/binary" + "encoding/json" + "fmt" + "hash" + "io" + "strconv" + + digest "github.com/opencontainers/go-digest" +) + +type gzipCompression struct { + *GzipCompressor + *GzipDecompressor +} + +func newGzipCompressionWithLevel(level int) Compression { + return &gzipCompression{ + &GzipCompressor{level}, + &GzipDecompressor{}, + } +} + +func NewGzipCompressor() *GzipCompressor { + return &GzipCompressor{gzip.BestCompression} +} + +func NewGzipCompressorWithLevel(level int) *GzipCompressor { + return &GzipCompressor{level} +} + +type GzipCompressor struct { + compressionLevel int +} + +func (gc *GzipCompressor) Writer(w io.Writer) (io.WriteCloser, error) { + return gzip.NewWriterLevel(w, gc.compressionLevel) +} + +func (gc *GzipCompressor) WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (digest.Digest, error) { + tocJSON, err := json.MarshalIndent(toc, "", "\t") + if err != nil { + return "", err + } + gz, _ := gzip.NewWriterLevel(w, gc.compressionLevel) + gw := io.Writer(gz) + if diffHash != nil { + gw = io.MultiWriter(gz, diffHash) + } + tw := tar.NewWriter(gw) + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: TOCTarName, + Size: int64(len(tocJSON)), + }); err != nil { + return "", err + } + if _, err := tw.Write(tocJSON); err != nil { + return "", err + } + + if err := tw.Close(); err != nil { + return "", err + } + if err := gz.Close(); err != nil { + return "", err + } + if _, err := w.Write(gzipFooterBytes(off)); err != nil { + return "", err + } + return digest.FromBytes(tocJSON), nil +} + +// gzipFooterBytes returns the 51 bytes footer. 
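The footer written below is an empty gzip stream whose extra field carries the TOC offset: subfield IDs 'S' and 'G', a 2-byte little-endian subfield length, then the offset as 16 hex digits followed by the magic "STARGZ". A decoding sketch that mirrors ParseFooter, omitting the validation that ParseFooter performs:

	zr, err := gzip.NewReader(bytes.NewReader(footer)) // footer: trailing FooterSize bytes
	if err != nil {
		return 0, err
	}
	defer zr.Close()
	extra := zr.Header.Extra // 4-byte subfield header + 16 hex digits + "STARGZ"
	tocOffset, err := strconv.ParseInt(string(extra[4:20]), 16, 64)
	if err != nil {
		return 0, err
	}
	return tocOffset, nil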
+func gzipFooterBytes(tocOff int64) []byte { + buf := bytes.NewBuffer(make([]byte, 0, FooterSize)) + gz, _ := gzip.NewWriterLevel(buf, gzip.NoCompression) // MUST be NoCompression to keep 51 bytes + + // Extra header indicating the offset of TOCJSON + // https://tools.ietf.org/html/rfc1952#section-2.3.1.1 + header := make([]byte, 4) + header[0], header[1] = 'S', 'G' + subfield := fmt.Sprintf("%016xSTARGZ", tocOff) + binary.LittleEndian.PutUint16(header[2:4], uint16(len(subfield))) // little-endian per RFC1952 + gz.Header.Extra = append(header, []byte(subfield)...) + gz.Close() + if buf.Len() != FooterSize { + panic(fmt.Sprintf("footer buffer = %d, not %d", buf.Len(), FooterSize)) + } + return buf.Bytes() +} + +type GzipDecompressor struct{} + +func (gz *GzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { + return gzip.NewReader(r) +} + +func (gz *GzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + return parseTOCEStargz(r) +} + +func (gz *GzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { + if len(p) != FooterSize { + return 0, 0, 0, fmt.Errorf("invalid length %d cannot be parsed", len(p)) + } + zr, err := gzip.NewReader(bytes.NewReader(p)) + if err != nil { + return 0, 0, 0, err + } + defer zr.Close() + extra := zr.Header.Extra + si1, si2, subfieldlen, subfield := extra[0], extra[1], extra[2:4], extra[4:] + if si1 != 'S' || si2 != 'G' { + return 0, 0, 0, fmt.Errorf("invalid subfield IDs: %q, %q; want S, G", si1, si2) + } + if slen := binary.LittleEndian.Uint16(subfieldlen); slen != uint16(16+len("STARGZ")) { + return 0, 0, 0, fmt.Errorf("invalid length of subfield %d; want %d", slen, 16+len("STARGZ")) + } + if string(subfield[16:]) != "STARGZ" { + return 0, 0, 0, fmt.Errorf("STARGZ magic string must be included in the footer subfield") + } + tocOffset, err = strconv.ParseInt(string(subfield[:16]), 16, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) + } + return tocOffset, tocOffset, 0, nil +} + +func (gz *GzipDecompressor) FooterSize() int64 { + return FooterSize +} + +func (gz *GzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +type LegacyGzipDecompressor struct{} + +func (gz *LegacyGzipDecompressor) Reader(r io.Reader) (io.ReadCloser, error) { + return gzip.NewReader(r) +} + +func (gz *LegacyGzipDecompressor) ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + return parseTOCEStargz(r) +} + +func (gz *LegacyGzipDecompressor) ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) { + if len(p) != legacyFooterSize { + return 0, 0, 0, fmt.Errorf("legacy: invalid length %d cannot be parsed", len(p)) + } + zr, err := gzip.NewReader(bytes.NewReader(p)) + if err != nil { + return 0, 0, 0, fmt.Errorf("legacy: failed to get footer gzip reader: %w", err) + } + defer zr.Close() + extra := zr.Header.Extra + if len(extra) != 16+len("STARGZ") { + return 0, 0, 0, fmt.Errorf("legacy: invalid size of stargz extra field") + } + if string(extra[16:]) != "STARGZ" { + return 0, 0, 0, fmt.Errorf("legacy: magic string STARGZ not found") + } + tocOffset, err = strconv.ParseInt(string(extra[:16]), 16, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("legacy: failed to parse toc offset: %w", err) + } + return tocOffset, tocOffset, 0, nil +} + +func (gz *LegacyGzipDecompressor) FooterSize() int64 { + return legacyFooterSize +} + +func (gz
*LegacyGzipDecompressor) DecompressTOC(r io.Reader) (tocJSON io.ReadCloser, err error) { + return decompressTOCEStargz(r) +} + +func parseTOCEStargz(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) { + tr, err := decompressTOCEStargz(r) + if err != nil { + return nil, "", err + } + dgstr := digest.Canonical.Digester() + toc = new(JTOC) + if err := json.NewDecoder(io.TeeReader(tr, dgstr.Hash())).Decode(&toc); err != nil { + return nil, "", fmt.Errorf("error decoding TOC JSON: %v", err) + } + if err := tr.Close(); err != nil { + return nil, "", err + } + return toc, dgstr.Digest(), nil +} + +func decompressTOCEStargz(r io.Reader) (tocJSON io.ReadCloser, err error) { + zr, err := gzip.NewReader(r) + if err != nil { + return nil, fmt.Errorf("malformed TOC gzip header: %v", err) + } + zr.Multistream(false) + tr := tar.NewReader(zr) + h, err := tr.Next() + if err != nil { + return nil, fmt.Errorf("failed to find tar header in TOC gzip stream: %v", err) + } + if h.Name != TOCTarName { + return nil, fmt.Errorf("TOC tar entry had name %q; expected %q", h.Name, TOCTarName) + } + return readCloser{tr, zr.Close}, nil +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go new file mode 100644 index 0000000000..1de13a4705 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/testutil.go @@ -0,0 +1,2009 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "bytes" + "compress/gzip" + "crypto/sha256" + "encoding/json" + "errors" + "fmt" + "io" + "io/ioutil" + "os" + "reflect" + "sort" + "strings" + "testing" + "time" + + "github.com/containerd/stargz-snapshotter/estargz/errorutil" + "github.com/klauspost/compress/zstd" + digest "github.com/opencontainers/go-digest" +) + +// TestingController is Compression with some helper methods necessary for testing. +type TestingController interface { + Compression + CountStreams(*testing.T, []byte) int + DiffIDOf(*testing.T, []byte) string + String() string +} + +// CompressionTestSuite tests this pkg with controllers can build valid eStargz blobs and parse them. +func CompressionTestSuite(t *testing.T, controllers ...TestingController) { + t.Run("testBuild", func(t *testing.T) { t.Parallel(); testBuild(t, controllers...) }) + t.Run("testDigestAndVerify", func(t *testing.T) { t.Parallel(); testDigestAndVerify(t, controllers...) }) + t.Run("testWriteAndOpen", func(t *testing.T) { t.Parallel(); testWriteAndOpen(t, controllers...) 
}) +} + +const ( + uncompressedType int = iota + gzipType + zstdType +) + +var srcCompressions = []int{ + uncompressedType, + gzipType, + zstdType, +} + +var allowedPrefix = [4]string{"", "./", "/", "../"} + +// testBuild tests the resulting stargz blob built by this pkg has the same +// contents as the normal stargz blob. +func testBuild(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + chunkSize int + in []tarEntry + }{ + { + name: "regfiles and directories", + chunkSize: 4, + in: tarOf( + file("foo", "test1"), + dir("foo2/"), + file("foo2/bar", "test2", xAttr(map[string]string{"test": "sample"})), + ), + }, + { + name: "empty files", + chunkSize: 4, + in: tarOf( + file("foo", "tttttt"), + file("foo_empty", ""), + file("foo2", "tttttt"), + file("foo_empty2", ""), + file("foo3", "tttttt"), + file("foo_empty3", ""), + file("foo4", "tttttt"), + file("foo_empty4", ""), + file("foo5", "tttttt"), + file("foo_empty5", ""), + file("foo6", "tttttt"), + ), + }, + { + name: "various files", + chunkSize: 4, + in: tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "testbartestbar", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + { + name: "no contents", + chunkSize: 4, + in: tarOf( + file("baz.txt", ""), + symlink("barlink", "test/bar.txt"), + dir("test/"), + dir("dev/"), + blockdev("dev/testblock", 3, 4), + fifo("dev/testfifo"), + chardev("dev/testchar1", 5, 6), + file("test/bar.txt", "", xAttr(map[string]string{"test2": "sample2"})), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + chardev("dev/testchar2", 1, 2), + ), + }, + } + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + for _, prefix := range allowedPrefix { + prefix := prefix + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,src=%d,format=%s", cl, prefix, srcCompression, srcTarFormat), func(t *testing.T) { + tarBlob := buildTar(t, tt.in, prefix, srcTarFormat) + // Test divideEntries() + entries, err := sortEntries(tarBlob, nil, nil) // identical order + if err != nil { + t.Fatalf("failed to parse tar: %v", err) + } + var merged []*entry + for _, part := range divideEntries(entries, 4) { + merged = append(merged, part...) 
+ } + if !reflect.DeepEqual(entries, merged) { + for _, e := range entries { + t.Logf("Original: %v", e.header) + } + for _, e := range merged { + t.Logf("Merged: %v", e.header) + } + t.Errorf("divided entries couldn't be merged") + return + } + + // Prepare sample data + wantBuf := new(bytes.Buffer) + sw := NewWriterWithCompressor(wantBuf, cl) + sw.ChunkSize = tt.chunkSize + if err := sw.AppendTar(tarBlob); err != nil { + t.Fatalf("failed to append tar to want stargz: %v", err) + } + if _, err := sw.Close(); err != nil { + t.Fatalf("failed to prepare want stargz: %v", err) + } + wantData := wantBuf.Bytes() + want, err := Open(io.NewSectionReader( + bytes.NewReader(wantData), 0, int64(len(wantData))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse the want stargz: %v", err) + } + + // Prepare testing data + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(tt.chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to build stargz: %v", err) + } + defer rc.Close() + gotBuf := new(bytes.Buffer) + if _, err := io.Copy(gotBuf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + gotData := gotBuf.Bytes() + got, err := Open(io.NewSectionReader( + bytes.NewReader(gotBuf.Bytes()), 0, int64(len(gotData))), + WithDecompressors(cl), + ) + if err != nil { + t.Fatalf("failed to parse the got stargz: %v", err) + } + + // Check DiffID is properly calculated + rc.Close() + diffID := rc.DiffID() + wantDiffID := cl.DiffIDOf(t, gotData) + if diffID.String() != wantDiffID { + t.Errorf("DiffID = %q; want %q", diffID, wantDiffID) + } + + // Compare as stargz + if !isSameVersion(t, cl, wantData, gotData) { + t.Errorf("built stargz hasn't same json") + return + } + if !isSameEntries(t, want, got) { + t.Errorf("built stargz isn't same as the original") + return + } + + // Compare as tar.gz + if !isSameTarGz(t, cl, wantData, gotData) { + t.Errorf("built stargz isn't same tar.gz") + return + } + }) + } + } + } + } + } +} + +func isSameTarGz(t *testing.T, controller TestingController, a, b []byte) bool { + aGz, err := controller.Reader(bytes.NewReader(a)) + if err != nil { + t.Fatalf("failed to read A") + } + defer aGz.Close() + bGz, err := controller.Reader(bytes.NewReader(b)) + if err != nil { + t.Fatalf("failed to read B") + } + defer bGz.Close() + + // Same as tar's Next() method but ignores landmarks and TOCJSON file + next := func(r *tar.Reader) (h *tar.Header, err error) { + for { + if h, err = r.Next(); err != nil { + return + } + if h.Name != PrefetchLandmark && + h.Name != NoPrefetchLandmark && + h.Name != TOCTarName { + return + } + } + } + + aTar := tar.NewReader(aGz) + bTar := tar.NewReader(bGz) + for { + // Fetch and parse next header. 
+ aH, aErr := next(aTar) + bH, bErr := next(bTar) + if aErr != nil || bErr != nil { + if aErr == io.EOF && bErr == io.EOF { + break + } + t.Fatalf("Failed to parse tar file: A: %v, B: %v", aErr, bErr) + } + if !reflect.DeepEqual(aH, bH) { + t.Logf("different header (A = %v; B = %v)", aH, bH) + return false + + } + aFile, err := ioutil.ReadAll(aTar) + if err != nil { + t.Fatal("failed to read tar payload of A") + } + bFile, err := ioutil.ReadAll(bTar) + if err != nil { + t.Fatal("failed to read tar payload of B") + } + if !bytes.Equal(aFile, bFile) { + t.Logf("different tar payload (A = %q; B = %q)", string(a), string(b)) + return false + } + } + + return true +} + +func isSameVersion(t *testing.T, controller TestingController, a, b []byte) bool { + aJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(a), 0, int64(len(a))), controller) + if err != nil { + t.Fatalf("failed to parse A: %v", err) + } + bJTOC, _, err := parseStargz(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), controller) + if err != nil { + t.Fatalf("failed to parse B: %v", err) + } + t.Logf("A: TOCJSON: %v", dumpTOCJSON(t, aJTOC)) + t.Logf("B: TOCJSON: %v", dumpTOCJSON(t, bJTOC)) + return aJTOC.Version == bJTOC.Version +} + +func isSameEntries(t *testing.T, a, b *Reader) bool { + aroot, ok := a.Lookup("") + if !ok { + t.Fatalf("failed to get root of A") + } + broot, ok := b.Lookup("") + if !ok { + t.Fatalf("failed to get root of B") + } + aEntry := stargzEntry{aroot, a} + bEntry := stargzEntry{broot, b} + return contains(t, aEntry, bEntry) && contains(t, bEntry, aEntry) +} + +func compressBlob(t *testing.T, src *io.SectionReader, srcCompression int) *io.SectionReader { + buf := new(bytes.Buffer) + var w io.WriteCloser + var err error + if srcCompression == gzipType { + w = gzip.NewWriter(buf) + } else if srcCompression == zstdType { + w, err = zstd.NewWriter(buf) + if err != nil { + t.Fatalf("failed to init zstd writer: %v", err) + } + } else { + return src + } + src.Seek(0, io.SeekStart) + if _, err := io.Copy(w, src); err != nil { + t.Fatalf("failed to compress source") + } + if err := w.Close(); err != nil { + t.Fatalf("failed to finalize compress source") + } + data := buf.Bytes() + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) + +} + +type stargzEntry struct { + e *TOCEntry + r *Reader +} + +// contains checks if all child entries in "b" are also contained in "a". +// This function also checks if the files/chunks contain the same contents among "a" and "b". +func contains(t *testing.T, a, b stargzEntry) bool { + ae, ar := a.e, a.r + be, br := b.e, b.r + t.Logf("Comparing: %q vs %q", ae.Name, be.Name) + if !equalEntry(ae, be) { + t.Logf("%q != %q: entry: a: %v, b: %v", ae.Name, be.Name, ae, be) + return false + } + if ae.Type == "dir" { + t.Logf("Directory: %q vs %q: %v vs %v", ae.Name, be.Name, + allChildrenName(ae), allChildrenName(be)) + iscontain := true + ae.ForeachChild(func(aBaseName string, aChild *TOCEntry) bool { + // Walk through all files on this stargz file. + + if aChild.Name == PrefetchLandmark || + aChild.Name == NoPrefetchLandmark { + return true // Ignore landmarks + } + + // Ignore a TOCEntry of "./" (formated as "" by stargz lib) on root directory + // because this points to the root directory itself. 
+ if aChild.Name == "" && ae.Name == "" { + return true + } + + bChild, ok := be.LookupChild(aBaseName) + if !ok { + t.Logf("%q (base: %q): not found in b: %v", + ae.Name, aBaseName, allChildrenName(be)) + iscontain = false + return false + } + + childcontain := contains(t, stargzEntry{aChild, a.r}, stargzEntry{bChild, b.r}) + if !childcontain { + t.Logf("%q != %q: non-equal dir", ae.Name, be.Name) + iscontain = false + return false + } + return true + }) + return iscontain + } else if ae.Type == "reg" { + af, err := ar.OpenFile(ae.Name) + if err != nil { + t.Fatalf("failed to open file %q on A: %v", ae.Name, err) + } + bf, err := br.OpenFile(be.Name) + if err != nil { + t.Fatalf("failed to open file %q on B: %v", be.Name, err) + } + + var nr int64 + for nr < ae.Size { + abytes, anext, aok := readOffset(t, af, nr, a) + bbytes, bnext, bok := readOffset(t, bf, nr, b) + if !aok && !bok { + break + } else if !(aok && bok) || anext != bnext { + t.Logf("%q != %q (offset=%d): chunk existence a=%v vs b=%v, anext=%v vs bnext=%v", + ae.Name, be.Name, nr, aok, bok, anext, bnext) + return false + } + nr = anext + if !bytes.Equal(abytes, bbytes) { + t.Logf("%q != %q: different contents %v vs %v", + ae.Name, be.Name, string(abytes), string(bbytes)) + return false + } + } + return true + } + + return true +} + +func allChildrenName(e *TOCEntry) (children []string) { + e.ForeachChild(func(baseName string, _ *TOCEntry) bool { + children = append(children, baseName) + return true + }) + return +} + +func equalEntry(a, b *TOCEntry) bool { + // Here, we selectively compare fileds that we are interested in. + return a.Name == b.Name && + a.Type == b.Type && + a.Size == b.Size && + a.ModTime3339 == b.ModTime3339 && + a.Stat().ModTime().Equal(b.Stat().ModTime()) && // modTime time.Time + a.LinkName == b.LinkName && + a.Mode == b.Mode && + a.UID == b.UID && + a.GID == b.GID && + a.Uname == b.Uname && + a.Gname == b.Gname && + (a.Offset > 0) == (b.Offset > 0) && + (a.NextOffset() > 0) == (b.NextOffset() > 0) && + a.DevMajor == b.DevMajor && + a.DevMinor == b.DevMinor && + a.NumLink == b.NumLink && + reflect.DeepEqual(a.Xattrs, b.Xattrs) && + // chunk-related infomations aren't compared in this function. 
+ // ChunkOffset int64 `json:"chunkOffset,omitempty"` + // ChunkSize int64 `json:"chunkSize,omitempty"` + // children map[string]*TOCEntry + a.Digest == b.Digest +} + +func readOffset(t *testing.T, r *io.SectionReader, offset int64, e stargzEntry) ([]byte, int64, bool) { + ce, ok := e.r.ChunkEntryForOffset(e.e.Name, offset) + if !ok { + return nil, 0, false + } + data := make([]byte, ce.ChunkSize) + t.Logf("Offset: %v, NextOffset: %v", ce.Offset, ce.NextOffset()) + n, err := r.ReadAt(data, ce.ChunkOffset) + if err != nil { + t.Fatalf("failed to read file payload of %q (offset:%d,size:%d): %v", + e.e.Name, ce.ChunkOffset, ce.ChunkSize, err) + } + if int64(n) != ce.ChunkSize { + t.Fatalf("unexpected copied data size %d; want %d", + n, ce.ChunkSize) + } + return data[:n], offset + ce.ChunkSize, true +} + +func dumpTOCJSON(t *testing.T, tocJSON *JTOC) string { + jtocData, err := json.Marshal(*tocJSON) + if err != nil { + t.Fatalf("failed to marshal TOC JSON: %v", err) + } + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, bytes.NewReader(jtocData)); err != nil { + t.Fatalf("failed to read toc json blob: %v", err) + } + return buf.String() +} + +const chunkSize = 3 + +// type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, compressionLevel int) +type check func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) + +// testDigestAndVerify runs specified checks against sample stargz blobs. +func testDigestAndVerify(t *testing.T, controllers ...TestingController) { + tests := []struct { + name string + tarInit func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) + checks []check + }{ + { + name: "no-regfile", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + dir("test/"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + dir("test2/"), // modified + ), allowedPrefix[0])), + }, + }, + { + name: "small-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "bbb", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", ""), + file("foo.txt", "M"), // modified + dir("test/"), + file("test/bar.txt", "bbb"), + ), allowedPrefix[0])), + // checkVerifyInvalidTOCEntryFail("foo.txt"), // TODO + checkVerifyBrokenContentFail("foo.txt"), + }, + }, + { + name: "big-files", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", "bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazMMMbazbazbaz"), // modified + file("foo.txt", "a"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + { + name: "with-non-regfiles", + tarInit: func(t *testing.T, dgstMap map[string]digest.Digest) (blob []tarEntry) { + return tarOf( + regDigest(t, "baz.txt", 
"bazbazbazbazbazbazbaz", dgstMap), + regDigest(t, "foo.txt", "a", dgstMap), + symlink("barlink", "test/bar.txt"), + dir("test/"), + regDigest(t, "test/bar.txt", "testbartestbar", dgstMap), + dir("test2/"), + link("test2/bazlink", "baz.txt"), + ) + }, + checks: []check{ + checkStargzTOC, + checkVerifyTOC, + checkVerifyInvalidStargzFail(buildTar(t, tarOf( + file("baz.txt", "bazbazbazbazbazbazbaz"), + file("foo.txt", "a"), + symlink("barlink", "test/bar.txt"), + dir("test/"), + file("test/bar.txt", "testbartestbar"), + dir("test2/"), + link("test2/bazlink", "foo.txt"), // modified + ), allowedPrefix[0])), + checkVerifyInvalidTOCEntryFail("test/bar.txt"), + checkVerifyBrokenContentFail("test/bar.txt"), + }, + }, + } + + for _, tt := range tests { + for _, srcCompression := range srcCompressions { + srcCompression := srcCompression + for _, cl := range controllers { + cl := cl + for _, prefix := range allowedPrefix { + prefix := prefix + for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} { + srcTarFormat := srcTarFormat + t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,format=%s", cl, prefix, srcTarFormat), func(t *testing.T) { + // Get original tar file and chunk digests + dgstMap := make(map[string]digest.Digest) + tarBlob := buildTar(t, tt.tarInit(t, dgstMap), prefix, srcTarFormat) + + rc, err := Build(compressBlob(t, tarBlob, srcCompression), + WithChunkSize(chunkSize), WithCompression(cl)) + if err != nil { + t.Fatalf("failed to convert stargz: %v", err) + } + tocDigest := rc.TOCDigest() + defer rc.Close() + buf := new(bytes.Buffer) + if _, err := io.Copy(buf, rc); err != nil { + t.Fatalf("failed to copy built stargz blob: %v", err) + } + newStargz := buf.Bytes() + // NoPrefetchLandmark is added during `Bulid`, which is expected behaviour. + dgstMap[chunkID(NoPrefetchLandmark, 0, int64(len([]byte{landmarkContents})))] = digest.FromBytes([]byte{landmarkContents}) + + for _, check := range tt.checks { + check(t, newStargz, tocDigest, dgstMap, cl) + } + }) + } + } + } + } + } +} + +// checkStargzTOC checks the TOC JSON of the passed stargz has the expected +// digest and contains valid chunks. It walks all entries in the stargz and +// checks all chunk digests stored to the TOC JSON match the actual contents. +func checkStargzTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) { + sgz, err := Open( + io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), + WithDecompressors(controller), + ) + if err != nil { + t.Errorf("failed to parse converted stargz: %v", err) + return + } + digestMapTOC, err := listDigests(io.NewSectionReader( + bytes.NewReader(sgzData), 0, int64(len(sgzData))), + controller, + ) + if err != nil { + t.Fatalf("failed to list digest: %v", err) + } + found := make(map[string]bool) + for id := range dgstMap { + found[id] = false + } + zr, err := controller.Reader(bytes.NewReader(sgzData)) + if err != nil { + t.Fatalf("failed to decompress converted stargz: %v", err) + } + defer zr.Close() + tr := tar.NewReader(zr) + for { + h, err := tr.Next() + if err != nil { + if err != io.EOF { + t.Errorf("failed to read tar entry: %v", err) + return + } + break + } + if h.Name == TOCTarName { + // Check the digest of TOC JSON based on the actual contents + // It's sure that TOC JSON exists in this archive because + // Open succeeded. 
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.Copy(dgstr.Hash(), tr); err != nil {
+				t.Fatalf("failed to calculate digest of TOC JSON: %v",
+					err)
+			}
+			if dgstr.Digest() != tocDigest {
+				t.Errorf("invalid TOC JSON %q; want %q", dgstr.Digest(), tocDigest)
+			}
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			// Get the original digest to make sure the file contents are kept unchanged
+			// from the original tar throughout the conversion steps.
+			id := chunkID(h.Name, n, ce.ChunkSize)
+			want, ok := dgstMap[id]
+			if !ok {
+				t.Errorf("Unexpected chunk %q(offset=%d,size=%d): %v",
+					h.Name, n, ce.ChunkSize, dgstMap)
+				return
+			}
+			found[id] = true
+
+			// Check the file contents
+			dgstr := digest.Canonical.Digester()
+			if _, err := io.CopyN(dgstr.Hash(), tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to calculate digest of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if want != dgstr.Digest() {
+				t.Errorf("Invalid contents in converted stargz %q: %q; want %q",
+					h.Name, dgstr.Digest(), want)
+				return
+			}
+
+			// Check the digest stored in TOC JSON
+			dgstTOC, ok := digestMapTOC[ce.Offset]
+			if !ok {
+				t.Errorf("digest of %q(offset=%d,size=%d,chunkOffset=%d) isn't registered",
+					h.Name, ce.Offset, ce.ChunkSize, ce.ChunkOffset)
+			}
+			if want != dgstTOC {
+				t.Errorf("Invalid digest in TOCEntry %q: %q; want %q",
+					h.Name, dgstTOC, want)
+				return
+			}
+
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyTOC checks that the verification works for the TOC JSON of the
+// passed stargz. It walks all entries in the stargz and checks that the
+// verification succeeds for all chunks.
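+// Each chunk's contents are written into the verifier returned by VerifyTOC
+// and Verified() must report success for every chunk.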
+func checkVerifyTOC(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+	sgz, err := Open(
+		io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+		WithDecompressors(controller),
+	)
+	if err != nil {
+		t.Errorf("failed to parse converted stargz: %v", err)
+		return
+	}
+	ev, err := sgz.VerifyTOC(tocDigest)
+	if err != nil {
+		t.Errorf("failed to verify stargz: %v", err)
+		return
+	}
+
+	found := make(map[string]bool)
+	for id := range dgstMap {
+		found[id] = false
+	}
+	zr, err := controller.Reader(bytes.NewReader(sgzData))
+	if err != nil {
+		t.Fatalf("failed to decompress converted stargz: %v", err)
+	}
+	defer zr.Close()
+	tr := tar.NewReader(zr)
+	for {
+		h, err := tr.Next()
+		if err != nil {
+			if err != io.EOF {
+				t.Errorf("failed to read tar entry: %v", err)
+				return
+			}
+			break
+		}
+		if h.Name == TOCTarName {
+			continue
+		}
+		if _, ok := sgz.Lookup(h.Name); !ok {
+			t.Errorf("lost stargz entry %q in the converted TOC", h.Name)
+			return
+		}
+		var n int64
+		for n < h.Size {
+			ce, ok := sgz.ChunkEntryForOffset(h.Name, n)
+			if !ok {
+				t.Errorf("lost chunk %q(offset=%d) in the converted TOC",
+					h.Name, n)
+				return
+			}
+
+			v, err := ev.Verifier(ce)
+			if err != nil {
+				t.Fatalf("failed to get verifier for %q(offset=%d)", h.Name, n)
+			}
+
+			found[chunkID(h.Name, n, ce.ChunkSize)] = true
+
+			// Check the file contents
+			if _, err := io.CopyN(v, tr, ce.ChunkSize); err != nil {
+				t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+					h.Name, n, ce.ChunkSize)
+			}
+			if !v.Verified() {
+				t.Errorf("Invalid contents in converted stargz %q (verification should succeed)",
+					h.Name)
+				return
+			}
+			n += ce.ChunkSize
+		}
+	}
+
+	for id, ok := range found {
+		if !ok {
+			t.Errorf("required chunk %q not found in the converted stargz: %v", id, found)
+		}
+	}
+}
+
+// checkVerifyInvalidTOCEntryFail checks that a misconfigured TOC JSON is
+// detected during the verification and makes the verification return an error.
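+// The TOC JSON is rewritten in two ways (clearing the chunk digest of an entry
+// and duplicating an entry offset) and VerifyTOC must fail for both.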
+func checkVerifyInvalidTOCEntryFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		funcs := map[string]rewriteFunc{
+			"lost digest in an entry": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var found bool
+				for _, e := range toc.Entries {
+					if cleanEntryName(e.Name) == filename {
+						if e.Type != "reg" && e.Type != "chunk" {
+							t.Fatalf("entry %q to break must be regfile or chunk", filename)
+						}
+						if e.ChunkDigest == "" {
+							t.Fatalf("entry %q is already invalid", filename)
+						}
+						e.ChunkDigest = ""
+						found = true
+					}
+				}
+				if !found {
+					t.Fatalf("rewrite target not found")
+				}
+			},
+			"duplicated entry offset": func(t *testing.T, toc *JTOC, sgz *io.SectionReader) {
+				var (
+					sampleEntry *TOCEntry
+					targetEntry *TOCEntry
+				)
+				for _, e := range toc.Entries {
+					if e.Type == "reg" || e.Type == "chunk" {
+						if cleanEntryName(e.Name) == filename {
+							targetEntry = e
+						} else {
+							sampleEntry = e
+						}
+					}
+				}
+				if sampleEntry == nil {
+					t.Fatalf("TOC must contain at least one regfile or chunk entry other than the rewrite target")
+				}
+				if targetEntry == nil {
+					t.Fatalf("rewrite target not found")
+				}
+				targetEntry.Offset = sampleEntry.Offset
+			},
+		}
+
+		for name, rFunc := range funcs {
+			t.Run(name, func(t *testing.T) {
+				newSgz, newTocDigest := rewriteTOCJSON(t, io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))), rFunc, controller)
+				buf := new(bytes.Buffer)
+				if _, err := io.Copy(buf, newSgz); err != nil {
+					t.Fatalf("failed to get converted stargz")
+				}
+				isgz := buf.Bytes()
+
+				sgz, err := Open(
+					io.NewSectionReader(bytes.NewReader(isgz), 0, int64(len(isgz))),
+					WithDecompressors(controller),
+				)
+				if err != nil {
+					t.Fatalf("failed to parse converted stargz: %v", err)
+					return
+				}
+				_, err = sgz.VerifyTOC(newTocDigest)
+				if err == nil {
+					t.Errorf("must fail for invalid TOC")
+					return
+				}
+			})
+		}
+	}
+}
+
+// checkVerifyInvalidStargzFail checks that the verification detects a stargz
+// file that doesn't match the expected digest and returns an error.
+func checkVerifyInvalidStargzFail(invalid *io.SectionReader) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		rc, err := Build(invalid, WithChunkSize(chunkSize), WithCompression(controller))
+		if err != nil {
+			t.Fatalf("failed to convert stargz: %v", err)
+		}
+		defer rc.Close()
+		buf := new(bytes.Buffer)
+		if _, err := io.Copy(buf, rc); err != nil {
+			t.Fatalf("failed to copy built stargz blob: %v", err)
+		}
+		mStargz := buf.Bytes()
+
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(mStargz), 0, int64(len(mStargz))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		_, err = sgz.VerifyTOC(tocDigest)
+		if err == nil {
+			t.Errorf("must fail for invalid TOC")
+			return
+		}
+	}
+}
+
+// checkVerifyBrokenContentFail checks that the verifier detects broken
+// contents that don't match the expected digest and returns an error.
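+// The first byte of the target file's first chunk is flipped before it is
+// passed to the chunk verifier, which must not report the chunk as verified.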
+func checkVerifyBrokenContentFail(filename string) check {
+	return func(t *testing.T, sgzData []byte, tocDigest digest.Digest, dgstMap map[string]digest.Digest, controller TestingController) {
+		// Parse stargz file
+		sgz, err := Open(
+			io.NewSectionReader(bytes.NewReader(sgzData), 0, int64(len(sgzData))),
+			WithDecompressors(controller),
+		)
+		if err != nil {
+			t.Fatalf("failed to parse converted stargz: %v", err)
+			return
+		}
+		ev, err := sgz.VerifyTOC(tocDigest)
+		if err != nil {
+			t.Fatalf("failed to verify stargz: %v", err)
+			return
+		}
+
+		// Open the target file
+		sr, err := sgz.OpenFile(filename)
+		if err != nil {
+			t.Fatalf("failed to open file %q", filename)
+		}
+		ce, ok := sgz.ChunkEntryForOffset(filename, 0)
+		if !ok {
+			t.Fatalf("lost chunk %q(offset=%d) in the converted TOC", filename, 0)
+			return
+		}
+		if ce.ChunkSize == 0 {
+			t.Fatalf("file mustn't be empty")
+			return
+		}
+		data := make([]byte, ce.ChunkSize)
+		if _, err := sr.ReadAt(data, ce.ChunkOffset); err != nil {
+			t.Errorf("failed to get data of a chunk of %q(offset=%d)",
+				filename, ce.ChunkOffset)
+		}
+
+		// Check the broken chunk (must fail)
+		v, err := ev.Verifier(ce)
+		if err != nil {
+			t.Fatalf("failed to get verifier for %q", filename)
+		}
+		broken := append([]byte{^data[0]}, data[1:]...)
+		if _, err := io.CopyN(v, bytes.NewReader(broken), ce.ChunkSize); err != nil {
+			t.Fatalf("failed to get chunk of %q (offset=%d,size=%d)",
+				filename, ce.ChunkOffset, ce.ChunkSize)
+		}
+		if v.Verified() {
+			t.Errorf("verification must fail for broken file chunk %q(org:%q,broken:%q)",
+				filename, data, broken)
+		}
+	}
+}
+
+func chunkID(name string, offset, size int64) string {
+	return fmt.Sprintf("%s-%d-%d", cleanEntryName(name), offset, size)
+}
+
+type rewriteFunc func(t *testing.T, toc *JTOC, sgz *io.SectionReader)
+
+func rewriteTOCJSON(t *testing.T, sgz *io.SectionReader, rewrite rewriteFunc, controller TestingController) (newSgz io.Reader, tocDigest digest.Digest) {
+	decodedJTOC, jtocOffset, err := parseStargz(sgz, controller)
+	if err != nil {
+		t.Fatalf("failed to extract TOC JSON: %v", err)
+	}
+
+	rewrite(t, decodedJTOC, sgz)
+
+	tocFooter, tocDigest, err := tocAndFooter(controller, decodedJTOC, jtocOffset)
+	if err != nil {
+		t.Fatalf("failed to create toc and footer: %v", err)
+	}
+
+	// Reconstruct stargz file with the modified TOC JSON
+	if _, err := sgz.Seek(0, io.SeekStart); err != nil {
+		t.Fatalf("failed to reset the seek position of stargz: %v", err)
+	}
+	return io.MultiReader(
+		io.LimitReader(sgz, jtocOffset), // Original stargz (before TOC JSON)
+		tocFooter,                       // Rewritten TOC and footer
+	), tocDigest
+}
+
+func listDigests(sgz *io.SectionReader, controller TestingController) (map[int64]digest.Digest, error) {
+	decodedJTOC, _, err := parseStargz(sgz, controller)
+	if err != nil {
+		return nil, err
+	}
+	digestMap := make(map[int64]digest.Digest)
+	for _, e := range decodedJTOC.Entries {
+		if e.Type == "reg" || e.Type == "chunk" {
+			if e.Type == "reg" && e.Size == 0 {
+				continue // ignore empty files
+			}
+			if e.ChunkDigest == "" {
+				return nil, fmt.Errorf("ChunkDigest of %q(off=%d) not found in TOC JSON",
+					e.Name, e.Offset)
+			}
+			d, err := digest.Parse(e.ChunkDigest)
+			if err != nil {
+				return nil, err
+			}
+			digestMap[e.Offset] = d
+		}
+	}
+	return digestMap, nil
+}
+
+func parseStargz(sgz *io.SectionReader, controller TestingController) (decodedJTOC *JTOC, jtocOffset int64, err error) {
+	fSize := controller.FooterSize()
+	footer := make([]byte, fSize)
+	if _, err := sgz.ReadAt(footer, 
sgz.Size()-fSize); err != nil {
+		return nil, 0, fmt.Errorf("error reading footer: %w", err)
+	}
+	_, tocOffset, _, err := controller.ParseFooter(footer[positive(int64(len(footer))-fSize):])
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to parse footer: %w", err)
+	}
+
+	// Decode the TOC JSON
+	tocReader := io.NewSectionReader(sgz, tocOffset, sgz.Size()-tocOffset-fSize)
+	decodedJTOC, _, err = controller.ParseTOC(tocReader)
+	if err != nil {
+		return nil, 0, fmt.Errorf("failed to parse TOC: %w", err)
+	}
+	return decodedJTOC, tocOffset, nil
+}
+
+func testWriteAndOpen(t *testing.T, controllers ...TestingController) {
+	const content = "Some contents"
+	invalidUtf8 := "\xff\xfe\xfd"
+
+	xAttrFile := xAttr{"foo": "bar", "invalid-utf8": invalidUtf8}
+	sampleOwner := owner{uid: 50, gid: 100}
+
+	tests := []struct {
+		name      string
+		chunkSize int
+		in        []tarEntry
+		want      []stargzCheck
+		wantNumGz int // expected number of streams
+
+		wantNumGzLossLess  int // expected number of streams (> 0) in lossless mode if it's different from wantNumGz
+		wantFailOnLossLess bool
+	}{
+		{
+			name:              "empty",
+			in:                tarOf(),
+			wantNumGz:         2, // empty tar + TOC + footer
+			wantNumGzLossLess: 3, // empty tar + TOC + footer
+			want: checks(
+				numTOCEntries(0),
+			),
+		},
+		{
+			name: "1dir_1empty_file",
+			in: tarOf(
+				dir("foo/"),
+				file("foo/bar.txt", ""),
+			),
+			wantNumGz: 3, // dir, TOC, footer
+			want: checks(
+				numTOCEntries(2),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", 0),
+				entryHasChildren("foo", "bar.txt"),
+				hasFileDigest("foo/bar.txt", digestFor("")),
+			),
+		},
+		{
+			name: "1dir_1file",
+			in: tarOf(
+				dir("foo/"),
+				file("foo/bar.txt", content, xAttrFile),
+			),
+			wantNumGz: 4, // foo dir, bar.txt alone, TOC, footer
+			want: checks(
+				numTOCEntries(2),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", len(content)),
+				hasFileDigest("foo/bar.txt", digestFor(content)),
+				hasFileContentsRange("foo/bar.txt", 0, content),
+				hasFileContentsRange("foo/bar.txt", 1, content[1:]),
+				entryHasChildren("", "foo"),
+				entryHasChildren("foo", "bar.txt"),
+				hasFileXattrs("foo/bar.txt", "foo", "bar"),
+				hasFileXattrs("foo/bar.txt", "invalid-utf8", invalidUtf8),
+			),
+		},
+		{
+			name: "2meta_2file",
+			in: tarOf(
+				dir("bar/", sampleOwner),
+				dir("foo/", sampleOwner),
+				file("foo/bar.txt", content, sampleOwner),
+			),
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+			want: checks(
+				numTOCEntries(3),
+				hasDir("bar/"),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", len(content)),
+				entryHasChildren("", "bar", "foo"),
+				entryHasChildren("foo", "bar.txt"),
+				hasChunkEntries("foo/bar.txt", 1),
+				hasEntryOwner("bar/", sampleOwner),
+				hasEntryOwner("foo/", sampleOwner),
+				hasEntryOwner("foo/bar.txt", sampleOwner),
+			),
+		},
+		{
+			name: "3dir",
+			in: tarOf(
+				dir("bar/"),
+				dir("foo/"),
+				dir("foo/bar/"),
+			),
+			wantNumGz: 3, // 3 dirs, TOC, footer
+			want: checks(
+				hasDirLinkCount("bar/", 2),
+				hasDirLinkCount("foo/", 3),
+				hasDirLinkCount("foo/bar/", 2),
+			),
+		},
+		{
+			name: "symlink",
+			in: tarOf(
+				dir("foo/"),
+				symlink("foo/bar", "../../x"),
+			),
+			wantNumGz: 3, // metas + TOC + footer
+			want: checks(
+				numTOCEntries(2),
+				hasSymlink("foo/bar", "../../x"),
+				entryHasChildren("", "foo"),
+				entryHasChildren("foo", "bar"),
+			),
+		},
+		{
+			name:      "chunked_file",
+			chunkSize: 4,
+			in: tarOf(
+				dir("foo/"),
+				file("foo/big.txt", "This "+"is s"+"uch "+"a bi"+"g fi"+"le"),
+			),
+			wantNumGz: 9,
+			want: checks(
+				numTOCEntries(7), // 1 for foo dir, 6 for the foo/big.txt file
+				hasDir("foo/"),
+				hasFileLen("foo/big.txt", len("This is 
such a big file")), + hasFileDigest("foo/big.txt", digestFor("This is such a big file")), + hasFileContentsRange("foo/big.txt", 0, "This is such a big file"), + hasFileContentsRange("foo/big.txt", 1, "his is such a big file"), + hasFileContentsRange("foo/big.txt", 2, "is is such a big file"), + hasFileContentsRange("foo/big.txt", 3, "s is such a big file"), + hasFileContentsRange("foo/big.txt", 4, " is such a big file"), + hasFileContentsRange("foo/big.txt", 5, "is such a big file"), + hasFileContentsRange("foo/big.txt", 6, "s such a big file"), + hasFileContentsRange("foo/big.txt", 7, " such a big file"), + hasFileContentsRange("foo/big.txt", 8, "such a big file"), + hasFileContentsRange("foo/big.txt", 9, "uch a big file"), + hasFileContentsRange("foo/big.txt", 10, "ch a big file"), + hasFileContentsRange("foo/big.txt", 11, "h a big file"), + hasFileContentsRange("foo/big.txt", 12, " a big file"), + hasFileContentsRange("foo/big.txt", len("This is such a big file")-1, ""), + hasChunkEntries("foo/big.txt", 6), + ), + }, + { + name: "recursive", + in: tarOf( + dir("/", sampleOwner), + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + ), + wantNumGz: 4, // dirs, bar.txt alone, TOC, footer + want: checks( + maxDepth(2), // 0: root directory, 1: "foo/", 2: "bar.txt" + ), + }, + { + name: "block_char_fifo", + in: tarOf( + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "b", + Typeflag: tar.TypeBlock, + Devmajor: 123, + Devminor: 456, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "c", + Typeflag: tar.TypeChar, + Devmajor: 111, + Devminor: 222, + Format: format, + }) + }), + tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Name: prefix + "f", + Typeflag: tar.TypeFifo, + Format: format, + }) + }), + ), + wantNumGz: 3, + want: checks( + lookupMatch("b", &TOCEntry{Name: "b", Type: "block", DevMajor: 123, DevMinor: 456, NumLink: 1}), + lookupMatch("c", &TOCEntry{Name: "c", Type: "char", DevMajor: 111, DevMinor: 222, NumLink: 1}), + lookupMatch("f", &TOCEntry{Name: "f", Type: "fifo", NumLink: 1}), + ), + }, + { + name: "modes", + in: tarOf( + dir("foo1/", 0755|os.ModeDir|os.ModeSetgid), + file("foo1/bar1", content, 0700|os.ModeSetuid), + file("foo1/bar2", content, 0755|os.ModeSetgid), + dir("foo2/", 0755|os.ModeDir|os.ModeSticky), + file("foo2/bar3", content, 0755|os.ModeSticky), + dir("foo3/", 0755|os.ModeDir), + file("foo3/bar4", content, os.FileMode(0700)), + file("foo3/bar5", content, os.FileMode(0755)), + ), + wantNumGz: 8, // dir, bar1 alone, bar2 alone + dir, bar3 alone + dir, bar4 alone, bar5 alone, TOC, footer + want: checks( + hasMode("foo1/", 0755|os.ModeDir|os.ModeSetgid), + hasMode("foo1/bar1", 0700|os.ModeSetuid), + hasMode("foo1/bar2", 0755|os.ModeSetgid), + hasMode("foo2/", 0755|os.ModeDir|os.ModeSticky), + hasMode("foo2/bar3", 0755|os.ModeSticky), + hasMode("foo3/", 0755|os.ModeDir), + hasMode("foo3/bar4", os.FileMode(0700)), + hasMode("foo3/bar5", os.FileMode(0755)), + ), + }, + { + name: "lossy", + in: tarOf( + dir("bar/", sampleOwner), + dir("foo/", sampleOwner), + file("foo/bar.txt", content, sampleOwner), + file(TOCTarName, "dummy"), // ignored by the writer. 
(lossless write returns error)
+			wantNumGz: 4, // both dirs, bar.txt alone, TOC, footer
+			want: checks(
+				numTOCEntries(3),
+				hasDir("bar/"),
+				hasDir("foo/"),
+				hasFileLen("foo/bar.txt", len(content)),
+				entryHasChildren("", "bar", "foo"),
+				entryHasChildren("foo", "bar.txt"),
+				hasChunkEntries("foo/bar.txt", 1),
+				hasEntryOwner("bar/", sampleOwner),
+				hasEntryOwner("foo/", sampleOwner),
+				hasEntryOwner("foo/bar.txt", sampleOwner),
+			),
+			wantFailOnLossLess: true,
+		},
+	}
+
+	for _, tt := range tests {
+		for _, cl := range controllers {
+			cl := cl
+			for _, prefix := range allowedPrefix {
+				prefix := prefix
+				for _, srcTarFormat := range []tar.Format{tar.FormatUSTAR, tar.FormatPAX, tar.FormatGNU} {
+					srcTarFormat := srcTarFormat
+					for _, lossless := range []bool{true, false} {
+						t.Run(tt.name+"-"+fmt.Sprintf("compression=%v,prefix=%q,lossless=%v,format=%s", cl, prefix, lossless, srcTarFormat), func(t *testing.T) {
+							var tr io.Reader = buildTar(t, tt.in, prefix, srcTarFormat)
+							origTarDgstr := digest.Canonical.Digester()
+							tr = io.TeeReader(tr, origTarDgstr.Hash())
+							var stargzBuf bytes.Buffer
+							w := NewWriterWithCompressor(&stargzBuf, cl)
+							w.ChunkSize = tt.chunkSize
+							if lossless {
+								err := w.AppendTarLossLess(tr)
+								if tt.wantFailOnLossLess {
+									if err != nil {
+										return // expected to fail
+									}
+									t.Fatalf("AppendTarLossLess was expected to fail")
+								}
+								if err != nil {
+									t.Fatalf("Append(lossless): %v", err)
+								}
+							} else {
+								if err := w.AppendTar(tr); err != nil {
+									t.Fatalf("Append: %v", err)
+								}
+							}
+							if _, err := w.Close(); err != nil {
+								t.Fatalf("Writer.Close: %v", err)
+							}
+							b := stargzBuf.Bytes()
+
+							if lossless {
+								// Check if the result blob preserves the original tar metadata
+								rc, err := Unpack(io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))), cl)
+								if err != nil {
+									t.Errorf("failed to decompress blob: %v", err)
+									return
+								}
+								defer rc.Close()
+								resultDgstr := digest.Canonical.Digester()
+								if _, err := io.Copy(resultDgstr.Hash(), rc); err != nil {
+									t.Errorf("failed to read result decompressed blob: %v", err)
+									return
+								}
+								if resultDgstr.Digest() != origTarDgstr.Digest() {
+									t.Errorf("lossy compression occurred: digest=%v; want %v",
+										resultDgstr.Digest(), origTarDgstr.Digest())
+									return
+								}
+							}
+
+							diffID := w.DiffID()
+							wantDiffID := cl.DiffIDOf(t, b)
+							if diffID != wantDiffID {
+								t.Errorf("DiffID = %q; want %q", diffID, wantDiffID)
+							}
+
+							got := cl.CountStreams(t, b)
+							wantNumGz := tt.wantNumGz
+							if lossless && tt.wantNumGzLossLess > 0 {
+								wantNumGz = tt.wantNumGzLossLess
+							}
+							if got != wantNumGz {
+								t.Errorf("number of streams = %d; want %d", got, wantNumGz)
+							}
+
+							telemetry, checkCalled := newCalledTelemetry()
+							r, err := Open(
+								io.NewSectionReader(bytes.NewReader(b), 0, int64(len(b))),
+								WithDecompressors(cl),
+								WithTelemetry(telemetry),
+							)
+							if err != nil {
+								t.Fatalf("stargz.Open: %v", err)
+							}
+							if err := checkCalled(); err != nil {
+								t.Errorf("telemetry failure: %v", err)
+							}
+							for _, want := range tt.want {
+								want.check(t, r)
+							}
+						})
+					}
+				}
+			}
+		}
+	}
+}
+
+func newCalledTelemetry() (telemetry *Telemetry, check func() error) {
+	var getFooterLatencyCalled bool
+	var getTocLatencyCalled bool
+	var deserializeTocLatencyCalled bool
+	return &Telemetry{
+			func(time.Time) { getFooterLatencyCalled = true },
+			func(time.Time) { getTocLatencyCalled = true },
+			func(time.Time) { deserializeTocLatencyCalled = true },
+		}, func() error {
+			var allErr []error
+			if !getFooterLatencyCalled {
+				allErr = append(allErr, fmt.Errorf("metrics GetFooterLatency isn't called"))
+ } + if !getTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics GetTocLatency isn't called")) + } + if !deserializeTocLatencyCalled { + allErr = append(allErr, fmt.Errorf("metrics DeserializeTocLatency isn't called")) + } + return errorutil.Aggregate(allErr) + } +} + +func digestFor(content string) string { + sum := sha256.Sum256([]byte(content)) + return fmt.Sprintf("sha256:%x", sum) +} + +type numTOCEntries int + +func (n numTOCEntries) check(t *testing.T, r *Reader) { + if r.toc == nil { + t.Fatal("nil TOC") + } + if got, want := len(r.toc.Entries), int(n); got != want { + t.Errorf("got %d TOC entries; want %d", got, want) + } + t.Logf("got TOC entries:") + for i, ent := range r.toc.Entries { + entj, _ := json.Marshal(ent) + t.Logf(" [%d]: %s\n", i, entj) + } + if t.Failed() { + t.FailNow() + } +} + +func checks(s ...stargzCheck) []stargzCheck { return s } + +type stargzCheck interface { + check(t *testing.T, r *Reader) +} + +type stargzCheckFn func(*testing.T, *Reader) + +func (f stargzCheckFn) check(t *testing.T, r *Reader) { f(t, r) } + +func maxDepth(max int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup("") + if !ok { + t.Fatal("root directory not found") + } + d, err := getMaxDepth(t, e, 0, 10*max) + if err != nil { + t.Errorf("failed to get max depth (wanted %d): %v", max, err) + return + } + if d != max { + t.Errorf("invalid depth %d; want %d", d, max) + return + } + }) +} + +func getMaxDepth(t *testing.T, e *TOCEntry, current, limit int) (max int, rErr error) { + if current > limit { + return -1, fmt.Errorf("walkMaxDepth: exceeds limit: current:%d > limit:%d", + current, limit) + } + max = current + e.ForeachChild(func(baseName string, ent *TOCEntry) bool { + t.Logf("%q(basename:%q) is child of %q\n", ent.Name, baseName, e.Name) + d, err := getMaxDepth(t, ent, current+1, limit) + if err != nil { + rErr = err + return false + } + if d > max { + max = d + } + return true + }) + return +} + +func hasFileLen(file string, wantLen int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } else if ent.Size != int64(wantLen) { + t.Errorf("file size of %q = %d; want %d", file, ent.Size, wantLen) + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileXattrs(file, name, value string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "reg" { + t.Errorf("file type of %q is %q; want \"reg\"", file, ent.Type) + } + if ent.Xattrs == nil { + t.Errorf("file %q has no xattrs", file) + return + } + valueFound, found := ent.Xattrs[name] + if !found { + t.Errorf("file %q has no xattr %q", file, name) + return + } + if string(valueFound) != value { + t.Errorf("file %q has xattr %q with value %q instead of %q", file, name, valueFound, value) + } + + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasFileDigest(file string, digest string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("didn't find TOCEntry for file %q", file) + } + if ent.Digest != digest { + t.Fatalf("Digest(%q) = %q, want %q", file, ent.Digest, digest) + } + }) +} + +func hasFileContentsRange(file string, offset int, want string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) 
{ + f, err := r.OpenFile(file) + if err != nil { + t.Fatal(err) + } + got := make([]byte, len(want)) + n, err := f.ReadAt(got, int64(offset)) + if err != nil { + t.Fatalf("ReadAt(len %d, offset %d) = %v, %v", len(got), offset, n, err) + } + if string(got) != want { + t.Fatalf("ReadAt(len %d, offset %d) = %q, want %q", len(got), offset, got, want) + } + }) +} + +func hasChunkEntries(file string, wantChunks int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(file) + if !ok { + t.Fatalf("no file for %q", file) + } + if ent.Type != "reg" { + t.Fatalf("file %q has unexpected type %q; want reg", file, ent.Type) + } + chunks := r.getChunks(ent) + if len(chunks) != wantChunks { + t.Errorf("len(r.getChunks(%q)) = %d; want %d", file, len(chunks), wantChunks) + return + } + f := chunks[0] + + var gotChunks []*TOCEntry + var last *TOCEntry + for off := int64(0); off < f.Size; off++ { + e, ok := r.ChunkEntryForOffset(file, off) + if !ok { + t.Errorf("no ChunkEntryForOffset at %d", off) + return + } + if last != e { + gotChunks = append(gotChunks, e) + last = e + } + } + if !reflect.DeepEqual(chunks, gotChunks) { + t.Errorf("gotChunks=%d, want=%d; contents mismatch", len(gotChunks), wantChunks) + } + + // And verify the NextOffset + for i := 0; i < len(gotChunks)-1; i++ { + ci := gotChunks[i] + cnext := gotChunks[i+1] + if ci.NextOffset() != cnext.Offset { + t.Errorf("chunk %d NextOffset %d != next chunk's Offset of %d", i, ci.NextOffset(), cnext.Offset) + } + } + }) +} + +func entryHasChildren(dir string, want ...string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + want := append([]string(nil), want...) + var got []string + ent, ok := r.Lookup(dir) + if !ok { + t.Fatalf("didn't find TOCEntry for dir node %q", dir) + } + for baseName := range ent.children { + got = append(got, baseName) + } + sort.Strings(got) + sort.Strings(want) + if !reflect.DeepEqual(got, want) { + t.Errorf("children of %q = %q; want %q", dir, got, want) + } + }) +} + +func hasDir(file string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasDirLinkCount(file string, count int) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Type != "dir" { + t.Errorf("file type of %q is %q; want \"dir\"", file, ent.Type) + return + } + if ent.NumLink != count { + t.Errorf("link count of %q = %d; want %d", file, ent.NumLink, count) + } + return + } + } + t.Errorf("directory %q not found", file) + }) +} + +func hasMode(file string, mode os.FileMode) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == cleanEntryName(file) { + if ent.Stat().Mode() != mode { + t.Errorf("invalid mode: got %v; want %v", ent.Stat().Mode(), mode) + return + } + return + } + } + t.Errorf("file %q not found", file) + }) +} + +func hasSymlink(file, target string) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + for _, ent := range r.toc.Entries { + if ent.Name == file { + if ent.Type != "symlink" { + t.Errorf("file type of %q is %q; want \"symlink\"", file, ent.Type) + } else if ent.LinkName != target { + t.Errorf("link target of symlink %q is 
%q; want %q", file, ent.LinkName, target) + } + return + } + } + t.Errorf("symlink %q not found", file) + }) +} + +func lookupMatch(name string, want *TOCEntry) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + e, ok := r.Lookup(name) + if !ok { + t.Fatalf("failed to Lookup entry %q", name) + } + if !reflect.DeepEqual(e, want) { + t.Errorf("entry %q mismatch.\n got: %+v\nwant: %+v\n", name, e, want) + } + + }) +} + +func hasEntryOwner(entry string, owner owner) stargzCheck { + return stargzCheckFn(func(t *testing.T, r *Reader) { + ent, ok := r.Lookup(strings.TrimSuffix(entry, "/")) + if !ok { + t.Errorf("entry %q not found", entry) + return + } + if ent.UID != owner.uid || ent.GID != owner.gid { + t.Errorf("entry %q has invalid owner (uid:%d, gid:%d) instead of (uid:%d, gid:%d)", entry, ent.UID, ent.GID, owner.uid, owner.gid) + return + } + }) +} + +func tarOf(s ...tarEntry) []tarEntry { return s } + +type tarEntry interface { + appendTar(tw *tar.Writer, prefix string, format tar.Format) error +} + +type tarEntryFunc func(*tar.Writer, string, tar.Format) error + +func (f tarEntryFunc) appendTar(tw *tar.Writer, prefix string, format tar.Format) error { + return f(tw, prefix, format) +} + +func buildTar(t *testing.T, ents []tarEntry, prefix string, opts ...interface{}) *io.SectionReader { + format := tar.FormatUnknown + for _, opt := range opts { + switch v := opt.(type) { + case tar.Format: + format = v + default: + panic(fmt.Errorf("unsupported opt for buildTar: %v", opt)) + } + } + buf := new(bytes.Buffer) + tw := tar.NewWriter(buf) + for _, ent := range ents { + if err := ent.appendTar(tw, prefix, format); err != nil { + t.Fatalf("building input tar: %v", err) + } + } + if err := tw.Close(); err != nil { + t.Errorf("closing write of input tar: %v", err) + } + data := append(buf.Bytes(), make([]byte, 100)...) // append empty bytes at the tail to see lossless works + return io.NewSectionReader(bytes.NewReader(data), 0, int64(len(data))) +} + +func dir(name string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var o owner + mode := os.FileMode(0755) + for _, opt := range opts { + switch v := opt.(type) { + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if !strings.HasSuffix(name, "/") { + panic(fmt.Sprintf("missing trailing slash in dir %q ", name)) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeDir, + Name: prefix + name, + Mode: tm, + Uid: o.uid, + Gid: o.gid, + Format: format, + }) + }) +} + +// xAttr are extended attributes to set on test files created with the file func. +type xAttr map[string]string + +// owner is owner ot set on test files and directories with the file and dir functions. 
+type owner struct { + uid int + gid int +} + +func file(name, contents string, opts ...interface{}) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + var xattrs xAttr + var o owner + mode := os.FileMode(0644) + for _, opt := range opts { + switch v := opt.(type) { + case xAttr: + xattrs = v + case owner: + o = v + case os.FileMode: + mode = v + default: + return errors.New("unsupported opt") + } + } + if strings.HasSuffix(name, "/") { + return fmt.Errorf("bogus trailing slash in file %q", name) + } + tm, err := fileModeToTarMode(mode) + if err != nil { + return err + } + if len(xattrs) > 0 { + format = tar.FormatPAX // only PAX supports xattrs + } + if err := tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Mode: tm, + Xattrs: xattrs, + Size: int64(len(contents)), + Uid: o.uid, + Gid: o.gid, + Format: format, + }); err != nil { + return err + } + _, err = io.WriteString(tw, contents) + return err + }) +} + +func symlink(name, target string) tarEntry { + return tarEntryFunc(func(tw *tar.Writer, prefix string, format tar.Format) error { + return tw.WriteHeader(&tar.Header{ + Typeflag: tar.TypeSymlink, + Name: prefix + name, + Linkname: target, + Mode: 0644, + Format: format, + }) + }) +} + +func link(name string, linkname string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeLink, + Name: prefix + name, + Linkname: linkname, + ModTime: now, + Format: format, + }) + }) +} + +func chardev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeChar, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} + +func blockdev(name string, major, minor int64) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeBlock, + Name: prefix + name, + Devmajor: major, + Devminor: minor, + ModTime: now, + Format: format, + }) + }) +} +func fifo(name string) tarEntry { + now := time.Now() + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + return w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeFifo, + Name: prefix + name, + ModTime: now, + Format: format, + }) + }) +} + +func prefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: PrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func noPrefetchLandmark() tarEntry { + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Name: NoPrefetchLandmark, + Typeflag: tar.TypeReg, + Size: int64(len([]byte{landmarkContents})), + Format: format, + }); err != nil { + return err + } + contents := []byte{landmarkContents} + if _, err := io.CopyN(w, bytes.NewReader(contents), int64(len(contents))); err != nil { + return err + } + return nil + }) +} + +func regDigest(t *testing.T, name string, contentStr 
string, digestMap map[string]digest.Digest) tarEntry { + if digestMap == nil { + t.Fatalf("digest map mustn't be nil") + } + content := []byte(contentStr) + + var n int64 + for n < int64(len(content)) { + size := int64(chunkSize) + remain := int64(len(content)) - n + if remain < size { + size = remain + } + dgstr := digest.Canonical.Digester() + if _, err := io.CopyN(dgstr.Hash(), bytes.NewReader(content[n:n+size]), size); err != nil { + t.Fatalf("failed to calculate digest of %q (name=%q,offset=%d,size=%d)", + string(content[n:n+size]), name, n, size) + } + digestMap[chunkID(name, n, size)] = dgstr.Digest() + n += size + } + + return tarEntryFunc(func(w *tar.Writer, prefix string, format tar.Format) error { + if err := w.WriteHeader(&tar.Header{ + Typeflag: tar.TypeReg, + Name: prefix + name, + Size: int64(len(content)), + Format: format, + }); err != nil { + return err + } + if _, err := io.CopyN(w, bytes.NewReader(content), int64(len(content))); err != nil { + return err + } + return nil + }) +} + +func fileModeToTarMode(mode os.FileMode) (int64, error) { + h, err := tar.FileInfoHeader(fileInfoOnlyMode(mode), "") + if err != nil { + return 0, err + } + return h.Mode, nil +} + +// fileInfoOnlyMode is os.FileMode that populates only file mode. +type fileInfoOnlyMode os.FileMode + +func (f fileInfoOnlyMode) Name() string { return "" } +func (f fileInfoOnlyMode) Size() int64 { return 0 } +func (f fileInfoOnlyMode) Mode() os.FileMode { return os.FileMode(f) } +func (f fileInfoOnlyMode) ModTime() time.Time { return time.Now() } +func (f fileInfoOnlyMode) IsDir() bool { return os.FileMode(f).IsDir() } +func (f fileInfoOnlyMode) Sys() interface{} { return nil } diff --git a/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go new file mode 100644 index 0000000000..384ff7fd7f --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/estargz/types.go @@ -0,0 +1,316 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +/* + Copyright 2019 The Go Authors. All rights reserved. + Use of this source code is governed by a BSD-style + license that can be found in the LICENSE file. +*/ + +package estargz + +import ( + "archive/tar" + "hash" + "io" + "os" + "path" + "time" + + digest "github.com/opencontainers/go-digest" +) + +const ( + // TOCTarName is the name of the JSON file in the tar archive in the + // table of contents gzip stream. + TOCTarName = "stargz.index.json" + + // FooterSize is the number of bytes in the footer + // + // The footer is an empty gzip stream with no compression and an Extra + // header of the form "%016xSTARGZ", where the 64 bit hex-encoded + // number is the offset to the gzip stream of JSON TOC. 
+	//
+	// 51 comes from:
+	//
+	// 10 bytes  gzip header
+	// 2  bytes  XLEN (length of Extra field) = 26 (4 bytes header + 16 hex digits + len("STARGZ"))
+	// 2  bytes  Extra: SI1 = 'S', SI2 = 'G'
+	// 2  bytes  Extra: LEN = 22 (16 hex digits + len("STARGZ"))
+	// 22 bytes  Extra: subfield = fmt.Sprintf("%016xSTARGZ", offsetOfTOC)
+	// 5  bytes  flate header
+	// 8  bytes  gzip footer
+	// (End of the eStargz blob)
+	//
+	// NOTE: For Extra fields, subfield IDs SI1='S' SI2='G' are used for eStargz.
+	FooterSize = 51
+
+	// legacyFooterSize is the number of bytes in the legacy stargz footer.
+	//
+	// 47 comes from:
+	//
+	//   10 byte gzip header +
+	//   2 byte (LE16) length of extra, encoding 22 (16 hex digits + len("STARGZ")) == "\x16\x00" +
+	//   22 bytes of extra (fmt.Sprintf("%016xSTARGZ", tocGzipOffset))
+	//   5 byte flate header
+	//   8 byte gzip footer (two little endian uint32s: digest, size)
+	legacyFooterSize = 47
+
+	// TOCJSONDigestAnnotation is an annotation for an image layer. This stores the
+	// digest of the TOC JSON.
+	// This annotation is valid only when it is specified in `.[]layers.annotations`
+	// of an image manifest.
+	TOCJSONDigestAnnotation = "containerd.io/snapshot/stargz/toc.digest"
+
+	// StoreUncompressedSizeAnnotation is an additional annotation key for eStargz to enable lazy
+	// pulling on containers/storage. Stargz Store is required to expose the layer's uncompressed size
+	// to the runtime but current OCI image doesn't ship this information by default. So we store this
+	// to the special annotation.
+	StoreUncompressedSizeAnnotation = "io.containers.estargz.uncompressed-size"
+
+	// PrefetchLandmark is a file entry which indicates the end position of
+	// prefetch in the stargz file.
+	PrefetchLandmark = ".prefetch.landmark"
+
+	// NoPrefetchLandmark is a file entry which indicates that no prefetch should
+	// occur in the stargz file.
+	NoPrefetchLandmark = ".no.prefetch.landmark"
+
+	landmarkContents = 0xf
+)
+
+// JTOC is the JSON-serialized table of contents index of the files in the stargz file.
+type JTOC struct {
+	Version int         `json:"version"`
+	Entries []*TOCEntry `json:"entries"`
+}
+
+// TOCEntry is an entry in the stargz file's TOC (Table of Contents).
+type TOCEntry struct {
+	// Name is the tar entry's name. It is the complete path
+	// stored in the tar file, not just the base name.
+	Name string `json:"name"`
+
+	// Type is one of "dir", "reg", "symlink", "hardlink", "char",
+	// "block", "fifo", or "chunk".
+	// The "chunk" type is used for regular file data chunks past the first
+	// TOCEntry; the 2nd chunk and on have only Type ("chunk"), Offset,
+	// ChunkOffset, and ChunkSize populated.
+	Type string `json:"type"`
+
+	// Size, for regular files, is the logical size of the file.
+	Size int64 `json:"size,omitempty"`
+
+	// ModTime3339 is the modification time of the tar entry. Empty
+	// means zero or unknown. Otherwise it's in UTC RFC3339
+	// format. Use the ModTime method to access the time.Time value.
+	ModTime3339 string `json:"modtime,omitempty"`
+	modTime     time.Time
+
+	// LinkName, for symlinks and hardlinks, is the link target.
+	LinkName string `json:"linkName,omitempty"`
+
+	// Mode is the permission and mode bits.
+	Mode int64 `json:"mode,omitempty"`
+
+	// UID is the user ID of the owner.
+	UID int `json:"uid,omitempty"`
+
+	// GID is the group ID of the owner.
+	GID int `json:"gid,omitempty"`
+
+	// Uname is the username of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same UID.
+	Uname string `json:"userName,omitempty"`
+
+	// Gname is the group name of the owner.
+	//
+	// In the serialized JSON, this field may only be present for
+	// the first entry with the same GID.
+	Gname string `json:"groupName,omitempty"`
+
+	// Offset, for regular files, provides the offset in the
+	// stargz file to the file's data bytes. See ChunkOffset and
+	// ChunkSize.
+	Offset int64 `json:"offset,omitempty"`
+
+	nextOffset int64 // the Offset of the next entry with a non-zero Offset
+
+	// DevMajor is the major device number for "char" and "block" types.
+	DevMajor int `json:"devMajor,omitempty"`
+
+	// DevMinor is the minor device number for "char" and "block" types.
+	DevMinor int `json:"devMinor,omitempty"`
+
+	// NumLink is the number of entry names pointing to this entry.
+	// Zero means one name references this entry.
+	NumLink int
+
+	// Xattrs are the extended attributes for the entry.
+	Xattrs map[string][]byte `json:"xattrs,omitempty"`
+
+	// Digest stores the OCI checksum for regular file payloads.
+	// It has the form "sha256:abcdef01234....".
+	Digest string `json:"digest,omitempty"`
+
+	// ChunkOffset is non-zero if this is a chunk of a large,
+	// regular file. If so, the Offset is where the gzip header of
+	// ChunkSize bytes at ChunkOffset in Name begins.
+	//
+	// In serialized form, a "chunkSize" JSON field of zero means
+	// that the chunk goes to the end of the file. After reading
+	// from the stargz TOC, though, the ChunkSize is initialized
+	// to a non-zero value for when Type is either "reg" or
+	// "chunk".
+	ChunkOffset int64 `json:"chunkOffset,omitempty"`
+	ChunkSize   int64 `json:"chunkSize,omitempty"`
+
+	// ChunkDigest stores an OCI digest of the chunk. This must be formed
+	// as "sha256:0123abcd...".
+	ChunkDigest string `json:"chunkDigest,omitempty"`
+
+	children map[string]*TOCEntry
+}
+
+// ModTime returns the entry's modification time.
+func (e *TOCEntry) ModTime() time.Time { return e.modTime }
+
+// NextOffset returns the position (relative to the start of the
+// stargz file) of the next gzip boundary after e.Offset.
+func (e *TOCEntry) NextOffset() int64 { return e.nextOffset }
+
+func (e *TOCEntry) addChild(baseName string, child *TOCEntry) {
+	if e.children == nil {
+		e.children = make(map[string]*TOCEntry)
+	}
+	if child.Type == "dir" {
+		e.NumLink++ // Entry ".." in the subdirectory links to this directory
+	}
+	e.children[baseName] = child
+}
+
+// isDataType reports whether TOCEntry is a regular file or chunk (something that
+// contains regular file data).
+func (e *TOCEntry) isDataType() bool { return e.Type == "reg" || e.Type == "chunk" }
+
+// Stat returns a FileInfo value representing e.
+func (e *TOCEntry) Stat() os.FileInfo { return fileInfo{e} }
+
+// ForeachChild calls f for each child item. If f returns false, iteration ends.
+// If e is not a directory, f is not called.
+func (e *TOCEntry) ForeachChild(f func(baseName string, ent *TOCEntry) bool) {
+	for name, ent := range e.children {
+		if !f(name, ent) {
+			return
+		}
+	}
+}
+
+// LookupChild returns the directory e's child by its base name.
+func (e *TOCEntry) LookupChild(baseName string) (child *TOCEntry, ok bool) {
+	child, ok = e.children[baseName]
+	return
+}
+
+// fileInfo implements os.FileInfo using the wrapped *TOCEntry.
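+// It is the value returned by TOCEntry.Stat.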
+type fileInfo struct{ e *TOCEntry } + +var _ os.FileInfo = fileInfo{} + +func (fi fileInfo) Name() string { return path.Base(fi.e.Name) } +func (fi fileInfo) IsDir() bool { return fi.e.Type == "dir" } +func (fi fileInfo) Size() int64 { return fi.e.Size } +func (fi fileInfo) ModTime() time.Time { return fi.e.ModTime() } +func (fi fileInfo) Sys() interface{} { return fi.e } +func (fi fileInfo) Mode() (m os.FileMode) { + // TOCEntry.Mode is tar.Header.Mode so we can understand the these bits using `tar` pkg. + m = (&tar.Header{Mode: fi.e.Mode}).FileInfo().Mode() & + (os.ModePerm | os.ModeSetuid | os.ModeSetgid | os.ModeSticky) + switch fi.e.Type { + case "dir": + m |= os.ModeDir + case "symlink": + m |= os.ModeSymlink + case "char": + m |= os.ModeDevice | os.ModeCharDevice + case "block": + m |= os.ModeDevice + case "fifo": + m |= os.ModeNamedPipe + } + return m +} + +// TOCEntryVerifier holds verifiers that are usable for verifying chunks contained +// in a eStargz blob. +type TOCEntryVerifier interface { + + // Verifier provides a content verifier that can be used for verifying the + // contents of the specified TOCEntry. + Verifier(ce *TOCEntry) (digest.Verifier, error) +} + +// Compression provides the compression helper to be used creating and parsing eStargz. +// This package provides gzip-based Compression by default, but any compression +// algorithm (e.g. zstd) can be used as long as it implements Compression. +type Compression interface { + Compressor + Decompressor +} + +// Compressor represents the helper mothods to be used for creating eStargz. +type Compressor interface { + // Writer returns WriteCloser to be used for writing a chunk to eStargz. + // Everytime a chunk is written, the WriteCloser is closed and Writer is + // called again for writing the next chunk. + Writer(w io.Writer) (io.WriteCloser, error) + + // WriteTOCAndFooter is called to write JTOC to the passed Writer. + // diffHash calculates the DiffID (uncompressed sha256 hash) of the blob + // WriteTOCAndFooter can optionally write anything that affects DiffID calculation + // (e.g. uncompressed TOC JSON). + // + // This function returns tocDgst that represents the digest of TOC that will be used + // to verify this blob when it's parsed. + WriteTOCAndFooter(w io.Writer, off int64, toc *JTOC, diffHash hash.Hash) (tocDgst digest.Digest, err error) +} + +// Decompressor represents the helper mothods to be used for parsing eStargz. +type Decompressor interface { + // Reader returns ReadCloser to be used for decompressing file payload. + Reader(r io.Reader) (io.ReadCloser, error) + + // FooterSize returns the size of the footer of this blob. + FooterSize() int64 + + // ParseFooter parses the footer and returns the offset and (compressed) size of TOC. + // payloadBlobSize is the (compressed) size of the blob payload (i.e. the size between + // the top until the TOC JSON). + // + // Here, tocSize is optional. If tocSize <= 0, it's by default the size of the range + // from tocOffset until the beginning of the footer (blob size - tocOff - FooterSize). + ParseFooter(p []byte) (blobPayloadSize, tocOffset, tocSize int64, err error) + + // ParseTOC parses TOC from the passed reader. The reader provides the partial contents + // of the underlying blob that has the range specified by ParseFooter method. + // + // This function returns tocDgst that represents the digest of TOC that will be used + // to verify this blob. 
This must match to the value returned from + // Compressor.WriteTOCAndFooter that is used when creating this blob. + ParseTOC(r io.Reader) (toc *JTOC, tocDgst digest.Digest, err error) +} diff --git a/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go b/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go new file mode 100644 index 0000000000..e76c0b3a56 --- /dev/null +++ b/vendor/github.com/containerd/stargz-snapshotter/snapshot/overlayutils/check.go @@ -0,0 +1,172 @@ +/* + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. +*/ + +// ===== +// NOTE: This file is ported from https://github.com/containerd/containerd/blob/v1.5.2/snapshots/overlay/overlayutils/check.go +// TODO: import this from containerd package once we drop support to continerd v1.4.x +// ===== + +package overlayutils + +import ( + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "github.com/containerd/containerd/log" + "github.com/containerd/containerd/mount" + userns "github.com/containerd/containerd/sys" + "github.com/containerd/continuity/fs" +) + +// SupportsMultipleLowerDir checks if the system supports multiple lowerdirs, +// which is required for the overlay snapshotter. On 4.x kernels, multiple lowerdirs +// are always available (so this check isn't needed), and backported to RHEL and +// CentOS 3.x kernels (3.10.0-693.el7.x86_64 and up). This function is to detect +// support on those kernels, without doing a kernel version compare. +// +// Ported from moby overlay2. +func SupportsMultipleLowerDir(d string) error { + td, err := ioutil.TempDir(d, "multiple-lowerdir-check") + if err != nil { + return err + } + defer func() { + if err := os.RemoveAll(td); err != nil { + log.L.WithError(err).Warnf("Failed to remove check directory %v", td) + } + }() + + for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { + if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { + return err + } + } + + opts := fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")) + m := mount.Mount{ + Type: "overlay", + Source: "overlay", + Options: []string{opts}, + } + dest := filepath.Join(td, "merged") + if err := m.Mount(dest); err != nil { + return fmt.Errorf("failed to mount overlay: %w", err) + } + if err := mount.UnmountAll(dest, 0); err != nil { + log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) + } + return nil +} + +// Supported returns nil when the overlayfs is functional on the system with the root directory. +// Supported is not called during plugin initialization, but exposed for downstream projects which uses +// this snapshotter as a library. +func Supported(root string) error { + if err := os.MkdirAll(root, 0700); err != nil { + return err + } + supportsDType, err := fs.SupportsDType(root) + if err != nil { + return err + } + if !supportsDType { + return fmt.Errorf("%s does not support d_type. 
+ +// NeedsUserXAttr returns whether overlayfs should be mounted with the "userxattr" mount option. +// +// The "userxattr" option is needed for mounting overlayfs inside a user namespace with kernel >= 5.11. +// +// The "userxattr" option is NOT needed for the initial user namespace (aka "the host"). +// +// Also, Ubuntu (since circa 2015) and Debian (since 10) with kernel < 5.11 can mount +// the overlayfs in a user namespace without the "userxattr" option. +// +// The corresponding kernel commit: https://github.com/torvalds/linux/commit/2d2f2d7322ff43e0fe92bf8cccdc0b09449bf2e1 +// > ovl: user xattr +// > +// > Optionally allow using "user.overlay." namespace instead of "trusted.overlay." +// > ... +// > Disable redirect_dir and metacopy options, because these would allow privilege escalation through direct manipulation of the +// > "user.overlay.redirect" or "user.overlay.metacopy" xattrs. +// > ... +// +// The "userxattr" support is not exposed in "/sys/module/overlay/parameters". +func NeedsUserXAttr(d string) (bool, error) { + if !userns.RunningInUserNS() { + // we are the real root (i.e., the root in the initial user NS), + // so we never need the "userxattr" opt. + return false, nil + } + + // TODO: add fast path for kernel >= 5.11. + // + // Keep in mind that distro vendors might backport the patch to older kernels, + // so we can't completely remove the check. + + tdRoot := filepath.Join(d, "userxattr-check") + if err := os.RemoveAll(tdRoot); err != nil { + log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot) + } + + if err := os.MkdirAll(tdRoot, 0700); err != nil { + return false, err + } + + defer func() { + if err := os.RemoveAll(tdRoot); err != nil { + log.L.WithError(err).Warnf("Failed to remove check directory %v", tdRoot) + } + }() + + td, err := ioutil.TempDir(tdRoot, "") + if err != nil { + return false, err + } + + for _, dir := range []string{"lower1", "lower2", "upper", "work", "merged"} { + if err := os.Mkdir(filepath.Join(td, dir), 0755); err != nil { + return false, err + } + } + + opts := []string{ + fmt.Sprintf("lowerdir=%s:%s,upperdir=%s,workdir=%s", filepath.Join(td, "lower2"), filepath.Join(td, "lower1"), filepath.Join(td, "upper"), filepath.Join(td, "work")), + "userxattr", + } + + m := mount.Mount{ + Type: "overlay", + Source: "overlay", + Options: opts, + } + + dest := filepath.Join(td, "merged") + if err := m.Mount(dest); err != nil { + // Probably the host is running an Ubuntu/Debian kernel (< 5.11) with the userns patch but without the userxattr patch. + // Return false without error.
+ log.L.WithError(err).Debugf("cannot mount overlay with \"userxattr\", probably the kernel does not support userxattr") + return false, nil + } + if err := mount.UnmountAll(dest, 0); err != nil { + log.L.WithError(err).Warnf("Failed to unmount check directory %v", dest) + } + return true, nil +} diff --git a/vendor/github.com/felixge/httpsnoop/.gitignore b/vendor/github.com/felixge/httpsnoop/.gitignore new file mode 100644 index 0000000000..e69de29bb2 diff --git a/vendor/github.com/felixge/httpsnoop/.travis.yml b/vendor/github.com/felixge/httpsnoop/.travis.yml new file mode 100644 index 0000000000..bfc421200d --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/.travis.yml @@ -0,0 +1,6 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 diff --git a/vendor/github.com/felixge/httpsnoop/LICENSE.txt b/vendor/github.com/felixge/httpsnoop/LICENSE.txt new file mode 100644 index 0000000000..e028b46a9b --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/LICENSE.txt @@ -0,0 +1,19 @@ +Copyright (c) 2016 Felix Geisendörfer (felix@debuggable.com) + + Permission is hereby granted, free of charge, to any person obtaining a copy + of this software and associated documentation files (the "Software"), to deal + in the Software without restriction, including without limitation the rights + to use, copy, modify, merge, publish, distribute, sublicense, and/or sell + copies of the Software, and to permit persons to whom the Software is + furnished to do so, subject to the following conditions: + + The above copyright notice and this permission notice shall be included in + all copies or substantial portions of the Software. + + THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR + IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, + FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE + AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER + LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, + OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN + THE SOFTWARE. diff --git a/vendor/github.com/felixge/httpsnoop/Makefile b/vendor/github.com/felixge/httpsnoop/Makefile new file mode 100644 index 0000000000..2d84889aed --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/Makefile @@ -0,0 +1,10 @@ +.PHONY: ci generate clean + +ci: clean generate + go test -v ./... + +generate: + go generate . + +clean: + rm -rf *_generated*.go diff --git a/vendor/github.com/felixge/httpsnoop/README.md b/vendor/github.com/felixge/httpsnoop/README.md new file mode 100644 index 0000000000..ddcecd13e7 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/README.md @@ -0,0 +1,95 @@ +# httpsnoop + +Package httpsnoop provides an easy way to capture http related metrics (i.e. +response time, bytes written, and http status code) from your application's +http.Handlers. + +Doing this requires non-trivial wrapping of the http.ResponseWriter interface, +which is also exposed for users interested in a more low-level API. + +[![GoDoc](https://godoc.org/github.com/felixge/httpsnoop?status.svg)](https://godoc.org/github.com/felixge/httpsnoop) +[![Build Status](https://travis-ci.org/felixge/httpsnoop.svg?branch=master)](https://travis-ci.org/felixge/httpsnoop) + +## Usage Example + +```go +// myH is your app's http handler, perhaps a http.ServeMux or similar. +var myH http.Handler +// wrappedH wraps myH in order to log every request. 
+wrappedH := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + m := httpsnoop.CaptureMetrics(myH, w, r) + log.Printf( + "%s %s (code=%d dt=%s written=%d)", + r.Method, + r.URL, + m.Code, + m.Duration, + m.Written, + ) +}) +http.ListenAndServe(":8080", wrappedH) +``` + +## Why this package exists + +Instrumenting an application's http.Handler is surprisingly difficult. + +However, if you google for e.g. "capture ResponseWriter status code" you'll find +lots of advice and code examples that suggest it is a fairly trivial +undertaking. Unfortunately everything I've seen so far has a high chance of +breaking your application. + +The main problem is that a `http.ResponseWriter` often implements additional +interfaces such as `http.Flusher`, `http.CloseNotifier`, `http.Hijacker`, `http.Pusher`, and +`io.ReaderFrom`. So the naive approach of just wrapping `http.ResponseWriter` +in your own struct that also implements the `http.ResponseWriter` interface +will hide the additional interfaces mentioned above. This has a high chance of +introducing subtle bugs into any non-trivial application. + +Another approach I've seen people take is to return a struct that implements +all of the interfaces above. However, that's also problematic, because it's +difficult to fake some of these interfaces' behaviors when the underlying +`http.ResponseWriter` doesn't have an implementation. It's also dangerous, +because an application may choose to operate differently, merely because it +detects the presence of these additional interfaces. + +This package solves this problem by checking which additional interfaces a +`http.ResponseWriter` implements, returning a wrapped version implementing the +exact same set of interfaces. + +Additionally this package properly handles edge cases such as `WriteHeader` not +being called, or called more than once, as well as concurrent calls to +`http.ResponseWriter` methods, and even calls happening after the wrapped +`ServeHTTP` has already returned. + +Unfortunately this package is not perfect either. It's possible that it is +still missing some interfaces provided by the Go core (let me know if you find +one), and it won't work for applications adding their own interfaces into the +mix. You can however use `httpsnoop.Unwrap(w)` to access the underlying +`http.ResponseWriter` and type-assert the result to its other interfaces. + +However, hopefully the explanation above has sufficiently scared you away from +rolling your own solution to this problem. httpsnoop may still break your +application, but at least it tries to avoid it as much as possible. + +Anyway, the real problem here is that smuggling additional interfaces inside +`http.ResponseWriter` is a problematic design choice, but it probably goes as +deep as the Go language specification itself. But that's okay, I still prefer +Go over the alternatives ;). + +## Performance + +``` +BenchmarkBaseline-8 20000 94912 ns/op +BenchmarkCaptureMetrics-8 20000 95461 ns/op +``` + +As you can see, using `CaptureMetrics` on a vanilla http.Handler introduces an +overhead of ~500 ns per http request on my machine. However, the margin of +error appears to be larger than that, therefore it should be reasonable to +assume that the overhead introduced by `CaptureMetrics` is absolutely +negligible.
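The interface-preservation behavior this README describes can be checked directly. Below is a short illustrative sketch; the recorder-based setup is an assumption for the example, not part of httpsnoop:

```go
package main

import (
	"fmt"
	"net/http"
	"net/http/httptest"

	"github.com/felixge/httpsnoop"
)

func main() {
	// httptest.ResponseRecorder implements http.ResponseWriter and
	// http.Flusher, but not http.Hijacker.
	var w http.ResponseWriter = httptest.NewRecorder()

	// A naive wrapper struct would hide Flusher; httpsnoop.Wrap does not.
	ww := httpsnoop.Wrap(w, httpsnoop.Hooks{})

	_, flusher := ww.(http.Flusher)
	_, hijacker := ww.(http.Hijacker)
	fmt.Println(flusher, hijacker) // true false

	// Unwrap returns the original writer for further type assertions.
	fmt.Println(httpsnoop.Unwrap(ww) == w) // true
}
```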
+ +## License + +MIT diff --git a/vendor/github.com/felixge/httpsnoop/capture_metrics.go b/vendor/github.com/felixge/httpsnoop/capture_metrics.go new file mode 100644 index 0000000000..c1d1b38056 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/capture_metrics.go @@ -0,0 +1,79 @@ +package httpsnoop + +import ( + "io" + "net/http" + "time" +) + +// Metrics holds metrics captured from CaptureMetrics. +type Metrics struct { + // Code is the first http response code passed to the WriteHeader func of + // the ResponseWriter. If no such call is made, a default code of 200 is + // assumed instead. + Code int + // Duration is the time it took to execute the handler. + Duration time.Duration + // Written is the number of bytes successfully written by the Write or + // ReadFrom function of the ResponseWriter. ResponseWriters may also write + // data to their underlying connection directly (e.g. headers), but those + // are not tracked. Therefore the number of Written bytes will usually match + // the size of the response body. + Written int64 +} + +// CaptureMetrics wraps the given hnd, executes it with the given w and r, and +// returns the metrics it captured from it. +func CaptureMetrics(hnd http.Handler, w http.ResponseWriter, r *http.Request) Metrics { + return CaptureMetricsFn(w, func(ww http.ResponseWriter) { + hnd.ServeHTTP(ww, r) + }) +} + +// CaptureMetricsFn wraps w and calls fn with the wrapped w and returns the +// resulting metrics. This is very similar to CaptureMetrics (which is just +// sugar on top of this func), but is a more usable interface if your +// application doesn't use the Go http.Handler interface. +func CaptureMetricsFn(w http.ResponseWriter, fn func(http.ResponseWriter)) Metrics { + var ( + start = time.Now() + m = Metrics{Code: http.StatusOK} + headerWritten bool + hooks = Hooks{ + WriteHeader: func(next WriteHeaderFunc) WriteHeaderFunc { + return func(code int) { + next(code) + + if !headerWritten { + m.Code = code + headerWritten = true + } + } + }, + + Write: func(next WriteFunc) WriteFunc { + return func(p []byte) (int, error) { + n, err := next(p) + + m.Written += int64(n) + headerWritten = true + return n, err + } + }, + + ReadFrom: func(next ReadFromFunc) ReadFromFunc { + return func(src io.Reader) (int64, error) { + n, err := next(src) + + headerWritten = true + m.Written += n + return n, err + } + }, + } + ) + + fn(Wrap(w, hooks)) + m.Duration = time.Since(start) + return m +} diff --git a/vendor/github.com/felixge/httpsnoop/docs.go b/vendor/github.com/felixge/httpsnoop/docs.go new file mode 100644 index 0000000000..203c35b3c6 --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/docs.go @@ -0,0 +1,10 @@ +// Package httpsnoop provides an easy way to capture http related metrics (i.e. +// response time, bytes written, and http status code) from your application's +// http.Handlers. +// +// Doing this requires non-trivial wrapping of the http.ResponseWriter +// interface, which is also exposed for users interested in a more low-level +// API.
+package httpsnoop + +//go:generate go run codegen/main.go diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go new file mode 100644 index 0000000000..31cbdfb8ef --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_gteq_1.8.go @@ -0,0 +1,436 @@ +// +build go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// PushFunc is part of the http.Pusher interface. +type PushFunc func(target string, opts *http.PushOptions) error + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc + Push func(PushFunc) PushFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// - http.Pusher +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + _, i4 := w.(http.Pusher) + switch { + // combination 1/32 + case !i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/32 + case !i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Pusher + }{rw, rw, rw} + // combination 3/32 + case !i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 4/32 + case !i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw} + // combination 5/32 + case !i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 6/32 + case !i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + http.Pusher + }{rw, rw, rw, rw} + // combination 7/32 + case !i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 8/32 + case !i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 9/32 + case !i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 10/32 + case !i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw} + // combination 11/32 + case !i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 12/32 + case !i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 13/32 + case !i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 14/32 + case !i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 15/32 + case !i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 16/32 + case !i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 17/32 + case i0 && !i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 18/32 + case i0 && !i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Pusher + }{rw, rw, rw, rw} + // combination 19/32 + case i0 && !i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 20/32 + case i0 && !i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + http.Pusher + }{rw, rw, rw, 
rw, rw} + // combination 21/32 + case i0 && !i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 22/32 + case i0 && !i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 23/32 + case i0 && !i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 24/32 + case i0 && !i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 25/32 + case i0 && i1 && !i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 26/32 + case i0 && i1 && !i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Pusher + }{rw, rw, rw, rw, rw} + // combination 27/32 + case i0 && i1 && !i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 28/32 + case i0 && i1 && !i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 29/32 + case i0 && i1 && i2 && !i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 30/32 + case i0 && i1 && i2 && !i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + http.Pusher + }{rw, rw, rw, rw, rw, rw} + // combination 31/32 + case i0 && i1 && i2 && i3 && !i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + // combination 32/32 + case i0 && i1 && i2 && i3 && i4: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + http.Pusher + }{rw, rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +func (w *rw) Push(target string, opts 
*http.PushOptions) error { + f := w.w.(http.Pusher).Push + if w.h.Push != nil { + f = w.h.Push(f) + } + return f(target, opts) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go new file mode 100644 index 0000000000..ab99c07c7a --- /dev/null +++ b/vendor/github.com/felixge/httpsnoop/wrap_generated_lt_1.8.go @@ -0,0 +1,278 @@ +// +build !go1.8 +// Code generated by "httpsnoop/codegen"; DO NOT EDIT + +package httpsnoop + +import ( + "bufio" + "io" + "net" + "net/http" +) + +// HeaderFunc is part of the http.ResponseWriter interface. +type HeaderFunc func() http.Header + +// WriteHeaderFunc is part of the http.ResponseWriter interface. +type WriteHeaderFunc func(code int) + +// WriteFunc is part of the http.ResponseWriter interface. +type WriteFunc func(b []byte) (int, error) + +// FlushFunc is part of the http.Flusher interface. +type FlushFunc func() + +// CloseNotifyFunc is part of the http.CloseNotifier interface. +type CloseNotifyFunc func() <-chan bool + +// HijackFunc is part of the http.Hijacker interface. +type HijackFunc func() (net.Conn, *bufio.ReadWriter, error) + +// ReadFromFunc is part of the io.ReaderFrom interface. +type ReadFromFunc func(src io.Reader) (int64, error) + +// Hooks defines a set of method interceptors for methods included in +// http.ResponseWriter as well as some others. You can think of them as +// middleware for the function calls they target. See Wrap for more details. +type Hooks struct { + Header func(HeaderFunc) HeaderFunc + WriteHeader func(WriteHeaderFunc) WriteHeaderFunc + Write func(WriteFunc) WriteFunc + Flush func(FlushFunc) FlushFunc + CloseNotify func(CloseNotifyFunc) CloseNotifyFunc + Hijack func(HijackFunc) HijackFunc + ReadFrom func(ReadFromFunc) ReadFromFunc +} + +// Wrap returns a wrapped version of w that provides the exact same interface +// as w. Specifically if w implements any combination of: +// +// - http.Flusher +// - http.CloseNotifier +// - http.Hijacker +// - io.ReaderFrom +// +// The wrapped version will implement the exact same combination. If no hooks +// are set, the wrapped version also behaves exactly as w. Hooks targeting +// methods not supported by w are ignored. Any other hooks will intercept the +// method they target and may modify the call's arguments and/or return values. +// The CaptureMetrics implementation serves as a working example for how the +// hooks can be used. 
+func Wrap(w http.ResponseWriter, hooks Hooks) http.ResponseWriter { + rw := &rw{w: w, h: hooks} + _, i0 := w.(http.Flusher) + _, i1 := w.(http.CloseNotifier) + _, i2 := w.(http.Hijacker) + _, i3 := w.(io.ReaderFrom) + switch { + // combination 1/16 + case !i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + }{rw, rw} + // combination 2/16 + case !i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + io.ReaderFrom + }{rw, rw, rw} + // combination 3/16 + case !i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + }{rw, rw, rw} + // combination 4/16 + case !i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 5/16 + case !i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + }{rw, rw, rw} + // combination 6/16 + case !i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 7/16 + case !i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw} + // combination 8/16 + case !i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 9/16 + case i0 && !i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + }{rw, rw, rw} + // combination 10/16 + case i0 && !i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + io.ReaderFrom + }{rw, rw, rw, rw} + // combination 11/16 + case i0 && !i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + }{rw, rw, rw, rw} + // combination 12/16 + case i0 && !i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 13/16 + case i0 && i1 && !i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + }{rw, rw, rw, rw} + // combination 14/16 + case i0 && i1 && !i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + io.ReaderFrom + }{rw, rw, rw, rw, rw} + // combination 15/16 + case i0 && i1 && i2 && !i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + }{rw, rw, rw, rw, rw} + // combination 16/16 + case i0 && i1 && i2 && i3: + return struct { + Unwrapper + http.ResponseWriter + http.Flusher + http.CloseNotifier + http.Hijacker + io.ReaderFrom + }{rw, rw, rw, rw, rw, rw} + } + panic("unreachable") +} + +type rw struct { + w http.ResponseWriter + h Hooks +} + +func (w *rw) Unwrap() http.ResponseWriter { + return w.w +} + +func (w *rw) Header() http.Header { + f := w.w.(http.ResponseWriter).Header + if w.h.Header != nil { + f = w.h.Header(f) + } + return f() +} + +func (w *rw) WriteHeader(code int) { + f := w.w.(http.ResponseWriter).WriteHeader + if w.h.WriteHeader != nil { + f = w.h.WriteHeader(f) + } + f(code) +} + +func (w *rw) Write(b []byte) (int, error) { + f := w.w.(http.ResponseWriter).Write + if w.h.Write != nil { + f = w.h.Write(f) + } + return f(b) +} + +func (w *rw) Flush() { + f := w.w.(http.Flusher).Flush + if w.h.Flush != nil { + f = w.h.Flush(f) + } + f() +} + +func (w *rw) CloseNotify() <-chan bool { + f := 
w.w.(http.CloseNotifier).CloseNotify + if w.h.CloseNotify != nil { + f = w.h.CloseNotify(f) + } + return f() +} + +func (w *rw) Hijack() (net.Conn, *bufio.ReadWriter, error) { + f := w.w.(http.Hijacker).Hijack + if w.h.Hijack != nil { + f = w.h.Hijack(f) + } + return f() +} + +func (w *rw) ReadFrom(src io.Reader) (int64, error) { + f := w.w.(io.ReaderFrom).ReadFrom + if w.h.ReadFrom != nil { + f = w.h.ReadFrom(f) + } + return f(src) +} + +type Unwrapper interface { + Unwrap() http.ResponseWriter +} + +// Unwrap returns the underlying http.ResponseWriter from within zero or more +// layers of httpsnoop wrappers. +func Unwrap(w http.ResponseWriter) http.ResponseWriter { + if rw, ok := w.(Unwrapper); ok { + // recurse until rw.Unwrap() returns a non-Unwrapper + return Unwrap(rw.Unwrap()) + } else { + return w + } +} diff --git a/vendor/github.com/go-logr/logr/.golangci.yaml b/vendor/github.com/go-logr/logr/.golangci.yaml new file mode 100644 index 0000000000..94ff801df1 --- /dev/null +++ b/vendor/github.com/go-logr/logr/.golangci.yaml @@ -0,0 +1,29 @@ +run: + timeout: 1m + tests: true + +linters: + disable-all: true + enable: + - asciicheck + - deadcode + - errcheck + - forcetypeassert + - gocritic + - gofmt + - goimports + - gosimple + - govet + - ineffassign + - misspell + - revive + - staticcheck + - structcheck + - typecheck + - unused + - varcheck + +issues: + exclude-use-default: false + max-issues-per-linter: 0 + max-same-issues: 10 diff --git a/vendor/github.com/go-logr/logr/CHANGELOG.md b/vendor/github.com/go-logr/logr/CHANGELOG.md new file mode 100644 index 0000000000..c356960046 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CHANGELOG.md @@ -0,0 +1,6 @@ +# CHANGELOG + +## v1.0.0-rc1 + +This is the first logged release. Major changes (including breaking changes) +have occurred since earlier tags. diff --git a/vendor/github.com/go-logr/logr/CONTRIBUTING.md b/vendor/github.com/go-logr/logr/CONTRIBUTING.md new file mode 100644 index 0000000000..5d37e294c5 --- /dev/null +++ b/vendor/github.com/go-logr/logr/CONTRIBUTING.md @@ -0,0 +1,17 @@ +# Contributing + +Logr is open to pull-requests, provided they fit within the intended scope of +the project. Specifically, this library aims to be VERY small and minimalist, +with no external dependencies. + +## Compatibility + +This project intends to follow [semantic versioning](http://semver.org) and +is very strict about compatibility. Any proposed changes MUST follow those +rules. + +## Performance + +As a logging library, logr must be as light-weight as possible. Any proposed +code change must include results of running the [benchmark](./benchmark) +before and after the change. diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/go-logr/logr/LICENSE similarity index 99% rename from vendor/github.com/opentracing/opentracing-go/LICENSE rename to vendor/github.com/go-logr/logr/LICENSE index f0027349e8..8dada3edaf 100644 --- a/vendor/github.com/opentracing/opentracing-go/LICENSE +++ b/vendor/github.com/go-logr/logr/LICENSE @@ -186,7 +186,7 @@ same "printed page" as the copyright notice for easier identification within third-party archives. - Copyright 2016 The OpenTracing Authors + Copyright {yyyy} {name of copyright owner} Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. 
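Beyond `CaptureMetrics`, the `Hooks`/`Wrap` machinery vendored above can be used on its own. Here is a hedged sketch of a custom `WriteHeader` hook; the handler, route, and log format are assumptions for illustration:

```go
package main

import (
	"log"
	"net/http"

	"github.com/felixge/httpsnoop"
)

// withStatusLogging wraps h so that every WriteHeader call is logged before
// being forwarded to the real ResponseWriter.
func withStatusLogging(h http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		hooks := httpsnoop.Hooks{
			WriteHeader: func(next httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
				return func(code int) {
					log.Printf("%s %s -> %d", r.Method, r.URL.Path, code)
					next(code)
				}
			},
		}
		h.ServeHTTP(httpsnoop.Wrap(w, hooks), r)
	})
}

func main() {
	mux := http.NewServeMux()
	mux.HandleFunc("/", func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusTeapot)
	})
	log.Fatal(http.ListenAndServe(":8080", withStatusLogging(mux)))
}
```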
diff --git a/vendor/github.com/go-logr/logr/README.md b/vendor/github.com/go-logr/logr/README.md new file mode 100644 index 0000000000..ad825f5f0a --- /dev/null +++ b/vendor/github.com/go-logr/logr/README.md @@ -0,0 +1,278 @@ +# A minimal logging API for Go + +[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/logr.svg)](https://pkg.go.dev/github.com/go-logr/logr) + +logr offers an(other) opinion on how Go programs and libraries can do logging +without becoming coupled to a particular logging implementation. This is not +an implementation of logging - it is an API. In fact it is two APIs with two +different sets of users. + +The `Logger` type is intended for application and library authors. It provides +a relatively small API which can be used everywhere you want to emit logs. It +defers the actual act of writing logs (to files, to stdout, or whatever) to the +`LogSink` interface. + +The `LogSink` interface is intended for logging library implementers. It is a +pure interface which can be implemented by logging frameworks to provide the actual logging +functionality. + +This decoupling allows application and library developers to write code in +terms of `logr.Logger` (which has very low dependency fan-out) while the +implementation of logging is managed "up stack" (e.g. in or near `main()`.) +Application developers can then switch out implementations as necessary. + +Many people assert that libraries should not be logging, and as such efforts +like this are pointless. Those people are welcome to convince the authors of +the tens-of-thousands of libraries that *DO* write logs that they are all +wrong. In the meantime, logr takes a more practical approach. + +## Typical usage + +Somewhere, early in an application's life, it will make a decision about which +logging library (implementation) it actually wants to use. Something like: + +``` + func main() { + // ... other setup code ... + + // Create the "root" logger. We have chosen the "logimpl" implementation, + // which takes some initial parameters and returns a logr.Logger. + logger := logimpl.New(param1, param2) + + // ... other setup code ... +``` + +Most apps will call into other libraries, create structures to govern the flow, +etc. The `logr.Logger` object can be passed to these other libraries, stored +in structs, or even used as a package-global variable, if needed. For example: + +``` + app := createTheAppObject(logger) + app.Run() +``` + +Outside of this early setup, no other packages need to know about the choice of +implementation. They write logs in terms of the `logr.Logger` that they +received: + +``` + type appObject struct { + // ... other fields ... + logger logr.Logger + // ... other fields ... + } + + func (app *appObject) Run() { + app.logger.Info("starting up", "timestamp", time.Now()) + + // ... app code ... +``` + +## Background + +If the Go standard library had defined an interface for logging, this project +probably would not be needed. Alas, here we are. + +### Inspiration + +Before you consider this package, please read [this blog post by the +inimitable Dave Cheney][warning-makes-no-sense]. We really appreciate what +he has to say, and it largely aligns with our own experiences. + +### Differences from Dave's ideas + +The main differences are: + +1. Dave basically proposes doing away with the notion of a logging API in favor +of `fmt.Printf()`. We disagree, especially when you consider things like output +locations, timestamps, file and line decorations, and structured logging. 
This +package restricts the logging API to just 2 types of logs: info and error. + +Info logs are things you want to tell the user which are not errors. Error +logs are, well, errors. If your code receives an `error` from a subordinate +function call and is logging that `error` *and not returning it*, use error +logs. + +2. Verbosity-levels on info logs. This gives developers a chance to indicate +arbitrary grades of importance for info logs, without assigning names with +semantic meaning such as "warning", "trace", and "debug." Superficially this +may feel very similar, but the primary difference is the lack of semantics. +Because verbosity is a numerical value, it's safe to assume that an app running +with higher verbosity means more (and less important) logs will be generated. + +## Implementations (non-exhaustive) + +There are implementations for the following logging libraries: + +- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr) +- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr) +- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr) +- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr) +- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr) +- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr) +- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend) +- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr) +- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr) + +## FAQ + +### Conceptual + +#### Why structured logging? + +- **Structured logs are more easily queryable**: Since you've got + key-value pairs, it's much easier to query your structured logs for + particular values by filtering on the contents of a particular key -- + think searching request logs for error codes, Kubernetes reconcilers for + the name and namespace of the reconciled object, etc. + +- **Structured logging makes it easier to have cross-referenceable logs**: + Similarly to searchability, if you maintain conventions around your + keys, it becomes easy to gather all log lines related to a particular + concept. + +- **Structured logs allow better dimensions of filtering**: if you have + structure to your logs, you've got more precise control over how much + information is logged -- you might choose in a particular configuration + to log certain keys but not others, only log lines where a certain key + matches a certain value, etc., instead of just having v-levels and names + to key off of. + +- **Structured logs better represent structured data**: sometimes, the + data that you want to log is inherently structured (think tuple-like + objects.) Structured logs allow you to preserve that structure when + outputting. + +#### Why V-levels? + +**V-levels give operators an easy way to control the chattiness of log +operations**. V-levels provide a way for a given package to distinguish +the relative importance or verbosity of a given log message. Then, if +a particular logger or package is logging too many messages, the user +of the package can simply change the v-levels for that library. + +#### Why not named levels, like Info/Warning/Error? + +Read [Dave Cheney's post][warning-makes-no-sense]. Then read [Differences +from Dave's ideas](#differences-from-daves-ideas).
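The V-level behavior described above can be seen end to end with funcr, the function-backed implementation vendored later in this diff; the messages, keys, and verbosity setting below are illustrative:

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-logr/logr/funcr"
)

func main() {
	// A logr.Logger whose LogSink writes through a plain function.
	logger := funcr.New(func(prefix, args string) {
		fmt.Println(prefix, args)
	}, funcr.Options{Verbosity: 1})

	logger.Info("starting up", "listenAddr", ":8080")               // V(0): emitted
	logger.V(1).Info("config loaded", "entries", 42)                // V(1) <= Verbosity: emitted
	logger.V(2).Info("debug detail", "cacheSize", 128)              // V(2) > Verbosity: discarded
	logger.Error(errors.New("boom"), "request failed", "code", 500) // errors are always emitted
}
```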
+ +#### Why not allow format strings, too? + +**Format strings negate many of the benefits of structured logs**: + +- They're not easily searchable without resorting to fuzzy searching, + regular expressions, etc. + +- They don't store structured data well, since contents are flattened into + a string. + +- They're not cross-referenceable. + +- They don't compress easily, since the message is not constant. + +(Unless you turn positional parameters into key-value pairs with numerical +keys, at which point you've gotten key-value logging with meaningless +keys.) + +### Practical + +#### Why key-value pairs, and not a map? + +Key-value pairs are *much* easier to optimize, especially around +allocations. Zap (a structured logger that inspired logr's interface) has +[performance measurements](https://github.com/uber-go/zap#performance) +that show this quite nicely. + +While the interface ends up being a little less obvious, you get +potentially better performance, plus avoid making users type +`map[string]string{}` every time they want to log. + +#### What if my V-levels differ between libraries? + +That's fine. Control your V-levels on a per-logger basis, and use the +`WithName` method to pass different loggers to different libraries. + +Generally, you should take care to ensure that you have relatively +consistent V-levels within a given logger, however, as this makes deciding +on what verbosity of logs to request easier. + +#### But I really want to use a format string! + +That's not actually a question. Assuming your question is "how do +I convert my mental model of logging with format strings to logging with +constant messages": + +1. Figure out what the error actually is, as you'd write in a TL;DR style, + and use that as a message. + +2. For every place you'd write a format specifier, look to the word before + it, and add that as a key value pair. + +For instance, consider the following examples (all taken from spots in the +Kubernetes codebase): + +- `klog.V(4).Infof("Client is returning errors: code %v, error %v", + responseCode, err)` becomes `logger.Error(err, "client returned an + error", "code", responseCode)` + +- `klog.V(4).Infof("Got a Retry-After %ds response for attempt %d to %v", + seconds, retries, url)` becomes `logger.V(4).Info("got a retry-after + response when requesting url", "attempt", retries, "after + seconds", seconds, "url", url)` + +If you *really* must use a format string, use it in a key's value, and +call `fmt.Sprintf` yourself. For instance: `log.Printf("unable to +reflect over type %T", obj)` becomes `logger.Info("unable to reflect over +type", "type", fmt.Sprintf("%T", obj))`. In general though, the cases where +this is necessary should be few and far between. + +#### How do I choose my V-levels? + +This is basically the only hard constraint: increase V-levels to denote +more verbose or more debug-y logs. + +Otherwise, you can start out with `0` as "you always want to see this", +`1` as "common logging that you might *possibly* want to turn off", and +`10` as "I would like to performance-test your log collection stack." + +Then gradually choose levels in between as you need them, working your way +down from 10 (for debug and trace style logs) and up from 1 (for chattier +info-type logs.) + +#### How do I choose my keys? + +Keys are fairly flexible, and can hold more or less any string +value. For best compatibility with implementations and consistency +with existing code in other projects, there are a few conventions you +should consider.
+ +- Make your keys human-readable. +- Constant keys are generally a good idea. +- Be consistent across your codebase. +- Keys should naturally match parts of the message string. +- Use lower case for simple keys and + [lowerCamelCase](https://en.wiktionary.org/wiki/lowerCamelCase) for + more complex ones. Kubernetes is one example of a project that has + [adopted that + convention](https://github.com/kubernetes/community/blob/HEAD/contributors/devel/sig-instrumentation/migration-to-structured-logging.md#name-arguments). + +While key names are mostly unrestricted (and spaces are acceptable), +it's generally a good idea to stick to printable ASCII characters, or at +least match the general character set of your log lines. + +#### Why should keys be constant values? + +The point of structured logging is to make later log processing easier. Your +keys are, effectively, the schema of each log message. If you use different +keys across instances of the same log line, you will make your structured logs +much harder to use. `Sprintf()` is for values, not for keys! + +#### Why is this not a pure interface? + +The Logger type is implemented as a struct in order to allow the Go compiler to +optimize things like high-V `Info` logs that are not triggered. Not all of +these optimizations are implemented yet, but this structure was suggested as +a way to ensure they *can* be implemented. All of the real work is behind the +`LogSink` interface. + +[warning-makes-no-sense]: http://dave.cheney.net/2015/11/05/lets-talk-about-logging diff --git a/vendor/github.com/go-logr/logr/discard.go b/vendor/github.com/go-logr/logr/discard.go new file mode 100644 index 0000000000..9d92a38f1d --- /dev/null +++ b/vendor/github.com/go-logr/logr/discard.go @@ -0,0 +1,54 @@ +/* +Copyright 2020 The logr Authors. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package logr + +// Discard returns a Logger that discards all messages logged to it. It can be +// used whenever the caller is not interested in the logs. Logger instances +// produced by this function always compare as equal. +func Discard() Logger { + return Logger{ + level: 0, + sink: discardLogSink{}, + } +} + +// discardLogSink is a LogSink that discards all messages. +type discardLogSink struct{} + +// Verify that it actually implements the interface +var _ LogSink = discardLogSink{} + +func (l discardLogSink) Init(RuntimeInfo) { +} + +func (l discardLogSink) Enabled(int) bool { + return false +} + +func (l discardLogSink) Info(int, string, ...interface{}) { +} + +func (l discardLogSink) Error(error, string, ...interface{}) { +} + +func (l discardLogSink) WithValues(...interface{}) LogSink { + return l +} + +func (l discardLogSink) WithName(string) LogSink { + return l +} diff --git a/vendor/github.com/go-logr/logr/funcr/funcr.go b/vendor/github.com/go-logr/logr/funcr/funcr.go new file mode 100644 index 0000000000..b23ab9679a --- /dev/null +++ b/vendor/github.com/go-logr/logr/funcr/funcr.go @@ -0,0 +1,759 @@ +/* +Copyright 2021 The logr Authors.
+ +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +// Package funcr implements formatting of structured log messages and +// optionally captures the call site and timestamp. +// +// The simplest way to use it is via its implementation of a +// github.com/go-logr/logr.LogSink with output through an arbitrary +// "write" function. See New and NewJSON for details. +// +// Custom LogSinks +// +// For users who need more control, a funcr.Formatter can be embedded inside +// your own custom LogSink implementation. This is useful when the LogSink +// needs to implement additional methods, for example. +// +// Formatting +// +// This will respect logr.Marshaler, fmt.Stringer, and error interfaces for +// values which are being logged. When rendering a struct, funcr will use Go's +// standard JSON tags (all except "string"). +package funcr + +import ( + "bytes" + "encoding" + "fmt" + "path/filepath" + "reflect" + "runtime" + "strconv" + "strings" + "time" + + "github.com/go-logr/logr" +) + +// New returns a logr.Logger which is implemented by an arbitrary function. +func New(fn func(prefix, args string), opts Options) logr.Logger { + return logr.New(newSink(fn, NewFormatter(opts))) +} + +// NewJSON returns a logr.Logger which is implemented by an arbitrary function +// and produces JSON output. +func NewJSON(fn func(obj string), opts Options) logr.Logger { + fnWrapper := func(_, obj string) { + fn(obj) + } + return logr.New(newSink(fnWrapper, NewFormatterJSON(opts))) +} + +// Underlier exposes access to the underlying logging function. Since +// callers only have a logr.Logger, they have to know which +// implementation is in use, so this interface is less of an +// abstraction and more of a way to test type conversion. +type Underlier interface { + GetUnderlying() func(prefix, args string) +} + +func newSink(fn func(prefix, args string), formatter Formatter) logr.LogSink { + l := &fnlogger{ + Formatter: formatter, + write: fn, + } + // For skipping fnlogger.Info and fnlogger.Error. + l.Formatter.AddCallDepth(1) + return l +} + +// Options carries parameters which influence the way logs are generated. +type Options struct { + // LogCaller tells funcr to add a "caller" key to some or all log lines. + // This has some overhead, so some users might not want it. + LogCaller MessageClass + + // LogCallerFunc tells funcr to also log the calling function name. This + // has no effect if caller logging is not enabled (see Options.LogCaller). + LogCallerFunc bool + + // LogTimestamp tells funcr to add a "ts" key to log lines. This has some + // overhead, so some users might not want it. + LogTimestamp bool + + // TimestampFormat tells funcr how to render timestamps when LogTimestamp + // is enabled. If not specified, a default format will be used. For more + // details, see docs for Go's time.Layout. + TimestampFormat string + + // Verbosity tells funcr which V logs to produce. Higher values enable + // more logs. Info logs at or below this level will be written, while logs + // above this level will be discarded. 
+ Verbosity int + + // RenderBuiltinsHook allows users to mutate the list of key-value pairs + // while a log line is being rendered. The kvList argument follows logr + // conventions - each pair of slice elements is comprised of a string key + // and an arbitrary value (verified and sanitized before calling this + // hook). The value returned must follow the same conventions. This hook + // can be used to audit or modify logged data. For example, you might want + // to prefix all of funcr's built-in keys with some string. This hook is + // only called for built-in (provided by funcr itself) key-value pairs. + // Equivalent hooks are offered for key-value pairs saved via + // logr.Logger.WithValues or Formatter.AddValues (see RenderValuesHook) and + // for user-provided pairs (see RenderArgsHook). + RenderBuiltinsHook func(kvList []interface{}) []interface{} + + // RenderValuesHook is the same as RenderBuiltinsHook, except that it is + // only called for key-value pairs saved via logr.Logger.WithValues. See + // RenderBuiltinsHook for more details. + RenderValuesHook func(kvList []interface{}) []interface{} + + // RenderArgsHook is the same as RenderBuiltinsHook, except that it is only + // called for key-value pairs passed directly to Info and Error. See + // RenderBuiltinsHook for more details. + RenderArgsHook func(kvList []interface{}) []interface{} + + // MaxLogDepth tells funcr how many levels of nested fields (e.g. a struct + // that contains a struct, etc.) it may log. Every time it finds a struct, + // slice, array, or map the depth is increased by one. When the maximum is + // reached, the value will be converted to a string indicating that the max + // depth has been exceeded. If this field is not specified, a default + // value will be used. + MaxLogDepth int +} + +// MessageClass indicates which category or categories of messages to consider. +type MessageClass int + +const ( + // None ignores all message classes. + None MessageClass = iota + // All considers all message classes. + All + // Info only considers info messages. + Info + // Error only considers error messages. + Error +) + +// fnlogger inherits some of its LogSink implementation from Formatter +// and just needs to add some glue code. +type fnlogger struct { + Formatter + write func(prefix, args string) +} + +func (l fnlogger) WithName(name string) logr.LogSink { + l.Formatter.AddName(name) + return &l +} + +func (l fnlogger) WithValues(kvList ...interface{}) logr.LogSink { + l.Formatter.AddValues(kvList) + return &l +} + +func (l fnlogger) WithCallDepth(depth int) logr.LogSink { + l.Formatter.AddCallDepth(depth) + return &l +} + +func (l fnlogger) Info(level int, msg string, kvList ...interface{}) { + prefix, args := l.FormatInfo(level, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) Error(err error, msg string, kvList ...interface{}) { + prefix, args := l.FormatError(err, msg, kvList) + l.write(prefix, args) +} + +func (l fnlogger) GetUnderlying() func(prefix, args string) { + return l.write +} + +// Assert conformance to the interfaces. +var _ logr.LogSink = &fnlogger{} +var _ logr.CallDepthLogSink = &fnlogger{} +var _ Underlier = &fnlogger{} + +// NewFormatter constructs a Formatter which emits a JSON-like key=value format. +func NewFormatter(opts Options) Formatter { + return newFormatter(opts, outputKeyValue) +} + +// NewFormatterJSON constructs a Formatter which emits strict JSON. 
+func NewFormatterJSON(opts Options) Formatter { + return newFormatter(opts, outputJSON) +} + +// Defaults for Options. +const defaultTimestampFormat = "2006-01-02 15:04:05.000000" +const defaultMaxLogDepth = 16 + +func newFormatter(opts Options, outfmt outputFormat) Formatter { + if opts.TimestampFormat == "" { + opts.TimestampFormat = defaultTimestampFormat + } + if opts.MaxLogDepth == 0 { + opts.MaxLogDepth = defaultMaxLogDepth + } + f := Formatter{ + outputFormat: outfmt, + prefix: "", + values: nil, + depth: 0, + opts: opts, + } + return f +} + +// Formatter is an opaque struct which can be embedded in a LogSink +// implementation. It should be constructed with NewFormatter. Some of +// its methods directly implement logr.LogSink. +type Formatter struct { + outputFormat outputFormat + prefix string + values []interface{} + valuesStr string + depth int + opts Options +} + +// outputFormat indicates which outputFormat to use. +type outputFormat int + +const ( + // outputKeyValue emits a JSON-like key=value format, but not strict JSON. + outputKeyValue outputFormat = iota + // outputJSON emits strict JSON. + outputJSON +) + +// PseudoStruct is a list of key-value pairs that gets logged as a struct. +type PseudoStruct []interface{} + +// render produces a log line, ready to use. +func (f Formatter) render(builtins, args []interface{}) string { + // Empirically bytes.Buffer is faster than strings.Builder for this. + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + if f.outputFormat == outputJSON { + buf.WriteByte('{') + } + vals := builtins + if hook := f.opts.RenderBuiltinsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, false, false) // keys are ours, no need to escape + continuing := len(builtins) > 0 + if len(f.valuesStr) > 0 { + if continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + buf.WriteByte(' ') + } + } + continuing = true + buf.WriteString(f.valuesStr) + } + vals = args + if hook := f.opts.RenderArgsHook; hook != nil { + vals = hook(f.sanitize(vals)) + } + f.flatten(buf, vals, continuing, true) // escape user-provided keys + if f.outputFormat == outputJSON { + buf.WriteByte('}') + } + return buf.String() +} + +// flatten renders a list of key-value pairs into a buffer. If continuing is +// true, it assumes that the buffer has previous values and will emit a +// separator (which depends on the output format) before the first pair it +// writes. If escapeKeys is true, the keys are assumed to have +// non-JSON-compatible characters in them and must be evaluated for escapes. +// +// This function returns a potentially modified version of kvList, which +// ensures that there is a value for every key (adding a value if needed) and +// that each key is a string (substituting a key if needed). +func (f Formatter) flatten(buf *bytes.Buffer, kvList []interface{}, continuing bool, escapeKeys bool) []interface{} { + // This logic overlaps with sanitize() but saves one type-cast per key, + // which can be measurable. + if len(kvList)%2 != 0 { + kvList = append(kvList, noValue) + } + for i := 0; i < len(kvList); i += 2 { + k, ok := kvList[i].(string) + if !ok { + k = f.nonStringKey(kvList[i]) + kvList[i] = k + } + v := kvList[i+1] + + if i > 0 || continuing { + if f.outputFormat == outputJSON { + buf.WriteByte(',') + } else { + // In theory the format could be something we don't understand. In + // practice, we control it, so it won't be. 
+ buf.WriteByte(' ') + } + } + + if escapeKeys { + buf.WriteString(prettyString(k)) + } else { + // this is faster + buf.WriteByte('"') + buf.WriteString(k) + buf.WriteByte('"') + } + if f.outputFormat == outputJSON { + buf.WriteByte(':') + } else { + buf.WriteByte('=') + } + buf.WriteString(f.pretty(v)) + } + return kvList +} + +func (f Formatter) pretty(value interface{}) string { + return f.prettyWithFlags(value, 0, 0) +} + +const ( + flagRawStruct = 0x1 // do not print braces on structs +) + +// TODO: This is not fast. Most of the overhead goes here. +func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) string { + if depth > f.opts.MaxLogDepth { + return `"<max-log-depth-exceeded>"` + } + + // Handle types that take full control of logging. + if v, ok := value.(logr.Marshaler); ok { + // Replace the value with what the type wants to get logged. + // That then gets handled below via reflection. + value = v.MarshalLog() + } + + // Handle types that want to format themselves. + switch v := value.(type) { + case fmt.Stringer: + value = v.String() + case error: + value = v.Error() + } + + // Handling the most common types without reflect is a small perf win. + switch v := value.(type) { + case bool: + return strconv.FormatBool(v) + case string: + return prettyString(v) + case int: + return strconv.FormatInt(int64(v), 10) + case int8: + return strconv.FormatInt(int64(v), 10) + case int16: + return strconv.FormatInt(int64(v), 10) + case int32: + return strconv.FormatInt(int64(v), 10) + case int64: + return strconv.FormatInt(int64(v), 10) + case uint: + return strconv.FormatUint(uint64(v), 10) + case uint8: + return strconv.FormatUint(uint64(v), 10) + case uint16: + return strconv.FormatUint(uint64(v), 10) + case uint32: + return strconv.FormatUint(uint64(v), 10) + case uint64: + return strconv.FormatUint(v, 10) + case uintptr: + return strconv.FormatUint(uint64(v), 10) + case float32: + return strconv.FormatFloat(float64(v), 'f', -1, 32) + case float64: + return strconv.FormatFloat(v, 'f', -1, 64) + case complex64: + return `"` + strconv.FormatComplex(complex128(v), 'f', -1, 64) + `"` + case complex128: + return `"` + strconv.FormatComplex(v, 'f', -1, 128) + `"` + case PseudoStruct: + buf := bytes.NewBuffer(make([]byte, 0, 1024)) + v = f.sanitize(v) + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < len(v); i += 2 { + if i > 0 { + buf.WriteByte(',') + } + // arbitrary keys might need escaping + buf.WriteString(prettyString(v[i].(string))) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + } + + buf := bytes.NewBuffer(make([]byte, 0, 256)) + t := reflect.TypeOf(value) + if t == nil { + return "null" + } + v := reflect.ValueOf(value) + switch t.Kind() { + case reflect.Bool: + return strconv.FormatBool(v.Bool()) + case reflect.String: + return prettyString(v.String()) + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return strconv.FormatInt(int64(v.Int()), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return strconv.FormatUint(uint64(v.Uint()), 10) + case reflect.Float32: + return strconv.FormatFloat(float64(v.Float()), 'f', -1, 32) + case reflect.Float64: + return strconv.FormatFloat(v.Float(), 'f', -1, 64) + case reflect.Complex64: + return `"` + strconv.FormatComplex(complex128(v.Complex()), 'f', -1, 64) + `"` + case reflect.Complex128: + return `"` +
strconv.FormatComplex(v.Complex(), 'f', -1, 128) + `"` + case reflect.Struct: + if flags&flagRawStruct == 0 { + buf.WriteByte('{') + } + for i := 0; i < t.NumField(); i++ { + fld := t.Field(i) + if fld.PkgPath != "" { + // reflect says this field is only defined for non-exported fields. + continue + } + if !v.Field(i).CanInterface() { + // reflect isn't clear exactly what this means, but we can't use it. + continue + } + name := "" + omitempty := false + if tag, found := fld.Tag.Lookup("json"); found { + if tag == "-" { + continue + } + if comma := strings.Index(tag, ","); comma != -1 { + if n := tag[:comma]; n != "" { + name = n + } + rest := tag[comma:] + if strings.Contains(rest, ",omitempty,") || strings.HasSuffix(rest, ",omitempty") { + omitempty = true + } + } else { + name = tag + } + } + if omitempty && isEmpty(v.Field(i)) { + continue + } + if i > 0 { + buf.WriteByte(',') + } + if fld.Anonymous && fld.Type.Kind() == reflect.Struct && name == "" { + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), flags|flagRawStruct, depth+1)) + continue + } + if name == "" { + name = fld.Name + } + // field names can't contain characters which need escaping + buf.WriteByte('"') + buf.WriteString(name) + buf.WriteByte('"') + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(v.Field(i).Interface(), 0, depth+1)) + } + if flags&flagRawStruct == 0 { + buf.WriteByte('}') + } + return buf.String() + case reflect.Slice, reflect.Array: + buf.WriteByte('[') + for i := 0; i < v.Len(); i++ { + if i > 0 { + buf.WriteByte(',') + } + e := v.Index(i) + buf.WriteString(f.prettyWithFlags(e.Interface(), 0, depth+1)) + } + buf.WriteByte(']') + return buf.String() + case reflect.Map: + buf.WriteByte('{') + // This does not sort the map keys, for best perf. + it := v.MapRange() + i := 0 + for it.Next() { + if i > 0 { + buf.WriteByte(',') + } + // If a map key supports TextMarshaler, use it. + keystr := "" + if m, ok := it.Key().Interface().(encoding.TextMarshaler); ok { + txt, err := m.MarshalText() + if err != nil { + keystr = fmt.Sprintf("", err.Error()) + } else { + keystr = string(txt) + } + keystr = prettyString(keystr) + } else { + // prettyWithFlags will produce already-escaped values + keystr = f.prettyWithFlags(it.Key().Interface(), 0, depth+1) + if t.Key().Kind() != reflect.String { + // JSON only does string keys. Unlike Go's standard JSON, we'll + // convert just about anything to a string. + keystr = prettyString(keystr) + } + } + buf.WriteString(keystr) + buf.WriteByte(':') + buf.WriteString(f.prettyWithFlags(it.Value().Interface(), 0, depth+1)) + i++ + } + buf.WriteByte('}') + return buf.String() + case reflect.Ptr, reflect.Interface: + if v.IsNil() { + return "null" + } + return f.prettyWithFlags(v.Elem().Interface(), 0, depth) + } + return fmt.Sprintf(`""`, t.Kind().String()) +} + +func prettyString(s string) string { + // Avoid escaping (which does allocations) if we can. + if needsEscape(s) { + return strconv.Quote(s) + } + b := bytes.NewBuffer(make([]byte, 0, 1024)) + b.WriteByte('"') + b.WriteString(s) + b.WriteByte('"') + return b.String() +} + +// needsEscape determines whether the input string needs to be escaped or not, +// without doing any allocations. 
+func needsEscape(s string) bool {
+	for _, r := range s {
+		if !strconv.IsPrint(r) || r == '\\' || r == '"' {
+			return true
+		}
+	}
+	return false
+}
+
+func isEmpty(v reflect.Value) bool {
+	switch v.Kind() {
+	case reflect.Array, reflect.Map, reflect.Slice, reflect.String:
+		return v.Len() == 0
+	case reflect.Bool:
+		return !v.Bool()
+	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+		return v.Int() == 0
+	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+		return v.Uint() == 0
+	case reflect.Float32, reflect.Float64:
+		return v.Float() == 0
+	case reflect.Complex64, reflect.Complex128:
+		return v.Complex() == 0
+	case reflect.Interface, reflect.Ptr:
+		return v.IsNil()
+	}
+	return false
+}
+
+// Caller represents the original call site for a log line, after considering
+// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
+// Line fields will always be provided, while the Func field is optional.
+// Users can set the render hook fields in Options to examine logged key-value
+// pairs, one of which will be {"caller", Caller} if the Options.LogCaller
+// field is enabled for the given MessageClass.
+type Caller struct {
+	// File is the basename of the file for this call site.
+	File string `json:"file"`
+	// Line is the line number in the file for this call site.
+	Line int `json:"line"`
+	// Func is the function name for this call site, or empty if
+	// Options.LogCallerFunc is not enabled.
+	Func string `json:"function,omitempty"`
+}
+
+func (f Formatter) caller() Caller {
+	// +1 for this frame, +1 for Info/Error.
+	pc, file, line, ok := runtime.Caller(f.depth + 2)
+	if !ok {
+		return Caller{"<unknown>", 0, ""}
+	}
+	fn := ""
+	if f.opts.LogCallerFunc {
+		if fp := runtime.FuncForPC(pc); fp != nil {
+			fn = fp.Name()
+		}
+	}
+
+	return Caller{filepath.Base(file), line, fn}
+}
+
+const noValue = "<no-value>"
+
+func (f Formatter) nonStringKey(v interface{}) string {
+	return fmt.Sprintf("<non-string-key: %s>", f.snippet(v))
+}
+
+// snippet produces a short snippet string of an arbitrary value.
+func (f Formatter) snippet(v interface{}) string {
+	const snipLen = 16
+
+	snip := f.pretty(v)
+	if len(snip) > snipLen {
+		snip = snip[:snipLen]
+	}
+	return snip
+}
+
+// sanitize ensures that a list of key-value pairs has a value for every key
+// (adding a value if needed) and that each key is a string (substituting a key
+// if needed).
+func (f Formatter) sanitize(kvList []interface{}) []interface{} {
+	if len(kvList)%2 != 0 {
+		kvList = append(kvList, noValue)
+	}
+	for i := 0; i < len(kvList); i += 2 {
+		_, ok := kvList[i].(string)
+		if !ok {
+			kvList[i] = f.nonStringKey(kvList[i])
+		}
+	}
+	return kvList
+}
+
+// Init configures this Formatter from runtime info, such as the call depth
+// imposed by logr itself.
+// Note that this receiver is a pointer, so depth can be saved.
+func (f *Formatter) Init(info logr.RuntimeInfo) {
+	f.depth += info.CallDepth
+}
+
+// Enabled checks whether an info message at the given level should be logged.
+func (f Formatter) Enabled(level int) bool {
+	return level <= f.opts.Verbosity
+}
+
+// GetDepth returns the current depth of this Formatter. This is useful for
+// implementations which do their own caller attribution.
+func (f Formatter) GetDepth() int {
+	return f.depth
+}
+
+// FormatInfo renders an Info log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatInfo(level int, msg string, kvList []interface{}) (prefix, argsStr string) {
+	args := make([]interface{}, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Info {
+		args = append(args, "caller", f.caller())
+	}
+	args = append(args, "level", level, "msg", msg)
+	return prefix, f.render(args, kvList)
+}
+
+// FormatError renders an Error log message into strings. The prefix will be
+// empty when no names were set (via AddNames), or when the output is
+// configured for JSON.
+func (f Formatter) FormatError(err error, msg string, kvList []interface{}) (prefix, argsStr string) {
+	args := make([]interface{}, 0, 64) // using a constant here impacts perf
+	prefix = f.prefix
+	if f.outputFormat == outputJSON {
+		args = append(args, "logger", prefix)
+		prefix = ""
+	}
+	if f.opts.LogTimestamp {
+		args = append(args, "ts", time.Now().Format(f.opts.TimestampFormat))
+	}
+	if policy := f.opts.LogCaller; policy == All || policy == Error {
+		args = append(args, "caller", f.caller())
+	}
+	args = append(args, "msg", msg)
+	var loggableErr interface{}
+	if err != nil {
+		loggableErr = err.Error()
+	}
+	args = append(args, "error", loggableErr)
+	return prefix, f.render(args, kvList)
+}
+
+// AddName appends the specified name. funcr uses '/' characters to separate
+// name elements. Callers should not pass '/' in the provided name string, but
+// this library does not actually enforce that.
+func (f *Formatter) AddName(name string) {
+	if len(f.prefix) > 0 {
+		f.prefix += "/"
+	}
+	f.prefix += name
+}
+
+// AddValues adds key-value pairs to the set of saved values to be logged with
+// each log line.
+func (f *Formatter) AddValues(kvList []interface{}) {
+	// Three slice args forces a copy.
+	n := len(f.values)
+	f.values = append(f.values[:n:n], kvList...)
+
+	vals := f.values
+	if hook := f.opts.RenderValuesHook; hook != nil {
+		vals = hook(f.sanitize(vals))
+	}
+
+	// Pre-render values, so we don't have to do it on each Info/Error call.
+	buf := bytes.NewBuffer(make([]byte, 0, 1024))
+	f.flatten(buf, vals, false, true) // escape user-provided keys
+	f.valuesStr = buf.String()
+}
+
+// AddCallDepth increases the number of stack-frames to skip when attributing
+// the log line to a file and line.
+func (f *Formatter) AddCallDepth(depth int) {
+	f.depth += depth
+}
diff --git a/vendor/github.com/go-logr/logr/logr.go b/vendor/github.com/go-logr/logr/logr.go
new file mode 100644
index 0000000000..c05482a203
--- /dev/null
+++ b/vendor/github.com/go-logr/logr/logr.go
@@ -0,0 +1,501 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// This design derives from Dave Cheney's blog:
+//     http://dave.cheney.net/2015/11/05/lets-talk-about-logging
+
+// Package logr defines a general-purpose logging API and abstract interfaces
+// to back that API. Packages in the Go ecosystem can depend on this package,
+// while callers can implement logging with whatever backend is appropriate.
+//
+// Usage
+//
+// Logging is done using a Logger instance. Logger is a concrete type with
+// methods, which defers the actual logging to a LogSink interface. The main
+// methods of Logger are Info() and Error(). Arguments to Info() and Error()
+// are key/value pairs rather than printf-style formatted strings, emphasizing
+// "structured logging".
+//
+// With Go's standard log package, we might write:
+//     log.Printf("setting target value %s", targetValue)
+//
+// With logr's structured logging, we'd write:
+//     logger.Info("setting target", "value", targetValue)
+//
+// Errors are much the same. Instead of:
+//     log.Printf("failed to open the pod bay door for user %s: %v", user, err)
+//
+// We'd write:
+//     logger.Error(err, "failed to open the pod bay door", "user", user)
+//
+// Info() and Error() are very similar, but they are separate methods so that
+// LogSink implementations can choose to do things like attach additional
+// information (such as stack traces) on calls to Error(). Error() messages are
+// always logged, regardless of the current verbosity. If there is no error
+// instance available, passing nil is valid.
+//
+// Verbosity
+//
+// Often we want to log information only when the application is in "verbose
+// mode". To write log lines that are more verbose, Logger has a V() method.
+// The higher the V-level of a log line, the less critical it is considered.
+// Log-lines with V-levels that are not enabled (as per the LogSink) will not
+// be written. Level V(0) is the default, and logger.V(0).Info() has the same
+// meaning as logger.Info(). Negative V-levels have the same meaning as V(0).
+// Error messages do not have a verbosity level and are always logged.
+//
+// Where we might have written:
+//     if flVerbose >= 2 {
+//         log.Printf("an unusual thing happened")
+//     }
+//
+// We can write:
+//     logger.V(2).Info("an unusual thing happened")
+//
+// Logger Names
+//
+// Logger instances can have name strings so that all messages logged through
+// that instance have additional context. For example, you might want to add
+// a subsystem name:
+//
+//     logger.WithName("compactor").Info("started", "time", time.Now())
+//
+// The WithName() method returns a new Logger, which can be passed to
+// constructors or other functions for further use. Repeated use of WithName()
+// will accumulate name "segments". These name segments will be joined in some
+// way by the LogSink implementation. It is strongly recommended that name
+// segments contain simple identifiers (letters, digits, and hyphen), and do
+// not contain characters that could muddle the log output or confuse the
+// joining operation (e.g. whitespace, commas, periods, slashes, brackets,
+// quotes, etc).
+//
+// Saved Values
+//
+// Logger instances can store any number of key/value pairs, which will be
+// logged alongside all messages logged through that instance. For example,
+// you might want to create a Logger instance per managed object:
+//
+// With the standard log package, we might write:
+//     log.Printf("decided to set field foo to value %q for object %s/%s",
+//         targetValue, object.Namespace, object.Name)
+//
+// With logr we'd write:
+//     // Elsewhere: set up the logger to log the object name.
+//     obj.logger = mainLogger.WithValues(
+//         "name", obj.name, "namespace", obj.namespace)
+//
+//     // later on...
+//     obj.logger.Info("setting foo", "value", targetValue)
+//
+// Best Practices
+//
+// Logger has very few hard rules, with the goal that LogSink implementations
+// might have a lot of freedom to differentiate. There are, however, some
+// things to consider.
+//
+// The log message consists of a constant message attached to the log line.
+// This should generally be a simple description of what's occurring, and should
+// never be a format string. Variable information can then be attached using
+// named values.
+//
+// Keys are arbitrary strings, but should generally be constant values. Values
+// may be any Go value, but how the value is formatted is determined by the
+// LogSink implementation.
+//
+// Key Naming Conventions
+//
+// Keys are not strictly required to conform to any specification or regex, but
+// it is recommended that they:
+//   * be human-readable and meaningful (not auto-generated or simple ordinals)
+//   * be constant (not dependent on input data)
+//   * contain only printable characters
+//   * not contain whitespace or punctuation
+//   * use lower case for simple keys and lowerCamelCase for more complex ones
+//
+// These guidelines help ensure that log data is processed properly regardless
+// of the log implementation. For example, log implementations will try to
+// output JSON data or will store data for later database (e.g. SQL) queries.
+//
+// While users are generally free to use key names of their choice, it's
+// generally best to avoid using the following keys, as they're frequently used
+// by implementations:
+//   * "caller": the calling information (file/line) of a particular log line
+//   * "error": the underlying error value in the `Error` method
+//   * "level": the log level
+//   * "logger": the name of the associated logger
+//   * "msg": the log message
+//   * "stacktrace": the stack trace associated with a particular log line or
+//     error (often from the `Error` message)
+//   * "ts": the timestamp for a log line
+//
+// Implementations are encouraged to make use of these keys to represent the
+// above concepts, when necessary (for example, in a pure-JSON output form, it
+// would be necessary to represent at least message and timestamp as ordinary
+// named values).
+//
+// Break Glass
+//
+// Implementations may choose to give callers access to the underlying
+// logging implementation. The recommended pattern for this is:
+//     // Underlier exposes access to the underlying logging implementation.
+//     // Since callers only have a logr.Logger, they have to know which
+//     // implementation is in use, so this interface is less of an abstraction
+//     // and more of a way to test type conversion.
+//     type Underlier interface {
+//         GetUnderlying() <underlying-type>
+//     }
+//
+// Logger grants access to the sink to enable type assertions like this:
+//     func DoSomethingWithImpl(log logr.Logger) {
+//         if underlier, ok := log.GetSink().(impl.Underlier); ok {
+//             implLogger := underlier.GetUnderlying()
+//             ...
+//         }
+//     }
+//
+// Custom `With*` functions can be implemented by copying the complete
+// Logger struct and replacing the sink in the copy:
+//     // WithFooBar changes the foobar parameter in the log sink and returns a
+//     // new logger with that modified sink. It does nothing for loggers where
+//     // the sink doesn't support that parameter.
+//     func WithFoobar(log logr.Logger, foobar int) logr.Logger {
+//         if foobarLogSink, ok := log.GetSink().(FoobarSink); ok {
+//             log = log.WithSink(foobarLogSink.WithFooBar(foobar))
+//         }
+//         return log
+//     }
+//
+// Don't use New to construct a new Logger with a LogSink retrieved from an
+// existing Logger. Source code attribution might not work correctly and
+// unexported fields in Logger get lost.
+//
+// Beware that the same LogSink instance may be shared by different logger
+// instances. Calling functions that modify the LogSink will affect all of
+// those.
+package logr
+
+import (
+	"context"
+)
+
+// New returns a new Logger instance. This is primarily used by libraries
+// implementing LogSink, rather than end users.
+func New(sink LogSink) Logger {
+	logger := Logger{}
+	logger.setSink(sink)
+	sink.Init(runtimeInfo)
+	return logger
+}
+
+// setSink stores the sink and updates any related fields. It mutates the
+// logger and thus is only safe to use for loggers that are not currently being
+// used concurrently.
+func (l *Logger) setSink(sink LogSink) {
+	l.sink = sink
+}
+
+// GetSink returns the stored sink.
+func (l Logger) GetSink() LogSink {
+	return l.sink
+}
+
+// WithSink returns a copy of the logger with the new sink.
+func (l Logger) WithSink(sink LogSink) Logger {
+	l.setSink(sink)
+	return l
+}
+
+// Logger is an interface to an abstract logging implementation. This is a
+// concrete type for performance reasons, but all the real work is passed on to
+// a LogSink. Implementations of LogSink should provide their own constructors
+// that return Logger, not LogSink.
+//
+// The underlying sink can be accessed through GetSink and be modified through
+// WithSink. This enables the implementation of custom extensions (see "Break
+// Glass" in the package documentation). Normally the sink should be used only
+// indirectly.
+type Logger struct {
+	sink  LogSink
+	level int
+}
+
+// Enabled tests whether this Logger is enabled. For example, commandline
+// flags might be used to set the logging verbosity and disable some info logs.
+func (l Logger) Enabled() bool {
+	return l.sink.Enabled(l.level)
+}
+
+// Info logs a non-error message with the given key/value pairs as context.
+//
+// The msg argument should be used to add some constant description to the log
+// line. The key/value pairs can then be used to add additional variable
+// information. The key/value pairs must alternate string keys and arbitrary
+// values.
+func (l Logger) Info(msg string, keysAndValues ...interface{}) {
+	if l.Enabled() {
+		if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+			withHelper.GetCallStackHelper()()
+		}
+		l.sink.Info(l.level, msg, keysAndValues...)
+	}
+}
+
+// Error logs an error, with the given message and key/value pairs as context.
+// It functions similarly to Info, but may have unique behavior, and should be
+// preferred for logging errors (see the package documentation for more
+// information). The log message will always be emitted, regardless of
+// verbosity level.
+//
+// The msg argument should be used to add context to any underlying error,
+// while the err argument should be used to attach the actual error that
+// triggered this log line, if present. The err parameter is optional
+// and nil may be passed instead of an error instance.
+func (l Logger) Error(err error, msg string, keysAndValues ...interface{}) {
+	if withHelper, ok := l.sink.(CallStackHelperLogSink); ok {
+		withHelper.GetCallStackHelper()()
+	}
+	l.sink.Error(err, msg, keysAndValues...)
+}
+
+// V returns a new Logger instance for a specific verbosity level, relative to
+// this Logger. In other words, V-levels are additive. A higher verbosity
+// level means a log message is less important. Negative V-levels are treated
+// as 0.
+func (l Logger) V(level int) Logger {
+	if level < 0 {
+		level = 0
+	}
+	l.level += level
+	return l
+}
+
+// WithValues returns a new Logger instance with additional key/value pairs.
+// See Info for documentation on how key/value pairs work.
+func (l Logger) WithValues(keysAndValues ...interface{}) Logger {
+	l.setSink(l.sink.WithValues(keysAndValues...))
+	return l
+}
+
+// WithName returns a new Logger instance with the specified name element added
+// to the Logger's name. Successive calls with WithName append additional
+// suffixes to the Logger's name. It's strongly recommended that name segments
+// contain only letters, digits, and hyphens (see the package documentation for
+// more information).
+func (l Logger) WithName(name string) Logger {
+	l.setSink(l.sink.WithName(name))
+	return l
+}
+
+// WithCallDepth returns a Logger instance that offsets the call stack by the
+// specified number of frames when logging call site information, if possible.
+// This is useful for users who have helper functions between the "real" call
+// site and the actual calls to Logger methods. If depth is 0 the attribution
+// should be to the direct caller of this function. If depth is 1 the
+// attribution should skip 1 call frame, and so on. Successive calls to this
+// are additive.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// it will be called and the result returned. If the implementation does not
+// support CallDepthLogSink, the original Logger will be returned.
+//
+// To skip one level, WithCallStackHelper() should be used instead of
+// WithCallDepth(1) because it works with implementations that support the
+// CallDepthLogSink and/or CallStackHelperLogSink interfaces.
+func (l Logger) WithCallDepth(depth int) Logger {
+	if withCallDepth, ok := l.sink.(CallDepthLogSink); ok {
+		l.setSink(withCallDepth.WithCallDepth(depth))
+	}
+	return l
+}
+
+// WithCallStackHelper returns a new Logger instance that skips the direct
+// caller when logging call site information, if possible. This is useful for
+// users who have helper functions between the "real" call site and the actual
+// calls to Logger methods and want to support loggers which depend on marking
+// each individual helper function, like loggers based on testing.T.
+//
+// In addition to using that new logger instance, callers also must call the
+// returned function.
+//
+// If the underlying log implementation supports a WithCallDepth(int) method,
+// WithCallDepth(1) will be called to produce a new logger. If it supports a
+// WithCallStackHelper() method, that will be also called. If the
+// implementation does not support either of these, the original Logger will be
+// returned.
+func (l Logger) WithCallStackHelper() (func(), Logger) { + var helper func() + if withCallDepth, ok := l.sink.(CallDepthLogSink); ok { + l.setSink(withCallDepth.WithCallDepth(1)) + } + if withHelper, ok := l.sink.(CallStackHelperLogSink); ok { + helper = withHelper.GetCallStackHelper() + } else { + helper = func() {} + } + return helper, l +} + +// contextKey is how we find Loggers in a context.Context. +type contextKey struct{} + +// FromContext returns a Logger from ctx or an error if no Logger is found. +func FromContext(ctx context.Context) (Logger, error) { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v, nil + } + + return Logger{}, notFoundError{} +} + +// notFoundError exists to carry an IsNotFound method. +type notFoundError struct{} + +func (notFoundError) Error() string { + return "no logr.Logger was present" +} + +func (notFoundError) IsNotFound() bool { + return true +} + +// FromContextOrDiscard returns a Logger from ctx. If no Logger is found, this +// returns a Logger that discards all log messages. +func FromContextOrDiscard(ctx context.Context) Logger { + if v, ok := ctx.Value(contextKey{}).(Logger); ok { + return v + } + + return Discard() +} + +// NewContext returns a new Context, derived from ctx, which carries the +// provided Logger. +func NewContext(ctx context.Context, logger Logger) context.Context { + return context.WithValue(ctx, contextKey{}, logger) +} + +// RuntimeInfo holds information that the logr "core" library knows which +// LogSinks might want to know. +type RuntimeInfo struct { + // CallDepth is the number of call frames the logr library adds between the + // end-user and the LogSink. LogSink implementations which choose to print + // the original logging site (e.g. file & line) should climb this many + // additional frames to find it. + CallDepth int +} + +// runtimeInfo is a static global. It must not be changed at run time. +var runtimeInfo = RuntimeInfo{ + CallDepth: 1, +} + +// LogSink represents a logging implementation. End-users will generally not +// interact with this type. +type LogSink interface { + // Init receives optional information about the logr library for LogSink + // implementations that need it. + Init(info RuntimeInfo) + + // Enabled tests whether this LogSink is enabled at the specified V-level. + // For example, commandline flags might be used to set the logging + // verbosity and disable some info logs. + Enabled(level int) bool + + // Info logs a non-error message with the given key/value pairs as context. + // The level argument is provided for optional logging. This method will + // only be called when Enabled(level) is true. See Logger.Info for more + // details. + Info(level int, msg string, keysAndValues ...interface{}) + + // Error logs an error, with the given message and key/value pairs as + // context. See Logger.Error for more details. + Error(err error, msg string, keysAndValues ...interface{}) + + // WithValues returns a new LogSink with additional key/value pairs. See + // Logger.WithValues for more details. + WithValues(keysAndValues ...interface{}) LogSink + + // WithName returns a new LogSink with the specified name appended. See + // Logger.WithName for more details. + WithName(name string) LogSink +} + +// CallDepthLogSink represents a Logger that knows how to climb the call stack +// to identify the original call site and can offset the depth by a specified +// number of frames. 
This is useful for users who have helper functions
+// between the "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as file,
+// function, or line) would otherwise log information about the intermediate
+// helper functions.
+//
+// This is an optional interface and implementations are not required to
+// support it.
+type CallDepthLogSink interface {
+	// WithCallDepth returns a LogSink that will offset the call
+	// stack by the specified number of frames when logging call
+	// site information.
+	//
+	// If depth is 0, the LogSink should skip exactly the number
+	// of call frames defined in RuntimeInfo.CallDepth when Info
+	// or Error are called, i.e. the attribution should be to the
+	// direct caller of Logger.Info or Logger.Error.
+	//
+	// If depth is 1 the attribution should skip 1 call frame, and so on.
+	// Successive calls to this are additive.
+	WithCallDepth(depth int) LogSink
+}
+
+// CallStackHelperLogSink represents a Logger that knows how to climb
+// the call stack to identify the original call site and can skip
+// intermediate helper functions if they mark themselves as
+// helper. Go's testing package uses that approach.
+//
+// This is useful for users who have helper functions between the
+// "real" call site and the actual calls to Logger methods.
+// Implementations that log information about the call site (such as
+// file, function, or line) would otherwise log information about the
+// intermediate helper functions.
+//
+// This is an optional interface and implementations are not required
+// to support it. Implementations that choose to support this must not
+// simply implement it as WithCallDepth(1), because
+// Logger.WithCallStackHelper will call both methods if they are
+// present. This should only be implemented for LogSinks that actually
+// need it, as with testing.T.
+type CallStackHelperLogSink interface {
+	// GetCallStackHelper returns a function that must be called
+	// to mark the direct caller as helper function when logging
+	// call site information.
+	GetCallStackHelper() func()
+}
+
+// Marshaler is an optional interface that logged values may choose to
+// implement. Loggers with structured output, such as JSON, should
+// log the object returned by the MarshalLog method instead of the
+// original value.
+type Marshaler interface {
+	// MarshalLog can be used to:
+	//   - ensure that structs are not logged as strings when the original
+	//     value has a String method: return a different type without a
+	//     String method
+	//   - select which fields of a complex type should get logged:
+	//     return a simpler struct with fewer fields
+	//   - log unexported fields: return a different struct
+	//     with exported fields
+	//
+	// It may return any value of any type.
+	MarshalLog() interface{}
+}
diff --git a/vendor/github.com/go-logr/stdr/LICENSE b/vendor/github.com/go-logr/stdr/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+ + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
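The funcr package vendored above is designed to be embedded: funcr.Formatter does the rendering, and a LogSink implementation only supplies the output step. The sketch below is illustrative and not part of the patch; the `stderrSink` type and the sample output line are assumptions, but every funcr and logr call is one of the APIs vendored above.

```go
// Hypothetical example, not part of the vendored code: a minimal LogSink
// that embeds funcr.Formatter and writes rendered lines to stderr.
package main

import (
	"fmt"
	"os"

	"github.com/go-logr/logr"
	"github.com/go-logr/logr/funcr"
)

type stderrSink struct {
	funcr.Formatter // provides Init, Enabled, FormatInfo, FormatError, Add*
	out *os.File
}

func (s stderrSink) Info(level int, msg string, kvList ...interface{}) {
	prefix, args := s.FormatInfo(level, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	fmt.Fprintln(s.out, args)
}

func (s stderrSink) Error(err error, msg string, kvList ...interface{}) {
	prefix, args := s.FormatError(err, msg, kvList)
	if prefix != "" {
		args = prefix + ": " + args
	}
	fmt.Fprintln(s.out, args)
}

func (s stderrSink) WithName(name string) logr.LogSink {
	s.Formatter.AddName(name) // mutate the copy, return it as a new sink
	return &s
}

func (s stderrSink) WithValues(kvList ...interface{}) logr.LogSink {
	s.Formatter.AddValues(kvList)
	return &s
}

func main() {
	sink := stderrSink{
		Formatter: funcr.NewFormatter(funcr.Options{LogTimestamp: true}),
		out:       os.Stderr,
	}
	log := logr.New(&sink).WithName("demo")
	log.Info("hello", "count", 42)
	// Emits something like: demo: "ts"="..." "level"=0 "msg"="hello" "count"=42
}
```

This is the same pattern the stdr adapter below uses, with a *log.Logger in place of the raw stderr writer.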
diff --git a/vendor/github.com/go-logr/stdr/README.md b/vendor/github.com/go-logr/stdr/README.md
new file mode 100644
index 0000000000..5158667890
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/README.md
@@ -0,0 +1,6 @@
+# Minimal Go logging using logr and Go's standard library
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/go-logr/stdr.svg)](https://pkg.go.dev/github.com/go-logr/stdr)
+
+This package implements the [logr interface](https://github.com/go-logr/logr)
+in terms of Go's standard [log package](https://pkg.go.dev/log).
diff --git a/vendor/github.com/go-logr/stdr/stdr.go b/vendor/github.com/go-logr/stdr/stdr.go
new file mode 100644
index 0000000000..93a8aab51b
--- /dev/null
+++ b/vendor/github.com/go-logr/stdr/stdr.go
@@ -0,0 +1,170 @@
+/*
+Copyright 2019 The logr Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+// Package stdr implements github.com/go-logr/logr.Logger in terms of
+// Go's standard log package.
+package stdr
+
+import (
+	"log"
+	"os"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/logr/funcr"
+)
+
+// The global verbosity level. See SetVerbosity().
+var globalVerbosity int
+
+// SetVerbosity sets the global level against which all info logs will be
+// compared. If this is greater than or equal to the "V" of the logger, the
+// message will be logged. A higher value here means more logs will be written.
+// The previous verbosity value is returned. This is not concurrent-safe -
+// callers must be sure to call it from only one goroutine.
+func SetVerbosity(v int) int {
+	old := globalVerbosity
+	globalVerbosity = v
+	return old
+}
+
+// New returns a logr.Logger which is implemented by Go's standard log package,
+// or something like it. If std is nil, this will use a default logger
+// instead.
+//
+// Example: stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+func New(std StdLogger) logr.Logger {
+	return NewWithOptions(std, Options{})
+}
+
+// NewWithOptions returns a logr.Logger which is implemented by Go's standard
+// log package, or something like it. See New for details.
+func NewWithOptions(std StdLogger, opts Options) logr.Logger {
+	if std == nil {
+		// Go's log.Default() is only available in 1.16 and higher.
+		std = log.New(os.Stderr, "", log.LstdFlags)
+	}
+
+	if opts.Depth < 0 {
+		opts.Depth = 0
+	}
+
+	fopts := funcr.Options{
+		LogCaller: funcr.MessageClass(opts.LogCaller),
+	}
+
+	sl := &logger{
+		Formatter: funcr.NewFormatter(fopts),
+		std:       std,
+	}
+
+	// For skipping our own logger.Info/Error.
+	sl.Formatter.AddCallDepth(1 + opts.Depth)
+
+	return logr.New(sl)
+}
+
+// Options carries parameters which influence the way logs are generated.
+type Options struct {
+	// Depth biases the assumed number of call frames to the "true" caller.
+	// This is useful when the calling code calls a function which then calls
+	// stdr (e.g. a logging shim to another API). Values less than zero will
+	// be treated as zero.
+	Depth int
+
+	// LogCaller tells stdr to add a "caller" key to some or all log lines.
+	// Go's log package has options to log this natively, too.
+	LogCaller MessageClass
+
+	// TODO: add an option to log the date/time
+}
+
+// MessageClass indicates which category or categories of messages to consider.
+type MessageClass int
+
+const (
+	// None ignores all message classes.
+	None MessageClass = iota
+	// All considers all message classes.
+	All
+	// Info only considers info messages.
+	Info
+	// Error only considers error messages.
+	Error
+)
+
+// StdLogger is the subset of the Go stdlib log.Logger API that is needed for
+// this adapter.
+type StdLogger interface {
+	// Output is the same as log.Output and log.Logger.Output.
+	Output(calldepth int, logline string) error
+}
+
+type logger struct {
+	funcr.Formatter
+	std StdLogger
+}
+
+var _ logr.LogSink = &logger{}
+var _ logr.CallDepthLogSink = &logger{}
+
+func (l logger) Enabled(level int) bool {
+	return globalVerbosity >= level
+}
+
+func (l logger) Info(level int, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatInfo(level, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) Error(err error, msg string, kvList ...interface{}) {
+	prefix, args := l.FormatError(err, msg, kvList)
+	if prefix != "" {
+		args = prefix + ": " + args
+	}
+	_ = l.std.Output(l.Formatter.GetDepth()+1, args)
+}
+
+func (l logger) WithName(name string) logr.LogSink {
+	l.Formatter.AddName(name)
+	return &l
+}
+
+func (l logger) WithValues(kvList ...interface{}) logr.LogSink {
+	l.Formatter.AddValues(kvList)
+	return &l
+}
+
+func (l logger) WithCallDepth(depth int) logr.LogSink {
+	l.Formatter.AddCallDepth(depth)
+	return &l
+}
+
+// Underlier exposes access to the underlying logging implementation. Since
+// callers only have a logr.Logger, they have to know which implementation is
+// in use, so this interface is less of an abstraction and more of a way to
+// test type conversion.
+type Underlier interface {
+	GetUnderlying() StdLogger
+}
+
+// GetUnderlying returns the StdLogger underneath this logger. Since StdLogger
+// is itself an interface, the result may or may not be a Go log.Logger.
+func (l logger) GetUnderlying() StdLogger {
+	return l.std
+}
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
index 1f4d4d4322..12a135dbe7 100644
--- a/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
+++ b/vendor/github.com/gogo/googleapis/google/rpc/code.pb.go
@@ -21,7 +21,7 @@ var _ = math.Inf
 // proto package needs to be updated.
 const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package
 
-// The canonical error codes for Google APIs.
+// The canonical error codes for gRPC APIs.
 //
 //
 // Sometimes multiple error codes may apply. Services should return
@@ -156,7 +156,8 @@ const (
 	INTERNAL Code = 13
 	// The service is currently unavailable. This is most likely a
 	// transient condition, which can be corrected by retrying with
-	// a backoff.
+	// a backoff. Note that it is not always safe to retry
+	// non-idempotent operations.
 	//
 	// See the guidelines above for deciding between `FAILED_PRECONDITION`,
 	// `ABORTED`, and `UNAVAILABLE`.
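To see how the stdr pieces above fit together, here is a hypothetical wiring sketch; it is not part of the patch, and the logger name, key/value pairs, and image reference are made-up illustrations, but every call is to an API vendored above.

```go
// Hypothetical usage of the vendored stdr adapter.
package main

import (
	"log"
	"os"

	"github.com/go-logr/stdr"
)

func main() {
	stdr.SetVerbosity(1) // enable V(0) and V(1) info logs process-wide

	logger := stdr.NewWithOptions(
		log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile),
		stdr.Options{LogCaller: stdr.Error}, // add a "caller" key on error lines only
	)

	logger = logger.WithName("pull").WithValues("image", "busybox:latest")
	logger.V(1).Info("resolving manifest")
	logger.Error(nil, "resolve failed", "attempt", 3)
}
```

Note that stdr's Enabled consults the process-global verbosity set by SetVerbosity, so V-level filtering is shared by every stdr-backed logger in the process, unlike funcr's per-Formatter Options.Verbosity.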
diff --git a/vendor/github.com/gogo/googleapis/google/rpc/code.proto b/vendor/github.com/gogo/googleapis/google/rpc/code.proto index 0540a4f6c5..29954b14d5 100644 --- a/vendor/github.com/gogo/googleapis/google/rpc/code.proto +++ b/vendor/github.com/gogo/googleapis/google/rpc/code.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -22,7 +22,7 @@ option java_outer_classname = "CodeProto"; option java_package = "com.google.rpc"; option objc_class_prefix = "RPC"; -// The canonical error codes for Google APIs. +// The canonical error codes for gRPC APIs. // // // Sometimes multiple error codes may apply. Services should return @@ -170,7 +170,8 @@ enum Code { // The service is currently unavailable. This is most likely a // transient condition, which can be corrected by retrying with - // a backoff. + // a backoff. Note that it is not always safe to retry + // non-idempotent operations. // // See the guidelines above for deciding between `FAILED_PRECONDITION`, // `ABORTED`, and `UNAVAILABLE`. diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go index 07d251d4b1..18accb7277 100644 --- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go +++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.pb.go @@ -7,6 +7,7 @@ import ( bytes "bytes" fmt "fmt" proto "github.com/gogo/protobuf/proto" + github_com_gogo_protobuf_sortkeys "github.com/gogo/protobuf/sortkeys" types "github.com/gogo/protobuf/types" io "io" math "math" @@ -37,7 +38,7 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // receiving the error response before retrying. If retrying requests also // fail, clients should use an exponential backoff scheme to gradually increase // the delay between retries based on `retry_delay`, until either a maximum -// number of retires have been reached or a maximum retry delay cap has been +// number of retries have been reached or a maximum retry delay cap has been // reached. type RetryInfo struct { // Clients should wait at least this long between retrying the same request. @@ -160,7 +161,7 @@ func (*DebugInfo) XXX_MessageName() string { // a service could respond with the project id and set `service_disabled` // to true. // -// Also see RetryDetail and Help types for other details about handling a +// Also see RetryInfo and Help types for other details about handling a // quota failure. type QuotaFailure struct { // Describes all quota violations. @@ -283,6 +284,114 @@ func (*QuotaFailure_Violation) XXX_MessageName() string { return "google.rpc.QuotaFailure.Violation" } +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED" +// "domain": "googleapis.com" +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. 
+// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT" +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +type ErrorInfo struct { + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match + // /[A-Z0-9_]+/. + Reason string `protobuf:"bytes,1,opt,name=reason,proto3" json:"reason,omitempty"` + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + Domain string `protobuf:"bytes,2,opt,name=domain,proto3" json:"domain,omitempty"` + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // {"instanceLimit": "100/request"}, should be returned as, + // {"instanceLimitPerRequest": "100"}, if the client exceeds the number of + // instances that can be created in a single (batch) request. + Metadata map[string]string `protobuf:"bytes,3,rep,name=metadata,proto3" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *ErrorInfo) Reset() { *m = ErrorInfo{} } +func (*ErrorInfo) ProtoMessage() {} +func (*ErrorInfo) Descriptor() ([]byte, []int) { + return fileDescriptor_851816e4d6b6361a, []int{3} +} +func (m *ErrorInfo) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ErrorInfo) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_ErrorInfo.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *ErrorInfo) XXX_Merge(src proto.Message) { + xxx_messageInfo_ErrorInfo.Merge(m, src) +} +func (m *ErrorInfo) XXX_Size() int { + return m.Size() +} +func (m *ErrorInfo) XXX_DiscardUnknown() { + xxx_messageInfo_ErrorInfo.DiscardUnknown(m) +} + +var xxx_messageInfo_ErrorInfo proto.InternalMessageInfo + +func (m *ErrorInfo) GetReason() string { + if m != nil { + return m.Reason + } + return "" +} + +func (m *ErrorInfo) GetDomain() string { + if m != nil { + return m.Domain + } + return "" +} + +func (m *ErrorInfo) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +func (*ErrorInfo) XXX_MessageName() string { + return "google.rpc.ErrorInfo" +} + // Describes what preconditions have failed. 
// // For example, if an RPC failed because it required the Terms of Service to be @@ -299,7 +408,7 @@ type PreconditionFailure struct { func (m *PreconditionFailure) Reset() { *m = PreconditionFailure{} } func (*PreconditionFailure) ProtoMessage() {} func (*PreconditionFailure) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{3} + return fileDescriptor_851816e4d6b6361a, []int{4} } func (m *PreconditionFailure) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -342,12 +451,12 @@ func (*PreconditionFailure) XXX_MessageName() string { // A message type used to describe a single precondition failure. type PreconditionFailure_Violation struct { // The type of PreconditionFailure. We recommend using a service-specific - // enum type to define the supported precondition violation types. For + // enum type to define the supported precondition violation subjects. For // example, "TOS" for "Terms of Service violation". Type string `protobuf:"bytes,1,opt,name=type,proto3" json:"type,omitempty"` // The subject, relative to the type, that failed. - // For example, "google.com/cloud" relative to the "TOS" type would - // indicate which terms of service is being referenced. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. Subject string `protobuf:"bytes,2,opt,name=subject,proto3" json:"subject,omitempty"` // A description of how the precondition failed. Developers can use this // description to understand how to fix the failure. @@ -362,7 +471,7 @@ type PreconditionFailure_Violation struct { func (m *PreconditionFailure_Violation) Reset() { *m = PreconditionFailure_Violation{} } func (*PreconditionFailure_Violation) ProtoMessage() {} func (*PreconditionFailure_Violation) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{3, 0} + return fileDescriptor_851816e4d6b6361a, []int{4, 0} } func (m *PreconditionFailure_Violation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -429,7 +538,7 @@ type BadRequest struct { func (m *BadRequest) Reset() { *m = BadRequest{} } func (*BadRequest) ProtoMessage() {} func (*BadRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{4} + return fileDescriptor_851816e4d6b6361a, []int{5} } func (m *BadRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -485,7 +594,7 @@ type BadRequest_FieldViolation struct { func (m *BadRequest_FieldViolation) Reset() { *m = BadRequest_FieldViolation{} } func (*BadRequest_FieldViolation) ProtoMessage() {} func (*BadRequest_FieldViolation) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{4, 0} + return fileDescriptor_851816e4d6b6361a, []int{5, 0} } func (m *BadRequest_FieldViolation) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -549,7 +658,7 @@ type RequestInfo struct { func (m *RequestInfo) Reset() { *m = RequestInfo{} } func (*RequestInfo) ProtoMessage() {} func (*RequestInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_851816e4d6b6361a, []int{5} + return fileDescriptor_851816e4d6b6361a, []int{6} } func (m *RequestInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -604,8 +713,7 @@ type ResourceInfo struct { ResourceType string `protobuf:"bytes,1,opt,name=resource_type,json=resourceType,proto3" json:"resource_type,omitempty"` // The name of the resource being accessed. 
For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is - // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. ResourceName string `protobuf:"bytes,2,opt,name=resource_name,json=resourceName,proto3" json:"resource_name,omitempty"` // The owner of the resource (optional). // For example, "user:<owner>" or "project:<Google developer project id>". + if len(m.Metadata) > 0 { + for k := range m.Metadata { + v := m.Metadata[k] + baseI := i + i -= len(v) + copy(dAtA[i:], v) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(v))) + i-- + dAtA[i] = 0x12 + i -= len(k) + copy(dAtA[i:], k) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(k))) + i-- + dAtA[i] = 0xa + i = encodeVarintErrorDetails(dAtA, i, uint64(baseI-i)) + i-- + dAtA[i] = 0x1a + } + } + if len(m.Domain) > 0 { + i -= len(m.Domain) + copy(dAtA[i:], m.Domain) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Domain))) + i-- + dAtA[i] = 0x12 + } + if len(m.Reason) > 0 { + i -= len(m.Reason) + copy(dAtA[i:], m.Reason) + i = encodeVarintErrorDetails(dAtA, i, uint64(len(m.Reason))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *PreconditionFailure) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2706,12 +3003,29 @@ func NewPopulatedQuotaFailure_Violation(r randyErrorDetails, easy bool) *QuotaFa return this } +func NewPopulatedErrorInfo(r randyErrorDetails, easy bool) *ErrorInfo { + this := &ErrorInfo{} + this.Reason = string(randStringErrorDetails(r)) + this.Domain = string(randStringErrorDetails(r)) + if r.Intn(5) != 0 { + v3 := r.Intn(10) + this.Metadata = make(map[string]string) + for i := 0; i < v3; i++ { + this.Metadata[randStringErrorDetails(r)] = randStringErrorDetails(r) + } + } + if !easy && r.Intn(10) != 0 { + this.XXX_unrecognized = randUnrecognizedErrorDetails(r, 4) + } + return this +} + func NewPopulatedPreconditionFailure(r randyErrorDetails, easy bool) *PreconditionFailure { this := &PreconditionFailure{} if r.Intn(5) != 0 { - v3 := r.Intn(5) - this.Violations = make([]*PreconditionFailure_Violation, v3) - for i := 0; i < v3; i++ { + v4 := r.Intn(5) + this.Violations = make([]*PreconditionFailure_Violation, v4) + for i := 0; i < v4; i++ { this.Violations[i] = NewPopulatedPreconditionFailure_Violation(r, easy) } } @@ -2735,9 +3049,9 @@ func NewPopulatedPreconditionFailure_Violation(r randyErrorDetails, easy bool) * func NewPopulatedBadRequest(r randyErrorDetails, easy bool) *BadRequest { this := &BadRequest{} if r.Intn(5) != 0 { - v4 := r.Intn(5) - this.FieldViolations = make([]*BadRequest_FieldViolation, v4) - for i := 0; i < v4; i++ { + v5 := r.Intn(5) + this.FieldViolations = make([]*BadRequest_FieldViolation, v5) + for i := 0; i < v5; i++ { this.FieldViolations[i] = NewPopulatedBadRequest_FieldViolation(r, easy) } } @@ -2782,9 +3096,9 @@ func NewPopulatedResourceInfo(r randyErrorDetails, easy bool) *ResourceInfo { func NewPopulatedHelp(r randyErrorDetails, easy bool) *Help { this := &Help{} if r.Intn(5) != 0 { - v5 := r.Intn(5) - this.Links = make([]*Help_Link, v5) - for i := 0; i < v5; i++ { + v6 := r.Intn(5) + this.Links = make([]*Help_Link, v6) + for i := 0; i < v6; i++ { this.Links[i] = NewPopulatedHelp_Link(r, easy) } } @@ -2833,9 +3147,9 @@ func randUTF8RuneErrorDetails(r randyErrorDetails) rune { return rune(ru + 61) } func randStringErrorDetails(r randyErrorDetails) string { - v6 := r.Intn(100) - tmps := make([]rune, v6) - for i := 0; i < v6;
i++ { + v7 := r.Intn(100) + tmps := make([]rune, v7) + for i := 0; i < v7; i++ { tmps[i] = randUTF8RuneErrorDetails(r) } return string(tmps) @@ -2857,11 +3171,11 @@ func randFieldErrorDetails(dAtA []byte, r randyErrorDetails, fieldNumber int, wi switch wire { case 0: dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) - v7 := r.Int63() + v8 := r.Int63() if r.Intn(2) == 0 { - v7 *= -1 + v8 *= -1 } - dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(v7)) + dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(v8)) case 1: dAtA = encodeVarintPopulateErrorDetails(dAtA, uint64(key)) dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) @@ -2962,6 +3276,34 @@ func (m *QuotaFailure_Violation) Size() (n int) { return n } +func (m *ErrorInfo) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Reason) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + l = len(m.Domain) + if l > 0 { + n += 1 + l + sovErrorDetails(uint64(l)) + } + if len(m.Metadata) > 0 { + for k, v := range m.Metadata { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovErrorDetails(uint64(len(k))) + 1 + len(v) + sovErrorDetails(uint64(len(v))) + n += mapEntrySize + 1 + sovErrorDetails(uint64(mapEntrySize)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *PreconditionFailure) Size() (n int) { if m == nil { return 0 @@ -3205,6 +3547,29 @@ func (this *QuotaFailure_Violation) String() string { }, "") return s } +func (this *ErrorInfo) String() string { + if this == nil { + return "nil" + } + keysForMetadata := make([]string, 0, len(this.Metadata)) + for k, _ := range this.Metadata { + keysForMetadata = append(keysForMetadata, k) + } + github_com_gogo_protobuf_sortkeys.Strings(keysForMetadata) + mapStringForMetadata := "map[string]string{" + for _, k := range keysForMetadata { + mapStringForMetadata += fmt.Sprintf("%v: %v,", k, this.Metadata[k]) + } + mapStringForMetadata += "}" + s := strings.Join([]string{`&ErrorInfo{`, + `Reason:` + fmt.Sprintf("%v", this.Reason) + `,`, + `Domain:` + fmt.Sprintf("%v", this.Domain) + `,`, + `Metadata:` + mapStringForMetadata + `,`, + `XXX_unrecognized:` + fmt.Sprintf("%v", this.XXX_unrecognized) + `,`, + `}`, + }, "") + return s +} func (this *PreconditionFailure) String() string { if this == nil { return "nil" @@ -3407,10 +3772,7 @@ func (m *RetryInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -3525,10 +3887,7 @@ func (m *DebugInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -3613,10 +3972,7 @@ func (m *QuotaFailure) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -3731,10 +4087,249 @@ func (m *QuotaFailure_Violation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return 
ErrInvalidLengthErrorDetails } - if (iNdEx + skippy) < 0 { + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ErrorInfo) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ErrorInfo: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ErrorInfo: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Reason", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Reason = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Domain", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Domain = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Metadata", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthErrorDetails + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthErrorDetails + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Metadata == nil { + m.Metadata = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthErrorDetails + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey < 0 { + return ErrInvalidLengthErrorDetails + } + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowErrorDetails + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthErrorDetails + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue < 0 { + return ErrInvalidLengthErrorDetails + } + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthErrorDetails + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Metadata[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipErrorDetails(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -3819,10 +4414,7 @@ func (m *PreconditionFailure) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -3969,10 +4561,7 @@ func (m *PreconditionFailure_Violation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4057,10 +4646,7 @@ func (m *BadRequest) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4175,10 +4761,7 @@ func (m *BadRequest_FieldViolation) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4293,10 +4876,7 @@ func (m *RequestInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4475,10 +5055,7 @@ func (m *ResourceInfo) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if 
(iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4563,10 +5140,7 @@ func (m *Help) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4681,10 +5255,7 @@ func (m *Help_Link) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { @@ -4799,10 +5370,7 @@ func (m *LocalizedMessage) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthErrorDetails - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthErrorDetails } if (iNdEx + skippy) > l { diff --git a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto index 0682cc97bb..7675af6633 100644 --- a/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto +++ b/vendor/github.com/gogo/googleapis/google/rpc/error_details.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -35,7 +35,7 @@ option objc_class_prefix = "RPC"; // receiving the error response before retrying. If retrying requests also // fail, clients should use an exponential backoff scheme to gradually increase // the delay between retries based on `retry_delay`, until either a maximum -// number of retires have been reached or a maximum retry delay cap has been +// number of retries have been reached or a maximum retry delay cap has been // reached. message RetryInfo { // Clients should wait at least this long between retrying the same request. @@ -60,7 +60,7 @@ message DebugInfo { // a service could respond with the project id and set `service_disabled` // to true. // -// Also see RetryDetail and Help types for other details about handling a +// Also see RetryInfo and Help types for other details about handling a // quota failure. message QuotaFailure { // A message type used to describe a single quota violation. For example, a @@ -85,6 +85,56 @@ message QuotaFailure { repeated Violation violations = 1; } +// Describes the cause of the error with structured details. +// +// Example of an error when contacting the "pubsub.googleapis.com" API when it +// is not enabled: +// +// { "reason": "API_DISABLED", +// "domain": "googleapis.com", +// "metadata": { +// "resource": "projects/123", +// "service": "pubsub.googleapis.com" +// } +// } +// +// This response indicates that the pubsub.googleapis.com API is not enabled. +// +// Example of an error that is returned when attempting to create a Spanner +// instance in a region that is out of stock: +// +// { "reason": "STOCKOUT", +// "domain": "spanner.googleapis.com", +// "metadata": { +// "availableRegions": "us-central1,us-east2" +// } +// } +message ErrorInfo { + // The reason for the error. This is a constant value that identifies the + // proximate cause of the error. Error reasons are unique within a particular + // domain of errors. This should be at most 63 characters and match + // /[A-Z0-9_]+/.
+ string reason = 1; + + // The logical grouping to which the "reason" belongs. The error domain + // is typically the registered service name of the tool or product that + // generates the error. Example: "pubsub.googleapis.com". If the error is + // generated by some common infrastructure, the error domain must be a + // globally unique value that identifies the infrastructure. For Google API + // infrastructure, the error domain is "googleapis.com". + string domain = 2; + + // Additional structured details about this error. + // + // Keys should match /[a-zA-Z0-9-_]/ and be limited to 64 characters in + // length. When identifying the current value of an exceeded limit, the units + // should be contained in the key, not the value. For example, rather than + // returning {"instanceLimit": "100/request"}, return + // {"instanceLimitPerRequest": "100"} if the client exceeds the number of + // instances that can be created in a single (batch) request. + map<string, string> metadata = 3; +} + // Describes what preconditions have failed. // // For example, if an RPC failed because it required the Terms of Service to be @@ -94,13 +144,13 @@ message PreconditionFailure { // A message type used to describe a single precondition failure. message Violation { // The type of PreconditionFailure. We recommend using a service-specific - // enum type to define the supported precondition violation types. For + // enum type to define the supported precondition violation subjects. For // example, "TOS" for "Terms of Service violation". string type = 1; // The subject, relative to the type, that failed. - // For example, "google.com/cloud" relative to the "TOS" type would - // indicate which terms of service is being referenced. + // For example, "google.com/cloud" relative to the "TOS" type would indicate + // which terms of service is being referenced. string subject = 2; // A description of how the precondition failed. Developers can use this @@ -153,8 +203,7 @@ message ResourceInfo { // The name of the resource being accessed. For example, a shared calendar // name: "example.com_4fghdhgsrgh@group.calendar.google.com", if the current - // error is - // [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. + // error is [google.rpc.Code.PERMISSION_DENIED][google.rpc.Code.PERMISSION_DENIED]. string resource_name = 2; // The owner of the resource (optional). diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go index 59793ba380..aba2aba6ad 100644 --- a/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go +++ b/vendor/github.com/gogo/googleapis/google/rpc/status.pb.go @@ -28,65 +28,17 @@ const _ = proto.GoGoProtoPackageIsVersion3 // please upgrade the proto package // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details.
The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). type Status struct { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. Code int32 `protobuf:"varint,1,opt,name=code,proto3" json:"code,omitempty"` // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` // A list of messages that carry the error details. There is a common set of // message types for APIs to use. 
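The slimmed-down comment reduces Status to its three fields. As a hedged sketch of how those fields combine with Any-wrapped details, the snippet below uses github.com/gogo/protobuf/types, which the vendored Status already uses for its Details field; the snippet is illustrative, not part of the diff.

    package main

    import (
        "fmt"

        rpc "github.com/gogo/googleapis/google/rpc"
        "github.com/gogo/protobuf/types"
    )

    func main() {
        // Wrap an ErrorInfo detail in an Any, as the details field expects.
        detail, err := types.MarshalAny(&rpc.ErrorInfo{
            Reason: "STOCKOUT",
            Domain: "spanner.googleapis.com",
        })
        if err != nil {
            panic(err)
        }
        st := &rpc.Status{
            Code:    8, // the google.rpc.Code value for RESOURCE_EXHAUSTED
            Message: "region is out of stock",
            Details: []*types.Any{detail},
        }
        fmt.Println(st.GetCode(), st.GetMessage(), len(st.GetDetails()))
    }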
@@ -159,7 +111,7 @@ func init() { func init() { proto.RegisterFile("google/rpc/status.proto", fileDescriptor_24d244abaf643bfe) } var fileDescriptor_24d244abaf643bfe = []byte{ - // 235 bytes of a gzipped FileDescriptorProto + // 238 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4f, 0xcf, 0xcf, 0x4f, 0xcf, 0x49, 0xd5, 0x2f, 0x2a, 0x48, 0xd6, 0x2f, 0x2e, 0x49, 0x2c, 0x29, 0x2d, 0xd6, 0x2b, 0x28, 0xca, 0x2f, 0xc9, 0x17, 0xe2, 0x82, 0x48, 0xe8, 0x15, 0x15, 0x24, 0x4b, 0x49, 0x42, 0x15, 0x81, @@ -168,13 +120,13 @@ var fileDescriptor_24d244abaf643bfe = []byte{ 0xc0, 0x6c, 0x21, 0x09, 0x2e, 0xf6, 0xdc, 0xd4, 0xe2, 0xe2, 0xc4, 0xf4, 0x54, 0x09, 0x26, 0x05, 0x46, 0x0d, 0xce, 0x20, 0x18, 0x57, 0x48, 0x8f, 0x8b, 0x3d, 0x25, 0xb5, 0x24, 0x31, 0x33, 0xa7, 0x58, 0x82, 0x59, 0x81, 0x59, 0x83, 0xdb, 0x48, 0x44, 0x0f, 0x6a, 0x21, 0xcc, 0x12, 0x3d, 0xc7, - 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xb8, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, + 0xbc, 0xca, 0x20, 0x98, 0x22, 0xa7, 0xc4, 0x0b, 0x0f, 0xe5, 0x18, 0x6e, 0x3c, 0x94, 0x63, 0xf8, 0xf0, 0x50, 0x8e, 0xf1, 0xc7, 0x43, 0x39, 0xc6, 0x86, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xc9, 0x31, 0x9e, 0x78, 0x24, 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x2f, 0x1e, 0xc9, 0x31, 0x7c, 0x00, 0x89, 0x3f, 0x96, 0x63, 0x3c, 0xf1, 0x58, 0x8e, 0x91, 0x8b, 0x2f, 0x39, 0x3f, 0x57, 0x0f, 0xe1, 0x11, 0x27, 0x6e, 0x88, 0x5b, 0x03, 0x40, 0x56, 0x04, 0x30, 0x46, 0x31, 0x17, - 0x15, 0x24, 0x2f, 0x62, 0x62, 0x0e, 0x0a, 0x70, 0x4e, 0x62, 0x03, 0x5b, 0x6b, 0x0c, 0x08, 0x00, - 0x00, 0xff, 0xff, 0xaa, 0x06, 0xa1, 0xaa, 0x10, 0x01, 0x00, 0x00, + 0x15, 0x24, 0xff, 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0x1c, 0x14, 0xe0, 0x9c, 0xc4, 0x06, 0xb6, 0xd9, + 0x18, 0x10, 0x00, 0x00, 0xff, 0xff, 0x16, 0xcd, 0x7b, 0x60, 0x13, 0x01, 0x00, 0x00, } func (this *Status) Compare(that interface{}) int { @@ -626,10 +578,7 @@ func (m *Status) Unmarshal(dAtA []byte) error { if err != nil { return err } - if skippy < 0 { - return ErrInvalidLengthStatus - } - if (iNdEx + skippy) < 0 { + if (skippy < 0) || (iNdEx+skippy) < 0 { return ErrInvalidLengthStatus } if (iNdEx + skippy) > l { diff --git a/vendor/github.com/gogo/googleapis/google/rpc/status.proto b/vendor/github.com/gogo/googleapis/google/rpc/status.proto index abcd453431..4edafe19ec 100644 --- a/vendor/github.com/gogo/googleapis/google/rpc/status.proto +++ b/vendor/github.com/gogo/googleapis/google/rpc/status.proto @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. +// Copyright 2020 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -18,6 +18,7 @@ package google.rpc; import "google/protobuf/any.proto"; +option cc_enable_arenas = true; option go_package = "rpc"; option java_multiple_files = true; option java_outer_classname = "StatusProto"; @@ -26,66 +27,18 @@ option objc_class_prefix = "RPC"; // The `Status` type defines a logical error model that is suitable for // different programming environments, including REST APIs and RPC APIs. It is -// used by [gRPC](https://github.com/grpc). The error model is designed to be: +// used by [gRPC](https://github.com/grpc). Each `Status` message contains +// three pieces of data: error code, error message, and error details. 
// -// - Simple to use and understand for most users -// - Flexible enough to meet unexpected needs -// -// # Overview -// -// The `Status` message contains three pieces of data: error code, error -// message, and error details. The error code should be an enum value of -// [google.rpc.Code][google.rpc.Code], but it may accept additional error codes -// if needed. The error message should be a developer-facing English message -// that helps developers *understand* and *resolve* the error. If a localized -// user-facing error message is needed, put the localized message in the error -// details or localize it in the client. The optional error details may contain -// arbitrary information about the error. There is a predefined set of error -// detail types in the package `google.rpc` that can be used for common error -// conditions. -// -// # Language mapping -// -// The `Status` message is the logical representation of the error model, but it -// is not necessarily the actual wire format. When the `Status` message is -// exposed in different client libraries and different wire protocols, it can be -// mapped differently. For example, it will likely be mapped to some exceptions -// in Java, but more likely mapped to some error codes in C. -// -// # Other uses -// -// The error model and the `Status` message can be used in a variety of -// environments, either with or without APIs, to provide a -// consistent developer experience across different environments. -// -// Example uses of this error model include: -// -// - Partial errors. If a service needs to return partial errors to the client, -// it may embed the `Status` in the normal response to indicate the partial -// errors. -// -// - Workflow errors. A typical workflow has multiple steps. Each step may -// have a `Status` message for error reporting. -// -// - Batch operations. If a client uses batch request and batch response, the -// `Status` message should be used directly inside batch response, one for -// each error sub-response. -// -// - Asynchronous operations. If an API call embeds asynchronous operation -// results in its response, the status of those operations should be -// represented directly using the `Status` message. -// -// - Logging. If some API errors are stored in logs, the message `Status` could -// be used directly after any stripping needed for security/privacy reasons. +// You can find out more about this error model and how to work with it in the +// [API Design Guide](https://cloud.google.com/apis/design/errors). message Status { - // The status code, which should be an enum value of - // [google.rpc.Code][google.rpc.Code]. + // The status code, which should be an enum value of [google.rpc.Code][google.rpc.Code]. int32 code = 1; // A developer-facing error message, which should be in English. Any // user-facing error message should be localized and sent in the - // [google.rpc.Status.details][google.rpc.Status.details] field, or localized - // by the client. + // [google.rpc.Status.details][google.rpc.Status.details] field, or localized by the client. string message = 2; // A list of messages that carry the error details. There is a common set of diff --git a/vendor/github.com/golang/protobuf/descriptor/descriptor.go b/vendor/github.com/golang/protobuf/descriptor/descriptor.go new file mode 100644 index 0000000000..ffde8a6508 --- /dev/null +++ b/vendor/github.com/golang/protobuf/descriptor/descriptor.go @@ -0,0 +1,180 @@ +// Copyright 2016 The Go Authors. All rights reserved. 
+// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package descriptor provides functions for obtaining the protocol buffer +// descriptors of generated Go types. +// +// Deprecated: See the "google.golang.org/protobuf/reflect/protoreflect" package +// for how to obtain an EnumDescriptor or MessageDescriptor in order to +// programatically interact with the protobuf type system. +package descriptor + +import ( + "bytes" + "compress/gzip" + "io/ioutil" + "sync" + + "github.com/golang/protobuf/proto" + "google.golang.org/protobuf/reflect/protodesc" + "google.golang.org/protobuf/reflect/protoreflect" + "google.golang.org/protobuf/runtime/protoimpl" + + descriptorpb "github.com/golang/protobuf/protoc-gen-go/descriptor" +) + +// Message is proto.Message with a method to return its descriptor. +// +// Deprecated: The Descriptor method may not be generated by future +// versions of protoc-gen-go, meaning that this interface may not +// be implemented by many concrete message types. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns the file descriptor proto containing +// the message and the message descriptor proto for the message itself. +// The returned proto messages must not be mutated. +// +// Deprecated: Not all concrete message types satisfy the Message interface. +// Use MessageDescriptorProto instead. If possible, the calling code should +// be rewritten to use protobuf reflection instead. +// See package "google.golang.org/protobuf/reflect/protoreflect" for details. +func ForMessage(m Message) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { + return MessageDescriptorProto(m) +} + +type rawDesc struct { + fileDesc []byte + indexes []int +} + +var rawDescCache sync.Map // map[protoreflect.Descriptor]*rawDesc + +func deriveRawDescriptor(d protoreflect.Descriptor) ([]byte, []int) { + // Fast-path: check whether raw descriptors are already cached. + origDesc := d + if v, ok := rawDescCache.Load(origDesc); ok { + return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes + } + + // Slow-path: derive the raw descriptor from the v2 descriptor. + + // Start with the leaf (a given enum or message declaration) and + // ascend upwards until we hit the parent file descriptor. + var idxs []int + for { + idxs = append(idxs, d.Index()) + d = d.Parent() + if d == nil { + // TODO: We could construct a FileDescriptor stub for standalone + // descriptors to satisfy the API. + return nil, nil + } + if _, ok := d.(protoreflect.FileDescriptor); ok { + break + } + } + + // Obtain the raw file descriptor. + fd := d.(protoreflect.FileDescriptor) + b, _ := proto.Marshal(protodesc.ToFileDescriptorProto(fd)) + file := protoimpl.X.CompressGZIP(b) + + // Reverse the indexes, since we populated it in reverse. + for i, j := 0, len(idxs)-1; i < j; i, j = i+1, j-1 { + idxs[i], idxs[j] = idxs[j], idxs[i] + } + + if v, ok := rawDescCache.LoadOrStore(origDesc, &rawDesc{file, idxs}); ok { + return v.(*rawDesc).fileDesc, v.(*rawDesc).indexes + } + return file, idxs +} + +// EnumRawDescriptor returns the GZIP'd raw file descriptor representing +// the enum and the index path to reach the enum declaration. +// The returned slices must not be mutated. 
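Before the function bodies below, a short usage sketch for this deprecated package. Duration is simply a convenient generated type that still carries the legacy Descriptor() method; any protoc-gen-go message would behave the same way. Illustrative only, under those assumptions.

    package main

    import (
        "fmt"

        "github.com/golang/protobuf/descriptor"
        durpb "github.com/golang/protobuf/ptypes/duration"
    )

    func main() {
        // Resolve the file descriptor proto and the message descriptor
        // proto for a generated message type.
        fd, md := descriptor.MessageDescriptorProto(&durpb.Duration{})
        fmt.Println(fd.GetName(), md.GetName()) // "google/protobuf/duration.proto" "Duration"
    }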
+func EnumRawDescriptor(e proto.GeneratedEnum) ([]byte, []int) { + if ev, ok := e.(interface{ EnumDescriptor() ([]byte, []int) }); ok { + return ev.EnumDescriptor() + } + ed := protoimpl.X.EnumTypeOf(e) + return deriveRawDescriptor(ed.Descriptor()) +} + +// MessageRawDescriptor returns the GZIP'd raw file descriptor representing +// the message and the index path to reach the message declaration. +// The returned slices must not be mutated. +func MessageRawDescriptor(m proto.GeneratedMessage) ([]byte, []int) { + if mv, ok := m.(interface{ Descriptor() ([]byte, []int) }); ok { + return mv.Descriptor() + } + md := protoimpl.X.MessageTypeOf(m) + return deriveRawDescriptor(md.Descriptor()) +} + +var fileDescCache sync.Map // map[*byte]*descriptorpb.FileDescriptorProto + +func deriveFileDescriptor(rawDesc []byte) *descriptorpb.FileDescriptorProto { + // Fast-path: check whether descriptor protos are already cached. + if v, ok := fileDescCache.Load(&rawDesc[0]); ok { + return v.(*descriptorpb.FileDescriptorProto) + } + + // Slow-path: derive the descriptor proto from the GZIP'd message. + zr, err := gzip.NewReader(bytes.NewReader(rawDesc)) + if err != nil { + panic(err) + } + b, err := ioutil.ReadAll(zr) + if err != nil { + panic(err) + } + fd := new(descriptorpb.FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + panic(err) + } + if v, ok := fileDescCache.LoadOrStore(&rawDesc[0], fd); ok { + return v.(*descriptorpb.FileDescriptorProto) + } + return fd +} + +// EnumDescriptorProto returns the file descriptor proto representing +// the enum and the enum descriptor proto for the enum itself. +// The returned proto messages must not be mutated. +func EnumDescriptorProto(e proto.GeneratedEnum) (*descriptorpb.FileDescriptorProto, *descriptorpb.EnumDescriptorProto) { + rawDesc, idxs := EnumRawDescriptor(e) + if rawDesc == nil || idxs == nil { + return nil, nil + } + fd := deriveFileDescriptor(rawDesc) + if len(idxs) == 1 { + return fd, fd.EnumType[idxs[0]] + } + md := fd.MessageType[idxs[0]] + for _, i := range idxs[1 : len(idxs)-1] { + md = md.NestedType[i] + } + ed := md.EnumType[idxs[len(idxs)-1]] + return fd, ed +} + +// MessageDescriptorProto returns the file descriptor proto representing +// the message and the message descriptor proto for the message itself. +// The returned proto messages must not be mutated. +func MessageDescriptorProto(m proto.GeneratedMessage) (*descriptorpb.FileDescriptorProto, *descriptorpb.DescriptorProto) { + rawDesc, idxs := MessageRawDescriptor(m) + if rawDesc == nil || idxs == nil { + return nil, nil + } + fd := deriveFileDescriptor(rawDesc) + md := fd.MessageType[idxs[0]] + for _, i := range idxs[1:] { + md = md.NestedType[i] + } + return fd, md +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt new file mode 100644 index 0000000000..364516251b --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/LICENSE.txt @@ -0,0 +1,27 @@ +Copyright (c) 2015, Gengo, Inc. +All rights reserved. + +Redistribution and use in source and binary forms, with or without modification, +are permitted provided that the following conditions are met: + + * Redistributions of source code must retain the above copyright notice, + this list of conditions and the following disclaimer. 
+ + * Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + + * Neither the name of Gengo, Inc. nor the names of its + contributors may be used to endorse or promote products derived from this + software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND +ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED +WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR +ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES +(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; +LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON +ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel new file mode 100644 index 0000000000..5242751fb2 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/BUILD.bazel @@ -0,0 +1,23 @@ +load("@rules_proto//proto:defs.bzl", "proto_library") +load("@io_bazel_rules_go//go:def.bzl", "go_library") +load("@io_bazel_rules_go//proto:def.bzl", "go_proto_library") + +package(default_visibility = ["//visibility:public"]) + +proto_library( + name = "internal_proto", + srcs = ["errors.proto"], + deps = ["@com_google_protobuf//:any_proto"], +) + +go_proto_library( + name = "internal_go_proto", + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", + proto = ":internal_proto", +) + +go_library( + name = "go_default_library", + embed = [":internal_go_proto"], + importpath = "github.com/grpc-ecosystem/grpc-gateway/internal", +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go new file mode 100644 index 0000000000..61101d7177 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.pb.go @@ -0,0 +1,189 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: internal/errors.proto + +package internal + +import ( + fmt "fmt" + proto "github.com/golang/protobuf/proto" + any "github.com/golang/protobuf/ptypes/any" + math "math" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion3 // please upgrade the proto package + +// Error is the generic error returned from unary RPCs. +type Error struct { + Error string `protobuf:"bytes,1,opt,name=error,proto3" json:"error,omitempty"` + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. 
+ Code int32 `protobuf:"varint,2,opt,name=code,proto3" json:"code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + Details []*any.Any `protobuf:"bytes,4,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *Error) Reset() { *m = Error{} } +func (m *Error) String() string { return proto.CompactTextString(m) } +func (*Error) ProtoMessage() {} +func (*Error) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{0} +} + +func (m *Error) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_Error.Unmarshal(m, b) +} +func (m *Error) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_Error.Marshal(b, m, deterministic) +} +func (m *Error) XXX_Merge(src proto.Message) { + xxx_messageInfo_Error.Merge(m, src) +} +func (m *Error) XXX_Size() int { + return xxx_messageInfo_Error.Size(m) +} +func (m *Error) XXX_DiscardUnknown() { + xxx_messageInfo_Error.DiscardUnknown(m) +} + +var xxx_messageInfo_Error proto.InternalMessageInfo + +func (m *Error) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *Error) GetCode() int32 { + if m != nil { + return m.Code + } + return 0 +} + +func (m *Error) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *Error) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. +type StreamError struct { + GrpcCode int32 `protobuf:"varint,1,opt,name=grpc_code,json=grpcCode,proto3" json:"grpc_code,omitempty"` + HttpCode int32 `protobuf:"varint,2,opt,name=http_code,json=httpCode,proto3" json:"http_code,omitempty"` + Message string `protobuf:"bytes,3,opt,name=message,proto3" json:"message,omitempty"` + HttpStatus string `protobuf:"bytes,4,opt,name=http_status,json=httpStatus,proto3" json:"http_status,omitempty"` + Details []*any.Any `protobuf:"bytes,5,rep,name=details,proto3" json:"details,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *StreamError) Reset() { *m = StreamError{} } +func (m *StreamError) String() string { return proto.CompactTextString(m) } +func (*StreamError) ProtoMessage() {} +func (*StreamError) Descriptor() ([]byte, []int) { + return fileDescriptor_9b093362ca6d1e03, []int{1} +} + +func (m *StreamError) XXX_Unmarshal(b []byte) error { + return xxx_messageInfo_StreamError.Unmarshal(m, b) +} +func (m *StreamError) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + return xxx_messageInfo_StreamError.Marshal(b, m, deterministic) +} +func (m *StreamError) XXX_Merge(src proto.Message) { + xxx_messageInfo_StreamError.Merge(m, src) +} +func (m *StreamError) XXX_Size() int { + return xxx_messageInfo_StreamError.Size(m) +} +func (m *StreamError) XXX_DiscardUnknown() { + xxx_messageInfo_StreamError.DiscardUnknown(m) +} + +var xxx_messageInfo_StreamError proto.InternalMessageInfo + +func (m *StreamError) GetGrpcCode() int32 { + if m != nil { + return m.GrpcCode + } + return 0 +} + +func (m *StreamError) GetHttpCode() int32 { + if m != nil { + return m.HttpCode + } + return 0 +} + +func (m *StreamError) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func (m *StreamError) GetHttpStatus() string { + if m != nil { + return 
m.HttpStatus + } + return "" +} + +func (m *StreamError) GetDetails() []*any.Any { + if m != nil { + return m.Details + } + return nil +} + +func init() { + proto.RegisterType((*Error)(nil), "grpc.gateway.runtime.Error") + proto.RegisterType((*StreamError)(nil), "grpc.gateway.runtime.StreamError") +} + +func init() { proto.RegisterFile("internal/errors.proto", fileDescriptor_9b093362ca6d1e03) } + +var fileDescriptor_9b093362ca6d1e03 = []byte{ + // 252 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x8c, 0x90, 0xc1, 0x4a, 0xc4, 0x30, + 0x10, 0x86, 0x89, 0xbb, 0x75, 0xdb, 0xe9, 0x2d, 0x54, 0x88, 0xee, 0xc1, 0xb2, 0xa7, 0x9e, 0x52, + 0xd0, 0x27, 0xd0, 0xc5, 0x17, 0xe8, 0xde, 0xbc, 0x2c, 0xd9, 0xdd, 0x31, 0x16, 0xda, 0xa4, 0x24, + 0x53, 0xa4, 0xf8, 0x56, 0x3e, 0xa1, 0x24, 0xa5, 0xb0, 0x27, 0xf1, 0xd6, 0xf9, 0xfb, 0xcf, 0x7c, + 0x1f, 0x81, 0xbb, 0xd6, 0x10, 0x3a, 0xa3, 0xba, 0x1a, 0x9d, 0xb3, 0xce, 0xcb, 0xc1, 0x59, 0xb2, + 0xbc, 0xd0, 0x6e, 0x38, 0x4b, 0xad, 0x08, 0xbf, 0xd4, 0x24, 0xdd, 0x68, 0xa8, 0xed, 0xf1, 0xe1, + 0x5e, 0x5b, 0xab, 0x3b, 0xac, 0x63, 0xe7, 0x34, 0x7e, 0xd4, 0xca, 0x4c, 0xf3, 0xc2, 0xee, 0x1b, + 0x92, 0xb7, 0x70, 0x80, 0x17, 0x90, 0xc4, 0x4b, 0x82, 0x95, 0xac, 0xca, 0x9a, 0x79, 0xe0, 0x1c, + 0xd6, 0x67, 0x7b, 0x41, 0x71, 0x53, 0xb2, 0x2a, 0x69, 0xe2, 0x37, 0x17, 0xb0, 0xe9, 0xd1, 0x7b, + 0xa5, 0x51, 0xac, 0x62, 0x77, 0x19, 0xb9, 0x84, 0xcd, 0x05, 0x49, 0xb5, 0x9d, 0x17, 0xeb, 0x72, + 0x55, 0xe5, 0x4f, 0x85, 0x9c, 0xc9, 0x72, 0x21, 0xcb, 0x17, 0x33, 0x35, 0x4b, 0x69, 0xf7, 0xc3, + 0x20, 0x3f, 0x90, 0x43, 0xd5, 0xcf, 0x0e, 0x5b, 0xc8, 0x82, 0xff, 0x31, 0x22, 0x59, 0x44, 0xa6, + 0x21, 0xd8, 0x07, 0xec, 0x16, 0xb2, 0x4f, 0xa2, 0xe1, 0x78, 0xe5, 0x93, 0x86, 0x60, 0xff, 0xb7, + 0xd3, 0x23, 0xe4, 0x71, 0xcd, 0x93, 0xa2, 0x31, 0x78, 0x85, 0xbf, 0x10, 0xa2, 0x43, 0x4c, 0xae, + 0xa5, 0x93, 0x7f, 0x48, 0xbf, 0xc2, 0x7b, 0xba, 0xbc, 0xfd, 0xe9, 0x36, 0x56, 0x9e, 0x7f, 0x03, + 0x00, 0x00, 0xff, 0xff, 0xde, 0x72, 0x6b, 0x83, 0x8e, 0x01, 0x00, 0x00, +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto new file mode 100644 index 0000000000..4fb212c6b6 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/internal/errors.proto @@ -0,0 +1,26 @@ +syntax = "proto3"; +package grpc.gateway.runtime; +option go_package = "internal"; + +import "google/protobuf/any.proto"; + +// Error is the generic error returned from unary RPCs. +message Error { + string error = 1; + // This is to make the error more compatible with users that expect errors to be Status objects: + // https://github.com/grpc/grpc/blob/master/src/proto/grpc/status/status.proto + // It should be the exact same message as the Error field. + int32 code = 2; + string message = 3; + repeated google.protobuf.Any details = 4; +} + +// StreamError is a response type which is returned when +// streaming rpc returns an error. 
+message StreamError { + int32 grpc_code = 1; + int32 http_code = 2; + string message = 3; + string http_status = 4; + repeated google.protobuf.Any details = 5; +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel new file mode 100644 index 0000000000..58b72b9cf7 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/BUILD.bazel @@ -0,0 +1,85 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "context.go", + "convert.go", + "doc.go", + "errors.go", + "fieldmask.go", + "handler.go", + "marshal_httpbodyproto.go", + "marshal_json.go", + "marshal_jsonpb.go", + "marshal_proto.go", + "marshaler.go", + "marshaler_registry.go", + "mux.go", + "pattern.go", + "proto2_convert.go", + "proto_errors.go", + "query.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/runtime", + deps = [ + "//internal:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//descriptor:go_default_library_gen", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@go_googleapis//google/api:httpbody_go_proto", + "@io_bazel_rules_go//proto/wkt:any_go_proto", + "@io_bazel_rules_go//proto/wkt:descriptor_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//grpclog:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) + +go_test( + name = "go_default_test", + size = "small", + srcs = [ + "context_test.go", + "convert_test.go", + "errors_test.go", + "fieldmask_test.go", + "handler_test.go", + "marshal_httpbodyproto_test.go", + "marshal_json_test.go", + "marshal_jsonpb_test.go", + "marshal_proto_test.go", + "marshaler_registry_test.go", + "mux_test.go", + "pattern_test.go", + "query_test.go", + ], + embed = [":go_default_library"], + deps = [ + "//internal:go_default_library", + "//runtime/internal/examplepb:go_default_library", + "//utilities:go_default_library", + "@com_github_golang_protobuf//jsonpb:go_default_library_gen", + "@com_github_golang_protobuf//proto:go_default_library", + "@com_github_golang_protobuf//ptypes:go_default_library_gen", + "@go_googleapis//google/api:httpbody_go_proto", + "@go_googleapis//google/rpc:errdetails_go_proto", + "@io_bazel_rules_go//proto/wkt:duration_go_proto", + "@io_bazel_rules_go//proto/wkt:empty_go_proto", + "@io_bazel_rules_go//proto/wkt:field_mask_go_proto", + "@io_bazel_rules_go//proto/wkt:struct_go_proto", + "@io_bazel_rules_go//proto/wkt:timestamp_go_proto", + "@io_bazel_rules_go//proto/wkt:wrappers_go_proto", + "@org_golang_google_grpc//codes:go_default_library", + "@org_golang_google_grpc//metadata:go_default_library", + "@org_golang_google_grpc//status:go_default_library", + ], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go new file mode 100644 index 0000000000..d8cbd4cc96 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/context.go @@ -0,0 +1,291 @@ +package 
runtime + +import ( + "context" + "encoding/base64" + "fmt" + "net" + "net/http" + "net/textproto" + "strconv" + "strings" + "sync" + "time" + + "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// MetadataHeaderPrefix is the http prefix that represents custom metadata +// parameters to or from a gRPC call. +const MetadataHeaderPrefix = "Grpc-Metadata-" + +// MetadataPrefix is prepended to permanent HTTP header keys (as specified +// by the IANA) when added to the gRPC context. +const MetadataPrefix = "grpcgateway-" + +// MetadataTrailerPrefix is prepended to gRPC metadata as it is converted to +// HTTP headers in a response handled by grpc-gateway +const MetadataTrailerPrefix = "Grpc-Trailer-" + +const metadataGrpcTimeout = "Grpc-Timeout" +const metadataHeaderBinarySuffix = "-Bin" + +const xForwardedFor = "X-Forwarded-For" +const xForwardedHost = "X-Forwarded-Host" + +var ( + // DefaultContextTimeout is used for gRPC call context.WithTimeout whenever a Grpc-Timeout inbound + // header isn't present. If the value is 0 the sent `context` will not have a timeout. + DefaultContextTimeout = 0 * time.Second +) + +func decodeBinHeader(v string) ([]byte, error) { + if len(v)%4 == 0 { + // Input was padded, or padding was not necessary. + return base64.StdEncoding.DecodeString(v) + } + return base64.RawStdEncoding.DecodeString(v) +} + +/* +AnnotateContext adds context information such as metadata from the request. + +At a minimum, the RemoteAddr is included in the fashion of "X-Forwarded-For", +except that the forwarded destination is not another HTTP service but rather +a gRPC service. +*/ +func AnnotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewOutgoingContext(ctx, md), nil +} + +// AnnotateIncomingContext adds context information such as metadata from the request. +// Attach metadata as incoming context. +func AnnotateIncomingContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, error) { + ctx, md, err := annotateContext(ctx, mux, req) + if err != nil { + return nil, err + } + if md == nil { + return ctx, nil + } + + return metadata.NewIncomingContext(ctx, md), nil +} + +func annotateContext(ctx context.Context, mux *ServeMux, req *http.Request) (context.Context, metadata.MD, error) { + var pairs []string + timeout := DefaultContextTimeout + if tm := req.Header.Get(metadataGrpcTimeout); tm != "" { + var err error + timeout, err = timeoutDecode(tm) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid grpc-timeout: %s", tm) + } + } + + for key, vals := range req.Header { + key = textproto.CanonicalMIMEHeaderKey(key) + for _, val := range vals { + // For backwards-compatibility, pass through 'authorization' header with no prefix. + if key == "Authorization" { + pairs = append(pairs, "authorization", val) + } + if h, ok := mux.incomingHeaderMatcher(key); ok { + // Handles "-bin" metadata in grpc, since grpc will do another base64 + // encode before sending to server, we need to decode it first. 
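The padded/unpadded split in decodeBinHeader above reflects that clients may send "-bin" metadata values with or without base64 padding. A tiny sketch of the two encodings it accepts (assumes encoding/base64 and fmt are imported; the byte values are illustrative):

    raw := []byte{0xde, 0xad, 0xbe, 0xef, 0x01}
    fmt.Println(base64.StdEncoding.EncodeToString(raw))    // padded form, length a multiple of 4
    fmt.Println(base64.RawStdEncoding.EncodeToString(raw)) // unpadded form, taken by the fallback branch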
+ if strings.HasSuffix(key, metadataHeaderBinarySuffix) { + b, err := decodeBinHeader(val) + if err != nil { + return nil, nil, status.Errorf(codes.InvalidArgument, "invalid binary header %s: %s", key, err) + } + + val = string(b) + } + pairs = append(pairs, h, val) + } + } + } + if host := req.Header.Get(xForwardedHost); host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), host) + } else if req.Host != "" { + pairs = append(pairs, strings.ToLower(xForwardedHost), req.Host) + } + + if addr := req.RemoteAddr; addr != "" { + if remoteIP, _, err := net.SplitHostPort(addr); err == nil { + if fwd := req.Header.Get(xForwardedFor); fwd == "" { + pairs = append(pairs, strings.ToLower(xForwardedFor), remoteIP) + } else { + pairs = append(pairs, strings.ToLower(xForwardedFor), fmt.Sprintf("%s, %s", fwd, remoteIP)) + } + } + } + + if timeout != 0 { + ctx, _ = context.WithTimeout(ctx, timeout) + } + if len(pairs) == 0 { + return ctx, nil, nil + } + md := metadata.Pairs(pairs...) + for _, mda := range mux.metadataAnnotators { + md = metadata.Join(md, mda(ctx, req)) + } + return ctx, md, nil +} + +// ServerMetadata consists of metadata sent from gRPC server. +type ServerMetadata struct { + HeaderMD metadata.MD + TrailerMD metadata.MD +} + +type serverMetadataKey struct{} + +// NewServerMetadataContext creates a new context with ServerMetadata +func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context { + return context.WithValue(ctx, serverMetadataKey{}, md) +} + +// ServerMetadataFromContext returns the ServerMetadata in ctx +func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) { + md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata) + return +} + +// ServerTransportStream implements grpc.ServerTransportStream. +// It should only be used by the generated files to support grpc.SendHeader +// outside of gRPC server use. +type ServerTransportStream struct { + mu sync.Mutex + header metadata.MD + trailer metadata.MD +} + +// Method returns the method for the stream. +func (s *ServerTransportStream) Method() string { + return "" +} + +// Header returns the header metadata of the stream. +func (s *ServerTransportStream) Header() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.header.Copy() +} + +// SetHeader sets the header metadata. +func (s *ServerTransportStream) SetHeader(md metadata.MD) error { + if md.Len() == 0 { + return nil + } + + s.mu.Lock() + s.header = metadata.Join(s.header, md) + s.mu.Unlock() + return nil +} + +// SendHeader sets the header metadata. +func (s *ServerTransportStream) SendHeader(md metadata.MD) error { + return s.SetHeader(md) +} + +// Trailer returns the cached trailer metadata. +func (s *ServerTransportStream) Trailer() metadata.MD { + s.mu.Lock() + defer s.mu.Unlock() + return s.trailer.Copy() +} + +// SetTrailer sets the trailer metadata. 
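Tying the annotation path above together, a hedged sketch of how request headers might surface as outgoing gRPC metadata. The NewServeMux constructor and the default header-matching behavior live elsewhere in the runtime package and are assumptions here, not shown in this diff.

    req, _ := http.NewRequest("GET", "http://localhost/v1/example", nil)
    req.Header.Set("Grpc-Metadata-Foo", "bar") // custom metadata, per MetadataHeaderPrefix
    req.Header.Set("Grpc-Timeout", "250m")     // 250 milliseconds, per timeoutDecode below

    ctx, err := runtime.AnnotateContext(context.Background(), runtime.NewServeMux(), req)
    if err != nil {
        // handle the error
    }
    if md, ok := metadata.FromOutgoingContext(ctx); ok {
        fmt.Println(md.Get("foo")) // ["bar"], assuming the default incoming-header matcher
    }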
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
new file mode 100644
index 0000000000..2c279344dc
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/convert.go
@@ -0,0 +1,318 @@
+package runtime
+
+import (
+	"encoding/base64"
+	"fmt"
+	"strconv"
+	"strings"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/ptypes/duration"
+	"github.com/golang/protobuf/ptypes/timestamp"
+	"github.com/golang/protobuf/ptypes/wrappers"
+)
+
+// String just returns the given string.
+// It exists only for symmetry with the other converter functions.
+func String(val string) (string, error) {
+	return val, nil
+}
+
+// StringSlice converts 'val' where individual strings are separated by
+// 'sep' into a string slice.
+func StringSlice(val, sep string) ([]string, error) {
+	return strings.Split(val, sep), nil
+}
+
+// Bool converts the given string representation of a boolean value into bool.
+func Bool(val string) (bool, error) {
+	return strconv.ParseBool(val)
+}
+
+// BoolSlice converts 'val' where individual booleans are separated by
+// 'sep' into a bool slice.
+func BoolSlice(val, sep string) ([]bool, error) {
+	s := strings.Split(val, sep)
+	values := make([]bool, len(s))
+	for i, v := range s {
+		value, err := Bool(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float64 converts the given string representation of a floating point number into float64.
+func Float64(val string) (float64, error) {
+	return strconv.ParseFloat(val, 64)
+}
+
+// Float64Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float64 slice.
+func Float64Slice(val, sep string) ([]float64, error) {
+	s := strings.Split(val, sep)
+	values := make([]float64, len(s))
+	for i, v := range s {
+		value, err := Float64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Float32 converts the given string representation of a floating point number into float32.
+func Float32(val string) (float32, error) {
+	f, err := strconv.ParseFloat(val, 32)
+	if err != nil {
+		return 0, err
+	}
+	return float32(f), nil
+}
+
+// Float32Slice converts 'val' where individual floating point numbers are separated by
+// 'sep' into a float32 slice.
+func Float32Slice(val, sep string) ([]float32, error) {
+	s := strings.Split(val, sep)
+	values := make([]float32, len(s))
+	for i, v := range s {
+		value, err := Float32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int64 converts the given string representation of an integer into int64.
+func Int64(val string) (int64, error) {
+	return strconv.ParseInt(val, 0, 64)
+}
+
+// Int64Slice converts 'val' where individual integers are separated by
+// 'sep' into an int64 slice.
+func Int64Slice(val, sep string) ([]int64, error) {
+	s := strings.Split(val, sep)
+	values := make([]int64, len(s))
+	for i, v := range s {
+		value, err := Int64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Int32 converts the given string representation of an integer into int32.
+func Int32(val string) (int32, error) {
+	i, err := strconv.ParseInt(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return int32(i), nil
+}
+
+// Int32Slice converts 'val' where individual integers are separated by
+// 'sep' into an int32 slice.
+func Int32Slice(val, sep string) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Int32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint64 converts the given string representation of an integer into uint64.
+func Uint64(val string) (uint64, error) {
+	return strconv.ParseUint(val, 0, 64)
+}
+
+// Uint64Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint64 slice.
+func Uint64Slice(val, sep string) ([]uint64, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint64, len(s))
+	for i, v := range s {
+		value, err := Uint64(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Uint32 converts the given string representation of an integer into uint32.
+func Uint32(val string) (uint32, error) {
+	i, err := strconv.ParseUint(val, 0, 32)
+	if err != nil {
+		return 0, err
+	}
+	return uint32(i), nil
+}
+
+// Uint32Slice converts 'val' where individual integers are separated by
+// 'sep' into a uint32 slice.
+func Uint32Slice(val, sep string) ([]uint32, error) {
+	s := strings.Split(val, sep)
+	values := make([]uint32, len(s))
+	for i, v := range s {
+		value, err := Uint32(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Bytes converts the given string representation of a byte sequence into a slice of bytes.
+// The byte sequence is expected to be base64-encoded (standard or URL-safe alphabet).
+func Bytes(val string) ([]byte, error) {
+	b, err := base64.StdEncoding.DecodeString(val)
+	if err != nil {
+		b, err = base64.URLEncoding.DecodeString(val)
+		if err != nil {
+			return nil, err
+		}
+	}
+	return b, nil
+}
+
+// BytesSlice converts 'val' where individual base64-encoded byte sequences
+// are separated by 'sep' into a slice of byte slices.
+func BytesSlice(val, sep string) ([][]byte, error) {
+	s := strings.Split(val, sep)
+	values := make([][]byte, len(s))
+	for i, v := range s {
+		value, err := Bytes(v)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+// Timestamp converts the given RFC3339 formatted string into a timestamp.Timestamp.
+func Timestamp(val string) (*timestamp.Timestamp, error) {
+	var r timestamp.Timestamp
+	err := jsonpb.UnmarshalString(val, &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Duration converts the given string into a duration.Duration.
+func Duration(val string) (*duration.Duration, error) {
+	var r duration.Duration
+	err := jsonpb.UnmarshalString(val, &r)
+	if err != nil {
+		return nil, err
+	}
+	return &r, nil
+}
+
+// Enum converts the given string into an int32 that should be type cast into the
+// correct enum proto type.
+func Enum(val string, enumValMap map[string]int32) (int32, error) {
+	e, ok := enumValMap[val]
+	if ok {
+		return e, nil
+	}
+
+	i, err := Int32(val)
+	if err != nil {
+		return 0, fmt.Errorf("%s is not valid", val)
+	}
+	for _, v := range enumValMap {
+		if v == i {
+			return i, nil
+		}
+	}
+	return 0, fmt.Errorf("%s is not valid", val)
+}
+
+// EnumSlice converts 'val' where individual enums are separated by 'sep'
+// into an int32 slice. Each individual int32 should be type cast into the
+// correct enum proto type.
+func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
+	s := strings.Split(val, sep)
+	values := make([]int32, len(s))
+	for i, v := range s {
+		value, err := Enum(v, enumValMap)
+		if err != nil {
+			return values, err
+		}
+		values[i] = value
+	}
+	return values, nil
+}
+
+/*
+	Support for google.protobuf.wrappers on top of primitive types
+*/
+
+// StringValue well-known type support as wrapper around string type
+func StringValue(val string) (*wrappers.StringValue, error) {
+	return &wrappers.StringValue{Value: val}, nil
+}
+
+// FloatValue well-known type support as wrapper around float32 type
+func FloatValue(val string) (*wrappers.FloatValue, error) {
+	parsedVal, err := Float32(val)
+	return &wrappers.FloatValue{Value: parsedVal}, err
+}
+
+// DoubleValue well-known type support as wrapper around float64 type
+func DoubleValue(val string) (*wrappers.DoubleValue, error) {
+	parsedVal, err := Float64(val)
+	return &wrappers.DoubleValue{Value: parsedVal}, err
+}
+
+// BoolValue well-known type support as wrapper around bool type
+func BoolValue(val string) (*wrappers.BoolValue, error) {
+	parsedVal, err := Bool(val)
+	return &wrappers.BoolValue{Value: parsedVal}, err
+}
+
+// Int32Value well-known type support as wrapper around int32 type
+func Int32Value(val string) (*wrappers.Int32Value, error) {
+	parsedVal, err := Int32(val)
+	return &wrappers.Int32Value{Value: parsedVal}, err
+}
+
+// UInt32Value well-known type support as wrapper around uint32 type
+func UInt32Value(val string) (*wrappers.UInt32Value, error) {
+	parsedVal, err := Uint32(val)
+	return &wrappers.UInt32Value{Value: parsedVal}, err
+}
+
+// Int64Value well-known type support as wrapper around int64 type
+func Int64Value(val string) (*wrappers.Int64Value, error) {
+	parsedVal, err := Int64(val)
+	return &wrappers.Int64Value{Value: parsedVal}, err
+}
+
+// UInt64Value well-known type support as wrapper around uint64 type
+func UInt64Value(val string) (*wrappers.UInt64Value, error) {
+	parsedVal, err := Uint64(val)
+	return &wrappers.UInt64Value{Value: parsedVal}, err
+}
+
+// BytesValue well-known type support as wrapper around []byte type
+func BytesValue(val string) (*wrappers.BytesValue, error) {
+	parsedVal, err := Bytes(val)
+	return &wrappers.BytesValue{Value: parsedVal}, err
+}
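The converters above are what generated gateway code calls to decode path and query parameters. A minimal sketch of direct use (the input values are made up):

package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	ids, err := runtime.Int64Slice("1,2,3", ",") // []int64{1, 2, 3}
	if err != nil {
		panic(err)
	}
	// Enum accepts either the symbolic name or the numeric value.
	states := map[string]int32{"UNKNOWN": 0, "ACTIVE": 1}
	state, err := runtime.Enum("ACTIVE", states) // 1
	if err != nil {
		panic(err)
	}
	fmt.Println(ids, state)
}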
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
new file mode 100644
index 0000000000..b6e5ddf7a9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/doc.go
@@ -0,0 +1,5 @@
+/*
+Package runtime contains runtime helper functions used by
+servers which protoc-gen-grpc-gateway generates.
+*/
+package runtime
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
new file mode 100644
index 0000000000..b2ce743bdd
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/errors.go
@@ -0,0 +1,186 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"strings"
+
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// HTTPStatusFromCode converts a gRPC error code into the corresponding HTTP response status.
+// See: https://github.com/googleapis/googleapis/blob/master/google/rpc/code.proto
+func HTTPStatusFromCode(code codes.Code) int {
+	switch code {
+	case codes.OK:
+		return http.StatusOK
+	case codes.Canceled:
+		return http.StatusRequestTimeout
+	case codes.Unknown:
+		return http.StatusInternalServerError
+	case codes.InvalidArgument:
+		return http.StatusBadRequest
+	case codes.DeadlineExceeded:
+		return http.StatusGatewayTimeout
+	case codes.NotFound:
+		return http.StatusNotFound
+	case codes.AlreadyExists:
+		return http.StatusConflict
+	case codes.PermissionDenied:
+		return http.StatusForbidden
+	case codes.Unauthenticated:
+		return http.StatusUnauthorized
+	case codes.ResourceExhausted:
+		return http.StatusTooManyRequests
+	case codes.FailedPrecondition:
+		// Note, this deliberately doesn't translate to the similarly named '412 Precondition Failed' HTTP response status.
+		return http.StatusBadRequest
+	case codes.Aborted:
+		return http.StatusConflict
+	case codes.OutOfRange:
+		return http.StatusBadRequest
+	case codes.Unimplemented:
+		return http.StatusNotImplemented
+	case codes.Internal:
+		return http.StatusInternalServerError
+	case codes.Unavailable:
+		return http.StatusServiceUnavailable
+	case codes.DataLoss:
+		return http.StatusInternalServerError
+	}
+
+	grpclog.Infof("Unknown gRPC error code: %v", code)
+	return http.StatusInternalServerError
+}
+
+var (
+	// HTTPError replies to the request with an error.
+	//
+	// HTTPError is called:
+	//  - From generated per-endpoint gateway handler code, when calling the backend results in an error.
+	//  - From gateway runtime code, when forwarding the response message results in an error.
+	//
+	// The default value for HTTPError calls the custom error handler configured on the ServeMux via the
+	// WithProtoErrorHandler serve option if that option was used, calling GlobalHTTPErrorHandler otherwise.
+	//
+	// To customize the error handling of a particular ServeMux instance, use the WithProtoErrorHandler
+	// serve option.
+	//
+	// To customize the error format for all ServeMux instances not using the WithProtoErrorHandler serve
+	// option, set GlobalHTTPErrorHandler to a custom function.
+	//
+	// Setting this variable directly to customize error format is deprecated.
+	HTTPError = MuxOrGlobalHTTPError
+
+	// GlobalHTTPErrorHandler is the HTTPError handler for all ServeMux instances not using the
+	// WithProtoErrorHandler serve option.
+	//
+	// You can set a custom function to this variable to customize error format.
+	GlobalHTTPErrorHandler = DefaultHTTPError
+
+	// OtherErrorHandler handles gateway errors from parsing and routing client requests for all
+	// ServeMux instances not using the WithProtoErrorHandler serve option.
+	//
+	// It returns the following error codes: StatusMethodNotAllowed, StatusNotFound, and StatusBadRequest.
+	//
+	// To customize parsing and routing error handling of a particular ServeMux instance, use the
+	// WithProtoErrorHandler serve option.
+	//
+	// To customize parsing and routing error handling of all ServeMux instances not using the
+	// WithProtoErrorHandler serve option, set a custom function to this variable.
+	OtherErrorHandler = DefaultOtherErrorHandler
+)
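Since GlobalHTTPErrorHandler is a package-level variable, callers can swap in their own formatter. A hedged sketch that reuses HTTPStatusFromCode (the JSON body shape is illustrative, not the package's default):

package main

import (
	"context"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

func init() {
	runtime.GlobalHTTPErrorHandler = func(ctx context.Context, mux *runtime.ServeMux,
		m runtime.Marshaler, w http.ResponseWriter, r *http.Request, err error) {
		s, ok := status.FromError(err)
		if !ok {
			s = status.New(codes.Unknown, err.Error())
		}
		w.Header().Set("Content-Type", m.ContentType())
		w.WriteHeader(runtime.HTTPStatusFromCode(s.Code()))
		// Minimal body; the default handler emits a richer structure.
		buf, _ := m.Marshal(map[string]string{"message": s.Message()})
		w.Write(buf)
	}
}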
+// MuxOrGlobalHTTPError uses the mux-configured error handler, falling back to GlobalHTTPErrorHandler.
+func MuxOrGlobalHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	if mux.protoErrorHandler != nil {
+		mux.protoErrorHandler(ctx, mux, marshaler, w, r, err)
+	} else {
+		GlobalHTTPErrorHandler(ctx, mux, marshaler, w, r, err)
+	}
+}
+
+// DefaultHTTPError is the default implementation of HTTPError.
+// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a JSON object,
+// which contains a member whose key is "error" and whose value is err.Error().
+func DefaultHTTPError(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, err error) {
+	const fallback = `{"error": "failed to marshal error message"}`
+
+	s, ok := status.FromError(err)
+	if !ok {
+		s = status.New(codes.Unknown, err.Error())
+	}
+
+	w.Header().Del("Trailer")
+	w.Header().Del("Transfer-Encoding")
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
+		pb := s.Proto()
+		contentType = typeMarshaler.ContentTypeFromMessage(pb)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	body := &internal.Error{
+		Error:   s.Message(),
+		Message: s.Message(),
+		Code:    int32(s.Code()),
+		Details: s.Proto().GetDetails(),
+	}
+
+	buf, merr := marshaler.Marshal(body)
+	if merr != nil {
+		grpclog.Infof("Failed to marshal error message %q: %v", body, merr)
+		w.WriteHeader(http.StatusInternalServerError)
+		if _, err := io.WriteString(w, fallback); err != nil {
+			grpclog.Infof("Failed to write response: %v", err)
+		}
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	// RFC 7230 https://tools.ietf.org/html/rfc7230#section-4.1.2
+	// Unless the request includes a TE header field indicating "trailers"
+	// is acceptable, as described in Section 4.3, a server SHOULD NOT
+	// generate trailer fields that it believes are necessary for the user
+	// agent to receive.
+	var wantsTrailers bool
+
+	if te := r.Header.Get("TE"); strings.Contains(strings.ToLower(te), "trailers") {
+		wantsTrailers = true
+		handleForwardResponseTrailerHeader(w, md)
+		w.Header().Set("Transfer-Encoding", "chunked")
+	}
+
+	st := HTTPStatusFromCode(s.Code())
+	w.WriteHeader(st)
+	if _, err := w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	if wantsTrailers {
+		handleForwardResponseTrailer(w, md)
+	}
+}
+// DefaultOtherErrorHandler is the default implementation of OtherErrorHandler.
+// It simply writes a string representation of the given error into "w".
+func DefaultOtherErrorHandler(w http.ResponseWriter, _ *http.Request, msg string, code int) {
+	http.Error(w, msg, code)
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
new file mode 100644
index 0000000000..aef645e40b
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/fieldmask.go
@@ -0,0 +1,89 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+	"strings"
+
+	descriptor2 "github.com/golang/protobuf/descriptor"
+	"github.com/golang/protobuf/protoc-gen-go/descriptor"
+	"google.golang.org/genproto/protobuf/field_mask"
+)
+
+func translateName(name string, md *descriptor.DescriptorProto) (string, *descriptor.DescriptorProto) {
+	// TODO - should really gate this with a test that the marshaller has used json names
+	if md != nil {
+		for _, f := range md.Field {
+			if f.JsonName != nil && f.Name != nil && *f.JsonName == name {
+				var subType *descriptor.DescriptorProto
+
+				// If the field has a TypeName then we retrieve the nested type for translating the embedded message names.
+				if f.TypeName != nil {
+					typeSplit := strings.Split(*f.TypeName, ".")
+					typeName := typeSplit[len(typeSplit)-1]
+					for _, t := range md.NestedType {
+						if typeName == *t.Name {
+							subType = t
+						}
+					}
+				}
+				return *f.Name, subType
+			}
+		}
+	}
+	return name, nil
+}
+
+// FieldMaskFromRequestBody creates a FieldMask printing all complete paths from the JSON body.
+func FieldMaskFromRequestBody(r io.Reader, md *descriptor.DescriptorProto) (*field_mask.FieldMask, error) {
+	fm := &field_mask.FieldMask{}
+	var root interface{}
+	if err := json.NewDecoder(r).Decode(&root); err != nil {
+		if err == io.EOF {
+			return fm, nil
+		}
+		return nil, err
+	}
+
+	queue := []fieldMaskPathItem{{node: root, md: md}}
+	for len(queue) > 0 {
+		// dequeue an item
+		item := queue[0]
+		queue = queue[1:]
+
+		if m, ok := item.node.(map[string]interface{}); ok {
+			// if the item is an object, then enqueue all of its children
+			for k, v := range m {
+				protoName, subMd := translateName(k, item.md)
+				if subMsg, ok := v.(descriptor2.Message); ok {
+					_, subMd = descriptor2.ForMessage(subMsg)
+				}
+
+				var path string
+				if item.path == "" {
+					path = protoName
+				} else {
+					path = item.path + "." + protoName
+				}
+				queue = append(queue, fieldMaskPathItem{path: path, node: v, md: subMd})
+			}
+		} else if len(item.path) > 0 {
+			// otherwise, it's a leaf node so print its path
+			fm.Paths = append(fm.Paths, item.path)
+		}
+	}
+
+	return fm, nil
+}
+
+// fieldMaskPathItem stores an in-progress deconstruction of a path for a fieldmask
+type fieldMaskPathItem struct {
+	// the list of prior fields leading up to node connected by dots
+	path string
+
+	// the generic decoded JSON object that is the current item to inspect for further path extraction
+	node interface{}
+
+	// descriptor for parent message
+	md *descriptor.DescriptorProto
+}
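A hypothetical caller deriving update paths from a PATCH body with FieldMaskFromRequestBody; passing a nil descriptor skips the JSON-name translation done by translateName:

package main

import (
	"fmt"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func main() {
	body := strings.NewReader(`{"name": "x", "config": {"ttl": 30}}`)
	fm, err := runtime.FieldMaskFromRequestBody(body, nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(fm.Paths) // e.g. [name config.ttl]; map iteration order varies
}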
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
new file mode 100644
index 0000000000..e6e8f286e1
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/handler.go
@@ -0,0 +1,212 @@
+package runtime
+
+import (
+	"context"
+	"errors"
+	"fmt"
+	"io"
+	"net/http"
+	"net/textproto"
+
+	"github.com/golang/protobuf/proto"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/grpclog"
+)
+
+var errEmptyResponse = errors.New("empty response")
+
+// ForwardResponseStream forwards the stream from gRPC server to REST client.
+func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, recv func() (proto.Message, error), opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	f, ok := w.(http.Flusher)
+	if !ok {
+		grpclog.Infof("Flush not supported in %T", w)
+		http.Error(w, "unexpected type of web server", http.StatusInternalServerError)
+		return
+	}
+
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+		http.Error(w, "unexpected error", http.StatusInternalServerError)
+		return
+	}
+	handleForwardResponseServerMetadata(w, mux, md)
+
+	w.Header().Set("Transfer-Encoding", "chunked")
+	w.Header().Set("Content-Type", marshaler.ContentType())
+	if err := handleForwardResponseOptions(ctx, w, nil, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	var delimiter []byte
+	if d, ok := marshaler.(Delimited); ok {
+		delimiter = d.Delimiter()
+	} else {
+		delimiter = []byte("\n")
+	}
+
+	var wroteHeader bool
+	for {
+		resp, err := recv()
+		if err == io.EOF {
+			return
+		}
+		if err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+		if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+
+		var buf []byte
+		switch {
+		case resp == nil:
+			buf, err = marshaler.Marshal(errorChunk(streamError(ctx, mux.streamErrorHandler, errEmptyResponse)))
+		default:
+			result := map[string]interface{}{"result": resp}
+			if rb, ok := resp.(responseBody); ok {
+				result["result"] = rb.XXX_ResponseBody()
+			}
+
+			buf, err = marshaler.Marshal(result)
+		}
+
+		if err != nil {
+			grpclog.Infof("Failed to marshal response chunk: %v", err)
+			handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
+			return
+		}
+		if _, err = w.Write(buf); err != nil {
+			grpclog.Infof("Failed to send response chunk: %v", err)
+			return
+		}
+		wroteHeader = true
+		if _, err = w.Write(delimiter); err != nil {
+			grpclog.Infof("Failed to send delimiter chunk: %v", err)
+			return
+		}
+		f.Flush()
+	}
+}
+func handleForwardResponseServerMetadata(w http.ResponseWriter, mux *ServeMux, md ServerMetadata) {
+	for k, vs := range md.HeaderMD {
+		if h, ok := mux.outgoingHeaderMatcher(k); ok {
+			for _, v := range vs {
+				w.Header().Add(h, v)
+			}
+		}
+	}
+}
+
+func handleForwardResponseTrailerHeader(w http.ResponseWriter, md ServerMetadata) {
+	for k := range md.TrailerMD {
+		tKey := textproto.CanonicalMIMEHeaderKey(fmt.Sprintf("%s%s", MetadataTrailerPrefix, k))
+		w.Header().Add("Trailer", tKey)
+	}
+}
+
+func handleForwardResponseTrailer(w http.ResponseWriter, md ServerMetadata) {
+	for k, vs := range md.TrailerMD {
+		tKey := fmt.Sprintf("%s%s", MetadataTrailerPrefix, k)
+		for _, v := range vs {
+			w.Header().Add(tKey, v)
+		}
+	}
+}
+
+// responseBody is implemented by generated response structs whose
+// `google.api.HttpRule` specifies a `response_body` field; XXX_ResponseBody
+// returns that field for marshaling to the response body.
+type responseBody interface {
+	XXX_ResponseBody() interface{}
+}
+
+// ForwardResponseMessage forwards the message "resp" from gRPC server to REST client.
+func ForwardResponseMessage(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, req *http.Request, resp proto.Message, opts ...func(context.Context, http.ResponseWriter, proto.Message) error) {
+	md, ok := ServerMetadataFromContext(ctx)
+	if !ok {
+		grpclog.Infof("Failed to extract ServerMetadata from context")
+	}
+
+	handleForwardResponseServerMetadata(w, mux, md)
+	handleForwardResponseTrailerHeader(w, md)
+
+	contentType := marshaler.ContentType()
+	// Check the marshaler at run time in order to keep backwards compatibility.
+	// An interface param needs to be added to the ContentType() function on
+	// the Marshaler interface to be able to remove this check.
+	if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok {
+		contentType = typeMarshaler.ContentTypeFromMessage(resp)
+	}
+	w.Header().Set("Content-Type", contentType)
+
+	if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+	var buf []byte
+	var err error
+	if rb, ok := resp.(responseBody); ok {
+		buf, err = marshaler.Marshal(rb.XXX_ResponseBody())
+	} else {
+		buf, err = marshaler.Marshal(resp)
+	}
+	if err != nil {
+		grpclog.Infof("Marshal error: %v", err)
+		HTTPError(ctx, mux, marshaler, w, req, err)
+		return
+	}
+
+	if _, err = w.Write(buf); err != nil {
+		grpclog.Infof("Failed to write response: %v", err)
+	}
+
+	handleForwardResponseTrailer(w, md)
+}
+
+func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, resp proto.Message, opts []func(context.Context, http.ResponseWriter, proto.Message) error) error {
+	if len(opts) == 0 {
+		return nil
+	}
+	for _, opt := range opts {
+		if err := opt(ctx, w, resp); err != nil {
+			grpclog.Infof("Error handling ForwardResponseOptions: %v", err)
+			return err
+		}
+	}
+	return nil
+}
+
+func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
+	serr := streamError(ctx, mux.streamErrorHandler, err)
+	if !wroteHeader {
+		w.WriteHeader(int(serr.HttpCode))
+	}
+	buf, merr := marshaler.Marshal(errorChunk(serr))
+	if merr != nil {
+		grpclog.Infof("Failed to marshal an error: %v", merr)
+		return
+	}
+	if _, werr := w.Write(buf); werr != nil {
+		grpclog.Infof("Failed to notify error to client: %v", werr)
+		return
+	}
+}
+// streamError returns the payload for the final message in a response stream
+// that represents the given err.
+func streamError(ctx context.Context, errHandler StreamErrorHandlerFunc, err error) *StreamError {
+	serr := errHandler(ctx, err)
+	if serr != nil {
+		return serr
+	}
+	// TODO: log about misbehaving stream error handler?
+	return DefaultHTTPStreamErrorHandler(ctx, err)
+}
+
+func errorChunk(err *StreamError) map[string]proto.Message {
+	return map[string]proto.Message{"error": (*internal.StreamError)(err)}
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
new file mode 100644
index 0000000000..525b0338c7
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_httpbodyproto.go
@@ -0,0 +1,43 @@
+package runtime
+
+import (
+	"google.golang.org/genproto/googleapis/api/httpbody"
+)
+
+// SetHTTPBodyMarshaler overwrites the default marshaler with the HTTPBodyMarshaler.
+func SetHTTPBodyMarshaler(serveMux *ServeMux) {
+	serveMux.marshalers.mimeMap[MIMEWildcard] = &HTTPBodyMarshaler{
+		Marshaler: &JSONPb{OrigName: true},
+	}
+}
+
+// HTTPBodyMarshaler is a Marshaler which supports marshaling of a
+// google.api.HttpBody message as the full response body if it is
+// the actual message used as the response. If not, it simply falls
+// back to the Marshaler specified as its default Marshaler.
+type HTTPBodyMarshaler struct {
+	Marshaler
+}
+
+// ContentType implementation to keep backwards compatibility with the Marshaler interface.
+func (h *HTTPBodyMarshaler) ContentType() string {
+	return h.ContentTypeFromMessage(nil)
+}
+
+// ContentTypeFromMessage returns the content type specified by v if v is a
+// google.api.HttpBody message; otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) ContentTypeFromMessage(v interface{}) string {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.GetContentType()
+	}
+	return h.Marshaler.ContentType()
+}
+
+// Marshal marshals "v" by returning the body bytes if v is a
+// google.api.HttpBody message, otherwise it falls back to the default Marshaler.
+func (h *HTTPBodyMarshaler) Marshal(v interface{}) ([]byte, error) {
+	if httpBody, ok := v.(*httpbody.HttpBody); ok {
+		return httpBody.Data, nil
+	}
+	return h.Marshaler.Marshal(v)
+}
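To make google.api.HttpBody responses bypass JSON marshaling, a mux would be configured as in the following sketch (SetHTTPBodyMarshaler is defined above):

package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	mux := runtime.NewServeMux()
	// The wildcard marshaler is replaced by an HTTPBodyMarshaler wrapping
	// JSONPb, so google.api.HttpBody responses are written as raw bytes
	// with their own content type.
	runtime.SetHTTPBodyMarshaler(mux)
	return mux
}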
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
new file mode 100644
index 0000000000..f9d3a585a4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_json.go
@@ -0,0 +1,45 @@
+package runtime
+
+import (
+	"encoding/json"
+	"io"
+)
+
+// JSONBuiltin is a Marshaler which marshals/unmarshals into/from JSON
+// with the standard "encoding/json" package of Go.
+// Although it is generally faster for simple proto messages than JSONPb,
+// it does not support advanced features of protobuf, e.g. map, oneof, ....
+//
+// The NewEncoder and NewDecoder methods return *json.Encoder and
+// *json.Decoder respectively.
+type JSONBuiltin struct{}
+
+// ContentType always returns "application/json".
+func (*JSONBuiltin) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONBuiltin) Marshal(v interface{}) ([]byte, error) {
+	return json.Marshal(v)
+}
+
+// Unmarshal unmarshals JSON data into "v".
+func (j *JSONBuiltin) Unmarshal(data []byte, v interface{}) error {
+	return json.Unmarshal(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONBuiltin) NewDecoder(r io.Reader) Decoder {
+	return json.NewDecoder(r)
+}
+
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONBuiltin) NewEncoder(w io.Writer) Encoder {
+	return json.NewEncoder(w)
+}
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONBuiltin) Delimiter() []byte {
+	return []byte("\n")
+}
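One way to choose between JSONBuiltin and JSONPb per Content-Type is WithMarshalerOption, which appears later in this diff; the extra MIME type here is an invented example:

package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		// Hypothetical split: plain encoding/json for one MIME type,
		// jsonpb for everything else.
		runtime.WithMarshalerOption("application/json+plain", &runtime.JSONBuiltin{}),
		runtime.WithMarshalerOption(runtime.MIMEWildcard, &runtime.JSONPb{OrigName: true}),
	)
}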
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
new file mode 100644
index 0000000000..f0de351b21
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_jsonpb.go
@@ -0,0 +1,262 @@
+package runtime
+
+import (
+	"bytes"
+	"encoding/json"
+	"fmt"
+	"io"
+	"reflect"
+
+	"github.com/golang/protobuf/jsonpb"
+	"github.com/golang/protobuf/proto"
+)
+
+// JSONPb is a Marshaler which marshals/unmarshals into/from JSON
+// with "github.com/golang/protobuf/jsonpb".
+// Unlike JSONBuiltin, it supports the full functionality of protobuf.
+//
+// The NewDecoder method returns a DecoderWrapper, so the underlying
+// *json.Decoder methods can be used.
+type JSONPb jsonpb.Marshaler
+
+// ContentType always returns "application/json".
+func (*JSONPb) ContentType() string {
+	return "application/json"
+}
+
+// Marshal marshals "v" into JSON.
+func (j *JSONPb) Marshal(v interface{}) ([]byte, error) {
+	if _, ok := v.(proto.Message); !ok {
+		return j.marshalNonProtoField(v)
+	}
+
+	var buf bytes.Buffer
+	if err := j.marshalTo(&buf, v); err != nil {
+		return nil, err
+	}
+	return buf.Bytes(), nil
+}
+
+func (j *JSONPb) marshalTo(w io.Writer, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		buf, err := j.marshalNonProtoField(v)
+		if err != nil {
+			return err
+		}
+		_, err = w.Write(buf)
+		return err
+	}
+	return (*jsonpb.Marshaler)(j).Marshal(w, p)
+}
+
+var (
+	// protoMessageType is stored to prevent constant lookup of the same type at runtime.
+	protoMessageType = reflect.TypeOf((*proto.Message)(nil)).Elem()
+)
+
+// marshalNonProtoField marshals a non-message field of a protobuf message.
+// This function does not correctly marshal arbitrary data structures into JSON;
+// it is only capable of marshaling non-message field values of protobuf,
+// i.e. primitive types, enums; pointers to primitives or enums; maps from
+// integer/string types to primitives/enums/pointers to messages.
+func (j *JSONPb) marshalNonProtoField(v interface{}) ([]byte, error) {
+	if v == nil {
+		return []byte("null"), nil
+	}
+	rv := reflect.ValueOf(v)
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			return []byte("null"), nil
+		}
+		rv = rv.Elem()
+	}
+
+	if rv.Kind() == reflect.Slice {
+		if rv.IsNil() {
+			if j.EmitDefaults {
+				return []byte("[]"), nil
+			}
+			return []byte("null"), nil
+		}
+
+		if rv.Type().Elem().Implements(protoMessageType) {
+			var buf bytes.Buffer
+			err := buf.WriteByte('[')
+			if err != nil {
+				return nil, err
+			}
+			for i := 0; i < rv.Len(); i++ {
+				if i != 0 {
+					err = buf.WriteByte(',')
+					if err != nil {
+						return nil, err
+					}
+				}
+				if err = (*jsonpb.Marshaler)(j).Marshal(&buf, rv.Index(i).Interface().(proto.Message)); err != nil {
+					return nil, err
+				}
+			}
+			err = buf.WriteByte(']')
+			if err != nil {
+				return nil, err
+			}
+
+			return buf.Bytes(), nil
+		}
+	}
+
+	if rv.Kind() == reflect.Map {
+		m := make(map[string]*json.RawMessage)
+		for _, k := range rv.MapKeys() {
+			buf, err := j.Marshal(rv.MapIndex(k).Interface())
+			if err != nil {
+				return nil, err
+			}
+			m[fmt.Sprintf("%v", k.Interface())] = (*json.RawMessage)(&buf)
+		}
+		if j.Indent != "" {
+			return json.MarshalIndent(m, "", j.Indent)
+		}
+		return json.Marshal(m)
+	}
+	if enum, ok := rv.Interface().(protoEnum); ok && !j.EnumsAsInts {
+		return json.Marshal(enum.String())
+	}
+	return json.Marshal(rv.Interface())
+}
+
+// Unmarshal unmarshals JSON "data" into "v".
+func (j *JSONPb) Unmarshal(data []byte, v interface{}) error {
+	return unmarshalJSONPb(data, v)
+}
+
+// NewDecoder returns a Decoder which reads JSON stream from "r".
+func (j *JSONPb) NewDecoder(r io.Reader) Decoder {
+	d := json.NewDecoder(r)
+	return DecoderWrapper{Decoder: d}
+}
+
+// DecoderWrapper is a wrapper around a *json.Decoder that adds
+// support for protos to the Decode method.
+type DecoderWrapper struct {
+	*json.Decoder
+}
+
+// Decode wraps the embedded decoder's Decode method to support
+// protos using a jsonpb.Unmarshaler.
+func (d DecoderWrapper) Decode(v interface{}) error {
+	return decodeJSONPb(d.Decoder, v)
+}
+// NewEncoder returns an Encoder which writes JSON stream into "w".
+func (j *JSONPb) NewEncoder(w io.Writer) Encoder {
+	return EncoderFunc(func(v interface{}) error {
+		if err := j.marshalTo(w, v); err != nil {
+			return err
+		}
+		// mimic json.Encoder by adding a newline (makes output
+		// easier to read when it contains multiple encoded items)
+		_, err := w.Write(j.Delimiter())
+		return err
+	})
+}
+
+func unmarshalJSONPb(data []byte, v interface{}) error {
+	d := json.NewDecoder(bytes.NewReader(data))
+	return decodeJSONPb(d, v)
+}
+
+func decodeJSONPb(d *json.Decoder, v interface{}) error {
+	p, ok := v.(proto.Message)
+	if !ok {
+		return decodeNonProtoField(d, v)
+	}
+	unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
+	return unmarshaler.UnmarshalNext(d, p)
+}
+
+func decodeNonProtoField(d *json.Decoder, v interface{}) error {
+	rv := reflect.ValueOf(v)
+	if rv.Kind() != reflect.Ptr {
+		return fmt.Errorf("%T is not a pointer", v)
+	}
+	for rv.Kind() == reflect.Ptr {
+		if rv.IsNil() {
+			rv.Set(reflect.New(rv.Type().Elem()))
+		}
+		if rv.Type().ConvertibleTo(typeProtoMessage) {
+			unmarshaler := &jsonpb.Unmarshaler{AllowUnknownFields: allowUnknownFields}
+			return unmarshaler.UnmarshalNext(d, rv.Interface().(proto.Message))
+		}
+		rv = rv.Elem()
+	}
+	if rv.Kind() == reflect.Map {
+		if rv.IsNil() {
+			rv.Set(reflect.MakeMap(rv.Type()))
+		}
+		conv, ok := convFromType[rv.Type().Key().Kind()]
+		if !ok {
+			return fmt.Errorf("unsupported type of map field key: %v", rv.Type().Key())
+		}
+
+		m := make(map[string]*json.RawMessage)
+		if err := d.Decode(&m); err != nil {
+			return err
+		}
+		for k, v := range m {
+			result := conv.Call([]reflect.Value{reflect.ValueOf(k)})
+			if err := result[1].Interface(); err != nil {
+				return err.(error)
+			}
+			bk := result[0]
+			bv := reflect.New(rv.Type().Elem())
+			if err := unmarshalJSONPb([]byte(*v), bv.Interface()); err != nil {
+				return err
+			}
+			rv.SetMapIndex(bk, bv.Elem())
+		}
+		return nil
+	}
+	if _, ok := rv.Interface().(protoEnum); ok {
+		var repr interface{}
+		if err := d.Decode(&repr); err != nil {
+			return err
+		}
+		switch repr.(type) {
+		case string:
+			// TODO(yugui) Should use proto.StructProperties?
+			return fmt.Errorf("unmarshaling of symbolic enum %q not supported: %T", repr, rv.Interface())
+		case float64:
+			rv.Set(reflect.ValueOf(int32(repr.(float64))).Convert(rv.Type()))
+			return nil
+		default:
+			return fmt.Errorf("cannot assign %#v into Go type %T", repr, rv.Interface())
+		}
+	}
+	return d.Decode(v)
+}
+
+type protoEnum interface {
+	fmt.Stringer
+	EnumDescriptor() ([]byte, []int)
+}
+
+var typeProtoMessage = reflect.TypeOf((*proto.Message)(nil)).Elem()
+
+// Delimiter for newline encoded JSON streams.
+func (j *JSONPb) Delimiter() []byte {
+	return []byte("\n")
+}
+
+// allowUnknownFields, when true, suppresses errors when the destination
+// is a struct and the input contains object keys which do not match any
+// non-ignored, exported fields in the destination.
+var allowUnknownFields = true
+
+// DisallowUnknownFields configures the decoder (unmarshaler) to
+// return an error when it finds an unknown field. This function must be
+// called before the JSON marshaler is used.
+func DisallowUnknownFields() {
+	allowUnknownFields = false
+}
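DisallowUnknownFields flips a package-level switch, so it is typically called once during start-up; a minimal sketch:

package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func init() {
	// After this call, request bodies containing unknown JSON keys are
	// rejected with an error instead of being silently ignored.
	runtime.DisallowUnknownFields()
}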
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
new file mode 100644
index 0000000000..f65d1a2676
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshal_proto.go
@@ -0,0 +1,62 @@
+package runtime
+
+import (
+	"errors"
+	"io"
+	"io/ioutil"
+
+	"github.com/golang/protobuf/proto"
+)
+
+// ProtoMarshaller is a Marshaller which marshals/unmarshals into/from serialized proto bytes.
+type ProtoMarshaller struct{}
+
+// ContentType always returns "application/octet-stream".
+func (*ProtoMarshaller) ContentType() string {
+	return "application/octet-stream"
+}
+
+// Marshal marshals "value" into Proto.
+func (*ProtoMarshaller) Marshal(value interface{}) ([]byte, error) {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return nil, errors.New("unable to marshal non proto field")
+	}
+	return proto.Marshal(message)
+}
+
+// Unmarshal unmarshals proto "data" into "value".
+func (*ProtoMarshaller) Unmarshal(data []byte, value interface{}) error {
+	message, ok := value.(proto.Message)
+	if !ok {
+		return errors.New("unable to unmarshal non proto field")
+	}
+	return proto.Unmarshal(data, message)
+}
+
+// NewDecoder returns a Decoder which reads proto stream from "reader".
+func (marshaller *ProtoMarshaller) NewDecoder(reader io.Reader) Decoder {
+	return DecoderFunc(func(value interface{}) error {
+		buffer, err := ioutil.ReadAll(reader)
+		if err != nil {
+			return err
+		}
+		return marshaller.Unmarshal(buffer, value)
+	})
+}
+
+// NewEncoder returns an Encoder which writes proto stream into "writer".
+func (marshaller *ProtoMarshaller) NewEncoder(writer io.Writer) Encoder {
+	return EncoderFunc(func(value interface{}) error {
+		buffer, err := marshaller.Marshal(value)
+		if err != nil {
+			return err
+		}
+		_, err = writer.Write(buffer)
+		if err != nil {
+			return err
+		}
+
+		return nil
+	})
+}
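A sketch of registering ProtoMarshaller so clients can exchange binary protobuf instead of JSON; the MIME type matches the marshaller's ContentType:

package main

import "github.com/grpc-ecosystem/grpc-gateway/runtime"

func newMux() *runtime.ServeMux {
	// Requests with Content-Type: application/octet-stream are decoded with
	// proto.Unmarshal, and responses are encoded with proto.Marshal.
	return runtime.NewServeMux(
		runtime.WithMarshalerOption("application/octet-stream", &runtime.ProtoMarshaller{}),
	)
}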
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
new file mode 100644
index 0000000000..4615329421
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler.go
@@ -0,0 +1,55 @@
+package runtime
+
+import (
+	"io"
+)
+
+// Marshaler defines a conversion between byte sequence and gRPC payloads / fields.
+type Marshaler interface {
+	// Marshal marshals "v" into byte sequence.
+	Marshal(v interface{}) ([]byte, error)
+	// Unmarshal unmarshals "data" into "v".
+	// "v" must be a pointer value.
+	Unmarshal(data []byte, v interface{}) error
+	// NewDecoder returns a Decoder which reads byte sequence from "r".
+	NewDecoder(r io.Reader) Decoder
+	// NewEncoder returns an Encoder which writes byte sequence into "w".
+	NewEncoder(w io.Writer) Encoder
+	// ContentType returns the Content-Type which this marshaler is responsible for.
+	ContentType() string
+}
+
+// Marshalers that implement contentTypeMarshaler will have their ContentTypeFromMessage method called
+// to set the Content-Type header on the response.
+type contentTypeMarshaler interface {
+	// ContentTypeFromMessage returns the Content-Type this marshaler produces from the provided message.
+	ContentTypeFromMessage(v interface{}) string
+}
+
+// Decoder decodes a byte sequence.
+type Decoder interface {
+	Decode(v interface{}) error
+}
+
+// Encoder encodes gRPC payloads / fields into byte sequence.
+type Encoder interface {
+	Encode(v interface{}) error
+}
+
+// DecoderFunc adapts a decoder function into Decoder.
+type DecoderFunc func(v interface{}) error
+
+// Decode delegates invocations to the underlying function itself.
+func (f DecoderFunc) Decode(v interface{}) error { return f(v) }
+
+// EncoderFunc adapts an encoder function into Encoder.
+type EncoderFunc func(v interface{}) error
+
+// Encode delegates invocations to the underlying function itself.
+func (f EncoderFunc) Encode(v interface{}) error { return f(v) }
+
+// Delimited defines the streaming delimiter.
+type Delimited interface {
+	// Delimiter returns the record separator for the stream.
+	Delimiter() []byte
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
new file mode 100644
index 0000000000..8dd5c24db4
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/marshaler_registry.go
@@ -0,0 +1,99 @@
+package runtime
+
+import (
+	"errors"
+	"mime"
+	"net/http"
+
+	"google.golang.org/grpc/grpclog"
+)
+
+// MIMEWildcard is the fallback MIME type used for requests which do not match
+// a registered MIME type.
+const MIMEWildcard = "*"
+
+var (
+	acceptHeader      = http.CanonicalHeaderKey("Accept")
+	contentTypeHeader = http.CanonicalHeaderKey("Content-Type")
+
+	defaultMarshaler = &JSONPb{OrigName: true}
+)
+
+// MarshalerForRequest returns the inbound/outbound marshalers for this request.
+// It checks the registry on the ServeMux for the MIME type set by the Content-Type header.
+// If it isn't set (or the request Content-Type is empty), it checks for "*".
+// If there are multiple Content-Type headers set, it chooses the first one that
+// exactly matches in the registry.
+// Otherwise, it follows the above logic for "*"/InboundMarshaler/OutboundMarshaler.
+func MarshalerForRequest(mux *ServeMux, r *http.Request) (inbound Marshaler, outbound Marshaler) {
+	for _, acceptVal := range r.Header[acceptHeader] {
+		if m, ok := mux.marshalers.mimeMap[acceptVal]; ok {
+			outbound = m
+			break
+		}
+	}
+
+	for _, contentTypeVal := range r.Header[contentTypeHeader] {
+		contentType, _, err := mime.ParseMediaType(contentTypeVal)
+		if err != nil {
+			grpclog.Infof("Failed to parse Content-Type %s: %v", contentTypeVal, err)
+			continue
+		}
+		if m, ok := mux.marshalers.mimeMap[contentType]; ok {
+			inbound = m
+			break
+		}
+	}
+
+	if inbound == nil {
+		inbound = mux.marshalers.mimeMap[MIMEWildcard]
+	}
+	if outbound == nil {
+		outbound = inbound
+	}
+
+	return inbound, outbound
+}
+
+// marshalerRegistry is a mapping from MIME types to Marshalers.
+type marshalerRegistry struct {
+	mimeMap map[string]Marshaler
+}
+
+// add adds a marshaler for a case-sensitive MIME type string ("*" to match any
+// MIME type).
+func (m marshalerRegistry) add(mime string, marshaler Marshaler) error {
+	if len(mime) == 0 {
+		return errors.New("empty MIME type")
+	}
+
+	m.mimeMap[mime] = marshaler
+
+	return nil
+}
+
+// makeMarshalerMIMERegistry returns a new registry of marshalers.
+// It allows for a mapping of case-sensitive Content-Type MIME type string to runtime.Marshaler interfaces.
+//
+// For example, you could allow the client to specify the use of the runtime.JSONPb marshaler
+// with an "application/jsonpb" Content-Type and the use of the runtime.JSONBuiltin marshaler
+// with an "application/json" Content-Type.
+// "*" can be used to match any Content-Type.
+// This can be attached to a ServeMux with the marshaler option.
+func makeMarshalerMIMERegistry() marshalerRegistry {
+	return marshalerRegistry{
+		mimeMap: map[string]Marshaler{
+			MIMEWildcard: defaultMarshaler,
+		},
+	}
+}
+
+// WithMarshalerOption returns a ServeMuxOption which associates inbound and outbound
+// Marshalers to a MIME type in mux.
+func WithMarshalerOption(mime string, marshaler Marshaler) ServeMuxOption {
+	return func(mux *ServeMux) {
+		if err := mux.marshalers.add(mime, marshaler); err != nil {
+			panic(err)
+		}
+	}
+}
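MarshalerForRequest is what generated handlers call to pick codecs for a request; a hedged sketch of equivalent manual use (decodeBody is an invented helper):

package main

import (
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func decodeBody(mux *runtime.ServeMux, r *http.Request, v interface{}) error {
	// The inbound marshaler is chosen from the Content-Type header,
	// falling back to the "*" registration.
	inbound, _ := runtime.MarshalerForRequest(mux, r)
	return inbound.NewDecoder(r.Body).Decode(v)
}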
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
new file mode 100644
index 0000000000..523a9cb43c
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/mux.go
@@ -0,0 +1,300 @@
+package runtime
+
+import (
+	"context"
+	"fmt"
+	"net/http"
+	"net/textproto"
+	"strings"
+
+	"github.com/golang/protobuf/proto"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/metadata"
+	"google.golang.org/grpc/status"
+)
+
+// A HandlerFunc handles a specific pair of path pattern and HTTP method.
+type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
+
+// ErrUnknownURI is the error supplied to a custom ProtoErrorHandlerFunc when
+// a request is received with a URI path that does not match any registered
+// service method.
+//
+// Since gRPC servers return an "Unimplemented" code for requests with an
+// unrecognized URI path, this error also has a gRPC "Unimplemented" code.
+var ErrUnknownURI = status.Error(codes.Unimplemented, http.StatusText(http.StatusNotImplemented))
+
+// ServeMux is a request multiplexer for grpc-gateway.
+// It matches http requests to patterns and invokes the corresponding handler.
+type ServeMux struct {
+	// handlers maps HTTP method to a list of handlers.
+	handlers                  map[string][]handler
+	forwardResponseOptions    []func(context.Context, http.ResponseWriter, proto.Message) error
+	marshalers                marshalerRegistry
+	incomingHeaderMatcher     HeaderMatcherFunc
+	outgoingHeaderMatcher     HeaderMatcherFunc
+	metadataAnnotators        []func(context.Context, *http.Request) metadata.MD
+	streamErrorHandler        StreamErrorHandlerFunc
+	protoErrorHandler         ProtoErrorHandlerFunc
+	disablePathLengthFallback bool
+	lastMatchWins             bool
+}
+
+// ServeMuxOption is an option that can be given to a ServeMux on construction.
+type ServeMuxOption func(*ServeMux)
+
+// WithForwardResponseOption returns a ServeMuxOption representing the forwardResponseOption.
+//
+// forwardResponseOption is an option that will be called on the relevant context.Context,
+// http.ResponseWriter, and proto.Message before every forwarded response.
+//
+// The message may be nil in the case where just a header is being sent.
+func WithForwardResponseOption(forwardResponseOption func(context.Context, http.ResponseWriter, proto.Message) error) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.forwardResponseOptions = append(serveMux.forwardResponseOptions, forwardResponseOption)
+	}
+}
+
+// SetQueryParameterParser sets the query parameter parser, used to populate message from query parameters.
+// Configuring this will mean the generated swagger output is no longer correct, and it should be
+// done with careful consideration.
+func SetQueryParameterParser(queryParameterParser QueryParameterParser) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		currentQueryParser = queryParameterParser
+	}
+}
+
+// HeaderMatcherFunc checks whether a header key should be forwarded to/from gRPC context.
+type HeaderMatcherFunc func(string) (string, bool)
+
+// DefaultHeaderMatcher is used to pass http request headers to/from gRPC context. This adds permanent HTTP header
+// keys (as specified by the IANA) to gRPC context with grpcgateway- prefix. HTTP headers that start with
+// 'Grpc-Metadata-' are mapped to gRPC metadata after removing prefix 'Grpc-Metadata-'.
+func DefaultHeaderMatcher(key string) (string, bool) {
+	key = textproto.CanonicalMIMEHeaderKey(key)
+	if isPermanentHTTPHeader(key) {
+		return MetadataPrefix + key, true
+	} else if strings.HasPrefix(key, MetadataHeaderPrefix) {
+		return key[len(MetadataHeaderPrefix):], true
+	}
+	return "", false
+}
+
+// WithIncomingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for incoming requests to the gateway.
+//
+// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
+// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return the modified header.
+func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.incomingHeaderMatcher = fn
+	}
+}
+
+// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing responses from the gateway.
+//
+// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
+// passed to http response returned from gateway. To transform the header before passing to response,
+// matcher should return the modified header.
+func WithOutgoingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
+	return func(mux *ServeMux) {
+		mux.outgoingHeaderMatcher = fn
+	}
+}
+
+// WithMetadata returns a ServeMuxOption for passing metadata to a gRPC context.
+//
+// This can be used by services that need to read from http.Request and modify gRPC context. A common use case
+// is reading a token from a cookie and adding it to the gRPC context.
+func WithMetadata(annotator func(context.Context, *http.Request) metadata.MD) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.metadataAnnotators = append(serveMux.metadataAnnotators, annotator)
+	}
+}
+
+// WithProtoErrorHandler returns a ServeMuxOption for configuring a custom error handler.
+//
+// This can be used to handle an error as a general proto message defined by gRPC.
+// When this option is used, the mux uses the configured error handler instead of HTTPError and
+// OtherErrorHandler.
+func WithProtoErrorHandler(fn ProtoErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.protoErrorHandler = fn
+	}
+}
+
+// WithDisablePathLengthFallback returns a ServeMuxOption for disabling the path length fallback.
+func WithDisablePathLengthFallback() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.disablePathLengthFallback = true
+	}
+}
+
+// WithStreamErrorHandler returns a ServeMuxOption that will use the given custom stream
+// error handler, which allows for customizing the error trailer for server-streaming
+// calls.
+//
+// For stream errors that occur before any response has been written, the mux's
+// ProtoErrorHandler will be invoked. However, once data has been written, the errors must
+// be handled differently: they must be included in the response body. The response body's
+// final message will include the error details returned by the stream error handler.
+func WithStreamErrorHandler(fn StreamErrorHandlerFunc) ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.streamErrorHandler = fn
+	}
+}
+
+// WithLastMatchWins returns a ServeMuxOption that will enable "last
+// match wins" behavior, where if multiple path patterns match a
+// request path, the last one defined in the .proto file will be used.
+func WithLastMatchWins() ServeMuxOption {
+	return func(serveMux *ServeMux) {
+		serveMux.lastMatchWins = true
+	}
+}
+
+// NewServeMux returns a new ServeMux whose internal mapping is empty.
+func NewServeMux(opts ...ServeMuxOption) *ServeMux {
+	serveMux := &ServeMux{
+		handlers:               make(map[string][]handler),
+		forwardResponseOptions: make([]func(context.Context, http.ResponseWriter, proto.Message) error, 0),
+		marshalers:             makeMarshalerMIMERegistry(),
+		streamErrorHandler:     DefaultHTTPStreamErrorHandler,
+	}
+
+	for _, opt := range opts {
+		opt(serveMux)
+	}
+
+	if serveMux.incomingHeaderMatcher == nil {
+		serveMux.incomingHeaderMatcher = DefaultHeaderMatcher
+	}
+
+	if serveMux.outgoingHeaderMatcher == nil {
+		serveMux.outgoingHeaderMatcher = func(key string) (string, bool) {
+			return fmt.Sprintf("%s%s", MetadataHeaderPrefix, key), true
+		}
+	}
+
+	return serveMux
+}
+
+// Handle associates "h" to the pair of HTTP method and path pattern.
+func (s *ServeMux) Handle(meth string, pat Pattern, h HandlerFunc) {
+	if s.lastMatchWins {
+		s.handlers[meth] = append([]handler{{pat: pat, h: h}}, s.handlers[meth]...)
+	} else {
+		s.handlers[meth] = append(s.handlers[meth], handler{pat: pat, h: h})
+	}
+}
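Putting the mux options together, a typical construction might look like the following sketch; the header name and metadata key are invented for illustration:

package main

import (
	"context"
	"net/http"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"google.golang.org/grpc/metadata"
)

func newMux() *runtime.ServeMux {
	return runtime.NewServeMux(
		runtime.WithIncomingHeaderMatcher(func(key string) (string, bool) {
			if strings.EqualFold(key, "X-Request-Id") {
				return key, true // forward verbatim as gRPC metadata
			}
			return runtime.DefaultHeaderMatcher(key)
		}),
		runtime.WithMetadata(func(ctx context.Context, r *http.Request) metadata.MD {
			return metadata.Pairs("http-path", r.URL.Path)
		}),
	)
}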
+// ServeHTTP dispatches the request to the first handler whose pattern matches r.Method and r.URL.Path.
+func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	ctx := r.Context()
+
+	path := r.URL.Path
+	if !strings.HasPrefix(path, "/") {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			sterr := status.Error(codes.InvalidArgument, http.StatusText(http.StatusBadRequest))
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusBadRequest), http.StatusBadRequest)
+		}
+		return
+	}
+
+	components := strings.Split(path[1:], "/")
+	l := len(components)
+	var verb string
+	if idx := strings.LastIndex(components[l-1], ":"); idx == 0 {
+		if s.protoErrorHandler != nil {
+			_, outboundMarshaler := MarshalerForRequest(s, r)
+			s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+		} else {
+			OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+		}
+		return
+	} else if idx > 0 {
+		c := components[l-1]
+		components[l-1], verb = c[:idx], c[idx+1:]
+	}
+
+	if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
+		r.Method = strings.ToUpper(override)
+		if err := r.ParseForm(); err != nil {
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				sterr := status.Error(codes.InvalidArgument, err.Error())
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+			} else {
+				OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+			}
+			return
+		}
+	}
+	for _, h := range s.handlers[r.Method] {
+		pathParams, err := h.pat.Match(components, verb)
+		if err != nil {
+			continue
+		}
+		h.h(w, r, pathParams)
+		return
+	}
+
+	// look up other methods to handle fallback from GET to POST and
+	// to determine if it is MethodNotAllowed or NotFound.
+	for m, handlers := range s.handlers {
+		if m == r.Method {
+			continue
+		}
+		for _, h := range handlers {
+			pathParams, err := h.pat.Match(components, verb)
+			if err != nil {
+				continue
+			}
+			// X-HTTP-Method-Override is optional. Always allow fallback to POST.
+			if s.isPathLengthFallback(r) {
+				if err := r.ParseForm(); err != nil {
+					if s.protoErrorHandler != nil {
+						_, outboundMarshaler := MarshalerForRequest(s, r)
+						sterr := status.Error(codes.InvalidArgument, err.Error())
+						s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, sterr)
+					} else {
+						OtherErrorHandler(w, r, err.Error(), http.StatusBadRequest)
+					}
+					return
+				}
+				h.h(w, r, pathParams)
+				return
+			}
+			if s.protoErrorHandler != nil {
+				_, outboundMarshaler := MarshalerForRequest(s, r)
+				s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+			} else {
+				OtherErrorHandler(w, r, http.StatusText(http.StatusMethodNotAllowed), http.StatusMethodNotAllowed)
+			}
+			return
+		}
+	}
+
+	if s.protoErrorHandler != nil {
+		_, outboundMarshaler := MarshalerForRequest(s, r)
+		s.protoErrorHandler(ctx, s, outboundMarshaler, w, r, ErrUnknownURI)
+	} else {
+		OtherErrorHandler(w, r, http.StatusText(http.StatusNotFound), http.StatusNotFound)
+	}
+}
+func (s *ServeMux) GetForwardResponseOptions() []func(context.Context, http.ResponseWriter, proto.Message) error { + return s.forwardResponseOptions +} + +func (s *ServeMux) isPathLengthFallback(r *http.Request) bool { + return !s.disablePathLengthFallback && r.Method == "POST" && r.Header.Get("Content-Type") == "application/x-www-form-urlencoded" +} + +type handler struct { + pat Pattern + h HandlerFunc +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go new file mode 100644 index 0000000000..09053695da --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/pattern.go @@ -0,0 +1,262 @@ +package runtime + +import ( + "errors" + "fmt" + "strings" + + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var ( + // ErrNotMatch indicates that the given HTTP request path does not match to the pattern. + ErrNotMatch = errors.New("not match to the path pattern") + // ErrInvalidPattern indicates that the given definition of Pattern is not valid. + ErrInvalidPattern = errors.New("invalid pattern") +) + +type op struct { + code utilities.OpCode + operand int +} + +// Pattern is a template pattern of http request paths defined in github.com/googleapis/googleapis/google/api/http.proto. +type Pattern struct { + // ops is a list of operations + ops []op + // pool is a constant pool indexed by the operands or vars. + pool []string + // vars is a list of variables names to be bound by this pattern + vars []string + // stacksize is the max depth of the stack + stacksize int + // tailLen is the length of the fixed-size segments after a deep wildcard + tailLen int + // verb is the VERB part of the path pattern. It is empty if the pattern does not have VERB part. + verb string + // assumeColonVerb indicates whether a path suffix after a final + // colon may only be interpreted as a verb. + assumeColonVerb bool +} + +type patternOptions struct { + assumeColonVerb bool +} + +// PatternOpt is an option for creating Patterns. +type PatternOpt func(*patternOptions) + +// NewPattern returns a new Pattern from the given definition values. +// "ops" is a sequence of op codes. "pool" is a constant pool. +// "verb" is the verb part of the pattern. It is empty if the pattern does not have the part. +// "version" must be 1 for now. +// It returns an error if the given definition is invalid. 
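As a concrete illustration of the definition values described above, before the implementation that follows: a sketch of a pattern equivalent to the HTTP rule template "/v1/{name}". The opcode sequence matches what gateway-generated code typically emits for this shape, but treat it as illustrative.

```go
package main

import (
	"fmt"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

// Pattern for "/v1/{name}": push the literal "v1", push one wildcard
// segment, join that single segment, and capture it as the variable "name".
var pat = runtime.MustPattern(runtime.NewPattern(
	1, // version, must be 1
	[]int{
		int(utilities.OpLitPush), 0, // pool[0] == "v1"
		int(utilities.OpPush), 0, // one wildcard segment (operand unused)
		int(utilities.OpConcatN), 1, // join the top 1 stack entry
		int(utilities.OpCapture), 1, // bind it to pool[1] == "name"
	},
	[]string{"v1", "name"},
	"", // no verb suffix
))

func main() {
	bindings, err := pat.Match([]string{"v1", "jane"}, "")
	fmt.Println(bindings, err) // map[name:jane] <nil>

	_, err = pat.Match([]string{"v2", "jane"}, "")
	fmt.Println(err) // not match to the path pattern
}
```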
+func NewPattern(version int, ops []int, pool []string, verb string, opts ...PatternOpt) (Pattern, error) { + options := patternOptions{ + assumeColonVerb: true, + } + for _, o := range opts { + o(&options) + } + + if version != 1 { + grpclog.Infof("unsupported version: %d", version) + return Pattern{}, ErrInvalidPattern + } + + l := len(ops) + if l%2 != 0 { + grpclog.Infof("odd number of ops codes: %d", l) + return Pattern{}, ErrInvalidPattern + } + + var ( + typedOps []op + stack, maxstack int + tailLen int + pushMSeen bool + vars []string + ) + for i := 0; i < l; i += 2 { + op := op{code: utilities.OpCode(ops[i]), operand: ops[i+1]} + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpPushM: + if pushMSeen { + grpclog.Infof("pushM appears twice") + return Pattern{}, ErrInvalidPattern + } + pushMSeen = true + stack++ + case utilities.OpLitPush: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("negative literal index: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + if pushMSeen { + tailLen++ + } + stack++ + case utilities.OpConcatN: + if op.operand <= 0 { + grpclog.Infof("negative concat size: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + stack -= op.operand + if stack < 0 { + grpclog.Print("stack underflow") + return Pattern{}, ErrInvalidPattern + } + stack++ + case utilities.OpCapture: + if op.operand < 0 || len(pool) <= op.operand { + grpclog.Infof("variable name index out of bound: %d", op.operand) + return Pattern{}, ErrInvalidPattern + } + v := pool[op.operand] + op.operand = len(vars) + vars = append(vars, v) + stack-- + if stack < 0 { + grpclog.Infof("stack underflow") + return Pattern{}, ErrInvalidPattern + } + default: + grpclog.Infof("invalid opcode: %d", op.code) + return Pattern{}, ErrInvalidPattern + } + + if maxstack < stack { + maxstack = stack + } + typedOps = append(typedOps, op) + } + return Pattern{ + ops: typedOps, + pool: pool, + vars: vars, + stacksize: maxstack, + tailLen: tailLen, + verb: verb, + assumeColonVerb: options.assumeColonVerb, + }, nil +} + +// MustPattern is a helper function which makes it easier to call NewPattern in variable initialization. +func MustPattern(p Pattern, err error) Pattern { + if err != nil { + grpclog.Fatalf("Pattern initialization failed: %v", err) + } + return p +} + +// Match examines components if it matches to the Pattern. +// If it matches, the function returns a mapping from field paths to their captured values. +// If otherwise, the function returns an error. +func (p Pattern) Match(components []string, verb string) (map[string]string, error) { + if p.verb != verb { + if p.assumeColonVerb || p.verb != "" { + return nil, ErrNotMatch + } + if len(components) == 0 { + components = []string{":" + verb} + } else { + components = append([]string{}, components...) 
+ components[len(components)-1] += ":" + verb + } + verb = "" + } + + var pos int + stack := make([]string, 0, p.stacksize) + captured := make([]string, len(p.vars)) + l := len(components) + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush, utilities.OpLitPush: + if pos >= l { + return nil, ErrNotMatch + } + c := components[pos] + if op.code == utilities.OpLitPush { + if lit := p.pool[op.operand]; c != lit { + return nil, ErrNotMatch + } + } + stack = append(stack, c) + pos++ + case utilities.OpPushM: + end := len(components) + if end < pos+p.tailLen { + return nil, ErrNotMatch + } + end -= p.tailLen + stack = append(stack, strings.Join(components[pos:end], "/")) + pos = end + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + captured[op.operand] = stack[n] + stack = stack[:n] + } + } + if pos < l { + return nil, ErrNotMatch + } + bindings := make(map[string]string) + for i, val := range captured { + bindings[p.vars[i]] = val + } + return bindings, nil +} + +// Verb returns the verb part of the Pattern. +func (p Pattern) Verb() string { return p.verb } + +func (p Pattern) String() string { + var stack []string + for _, op := range p.ops { + switch op.code { + case utilities.OpNop: + continue + case utilities.OpPush: + stack = append(stack, "*") + case utilities.OpLitPush: + stack = append(stack, p.pool[op.operand]) + case utilities.OpPushM: + stack = append(stack, "**") + case utilities.OpConcatN: + n := op.operand + l := len(stack) - n + stack = append(stack[:l], strings.Join(stack[l:], "/")) + case utilities.OpCapture: + n := len(stack) - 1 + stack[n] = fmt.Sprintf("{%s=%s}", p.vars[op.operand], stack[n]) + } + } + segs := strings.Join(stack, "/") + if p.verb != "" { + return fmt.Sprintf("/%s:%s", segs, p.verb) + } + return "/" + segs +} + +// AssumeColonVerbOpt indicates whether a path suffix after a final +// colon may only be interpreted as a verb. +func AssumeColonVerbOpt(val bool) PatternOpt { + return PatternOpt(func(o *patternOptions) { + o.assumeColonVerb = val + }) +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go new file mode 100644 index 0000000000..a3151e2a55 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto2_convert.go @@ -0,0 +1,80 @@ +package runtime + +import ( + "github.com/golang/protobuf/proto" +) + +// StringP returns a pointer to a string whose pointee is same as the given string value. +func StringP(val string) (*string, error) { + return proto.String(val), nil +} + +// BoolP parses the given string representation of a boolean value, +// and returns a pointer to a bool whose value is same as the parsed value. +func BoolP(val string) (*bool, error) { + b, err := Bool(val) + if err != nil { + return nil, err + } + return proto.Bool(b), nil +} + +// Float64P parses the given string representation of a floating point number, +// and returns a pointer to a float64 whose value is same as the parsed number. +func Float64P(val string) (*float64, error) { + f, err := Float64(val) + if err != nil { + return nil, err + } + return proto.Float64(f), nil +} + +// Float32P parses the given string representation of a floating point number, +// and returns a pointer to a float32 whose value is same as the parsed number. 
+func Float32P(val string) (*float32, error) {
+	f, err := Float32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Float32(f), nil
+}
+
+// Int64P parses the given string representation of an integer
+// and returns a pointer to an int64 whose value is same as the parsed integer.
+func Int64P(val string) (*int64, error) {
+	i, err := Int64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int64(i), nil
+}
+
+// Int32P parses the given string representation of an integer
+// and returns a pointer to an int32 whose value is same as the parsed integer.
+func Int32P(val string) (*int32, error) {
+	i, err := Int32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Int32(i), nil
+}
+
+// Uint64P parses the given string representation of an integer
+// and returns a pointer to a uint64 whose value is same as the parsed integer.
+func Uint64P(val string) (*uint64, error) {
+	i, err := Uint64(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint64(i), nil
+}
+
+// Uint32P parses the given string representation of an integer
+// and returns a pointer to a uint32 whose value is same as the parsed integer.
+func Uint32P(val string) (*uint32, error) {
+	i, err := Uint32(val)
+	if err != nil {
+		return nil, err
+	}
+	return proto.Uint32(i), nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
new file mode 100644
index 0000000000..3fd30da22a
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/proto_errors.go
@@ -0,0 +1,106 @@
+package runtime
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"github.com/golang/protobuf/ptypes/any"
+	"github.com/grpc-ecosystem/grpc-gateway/internal"
+	"google.golang.org/grpc/codes"
+	"google.golang.org/grpc/grpclog"
+	"google.golang.org/grpc/status"
+)
+
+// StreamErrorHandlerFunc accepts an error as a gRPC error generated via status package and translates it into
+// a proto struct used to represent an error at the end of a stream.
+type StreamErrorHandlerFunc func(context.Context, error) *StreamError
+
+// StreamError is the payload for the final message in a server stream in the event that the server returns an
+// error after a response message has already been sent.
+type StreamError internal.StreamError
+
+// ProtoErrorHandlerFunc handles the error as a gRPC error generated via status package and replies to the request.
+type ProtoErrorHandlerFunc func(context.Context, *ServeMux, Marshaler, http.ResponseWriter, *http.Request, error)
+
+var _ ProtoErrorHandlerFunc = DefaultHTTPProtoErrorHandler
+
+// DefaultHTTPProtoErrorHandler is an implementation of HTTPError.
+// If "err" is an error from the gRPC system, the function replies with the status code mapped by HTTPStatusFromCode.
+// Otherwise, it replies with http.StatusInternalServerError.
+//
+// The response body returned by this function is a Status message marshaled by a Marshaler.
+//
+// Do not set this function to the HTTPError variable directly, use the WithProtoErrorHandler option instead.
+func DefaultHTTPProtoErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, _ *http.Request, err error) { + // return Internal when Marshal failed + const fallback = `{"code": 13, "message": "failed to marshal error message"}` + + s, ok := status.FromError(err) + if !ok { + s = status.New(codes.Unknown, err.Error()) + } + + w.Header().Del("Trailer") + + contentType := marshaler.ContentType() + // Check marshaler on run time in order to keep backwards compatibility + // An interface param needs to be added to the ContentType() function on + // the Marshal interface to be able to remove this check + if typeMarshaler, ok := marshaler.(contentTypeMarshaler); ok { + pb := s.Proto() + contentType = typeMarshaler.ContentTypeFromMessage(pb) + } + w.Header().Set("Content-Type", contentType) + + buf, merr := marshaler.Marshal(s.Proto()) + if merr != nil { + grpclog.Infof("Failed to marshal error message %q: %v", s.Proto(), merr) + w.WriteHeader(http.StatusInternalServerError) + if _, err := io.WriteString(w, fallback); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + return + } + + md, ok := ServerMetadataFromContext(ctx) + if !ok { + grpclog.Infof("Failed to extract ServerMetadata from context") + } + + handleForwardResponseServerMetadata(w, mux, md) + handleForwardResponseTrailerHeader(w, md) + st := HTTPStatusFromCode(s.Code()) + w.WriteHeader(st) + if _, err := w.Write(buf); err != nil { + grpclog.Infof("Failed to write response: %v", err) + } + + handleForwardResponseTrailer(w, md) +} + +// DefaultHTTPStreamErrorHandler converts the given err into a *StreamError via +// default logic. +// +// It extracts the gRPC status from err if possible. The fields of the status are +// used to populate the returned StreamError, and the HTTP status code is derived +// from the gRPC code via HTTPStatusFromCode. If the given err does not contain a +// gRPC status, an "Unknown" gRPC code is used and "Internal Server Error" HTTP code. 
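Wiring a custom handler through the `WithProtoErrorHandler` option referenced in the doc comment above might look like the following sketch; the logging wrapper and `newMuxWithErrorLogging` name are assumptions for illustration:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
)

func newMuxWithErrorLogging() *runtime.ServeMux {
	logged := func(ctx context.Context, mux *runtime.ServeMux, m runtime.Marshaler,
		w http.ResponseWriter, r *http.Request, err error) {
		// Record the failure, then reuse the default error rendering.
		log.Printf("gateway error for %s: %v", r.URL.Path, err)
		runtime.DefaultHTTPProtoErrorHandler(ctx, mux, m, w, r, err)
	}
	return runtime.NewServeMux(runtime.WithProtoErrorHandler(logged))
}
```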
+func DefaultHTTPStreamErrorHandler(_ context.Context, err error) *StreamError { + grpcCode := codes.Unknown + grpcMessage := err.Error() + var grpcDetails []*any.Any + if s, ok := status.FromError(err); ok { + grpcCode = s.Code() + grpcMessage = s.Message() + grpcDetails = s.Proto().GetDetails() + } + httpCode := HTTPStatusFromCode(grpcCode) + return &StreamError{ + GrpcCode: int32(grpcCode), + HttpCode: int32(httpCode), + Message: grpcMessage, + HttpStatus: http.StatusText(httpCode), + Details: grpcDetails, + } +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go new file mode 100644 index 0000000000..ba66842c33 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/runtime/query.go @@ -0,0 +1,406 @@ +package runtime + +import ( + "encoding/base64" + "fmt" + "net/url" + "reflect" + "regexp" + "strconv" + "strings" + "time" + + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc/grpclog" +) + +var valuesKeyRegexp = regexp.MustCompile("^(.*)\\[(.*)\\]$") + +var currentQueryParser QueryParameterParser = &defaultQueryParser{} + +// QueryParameterParser defines interface for all query parameter parsers +type QueryParameterParser interface { + Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error +} + +// PopulateQueryParameters parses query parameters +// into "msg" using current query parser +func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + return currentQueryParser.Parse(msg, values, filter) +} + +type defaultQueryParser struct{} + +// Parse populates "values" into "msg". +// A value is ignored if its key starts with one of the elements in "filter". +func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error { + for key, values := range values { + match := valuesKeyRegexp.FindStringSubmatch(key) + if len(match) == 3 { + key = match[1] + values = append([]string{match[2]}, values...) + } + fieldPath := strings.Split(key, ".") + if filter.HasCommonPrefix(fieldPath) { + continue + } + if err := populateFieldValueFromPath(msg, fieldPath, values); err != nil { + return err + } + } + return nil +} + +// PopulateFieldFromPath sets a value in a nested Protobuf structure. +// It instantiates missing protobuf fields as it goes. 
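A sketch of how the query parser shown above is typically invoked. `pb.Person` and its import path are hypothetical stand-ins for a generated message with a `name` string field, an `age` int32 field, and a `labels` map field. Well-known types accept their string forms here as well: RFC 3339 timestamps, Go-style durations such as "1.5s", and comma-separated FieldMask paths.

```go
package main

import (
	"net/url"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"
	"github.com/grpc-ecosystem/grpc-gateway/utilities"

	pb "example.com/gen/person" // hypothetical generated package
)

func parse() (*pb.Person, error) {
	var msg pb.Person
	values := url.Values{
		"name":        {"jane"},
		"age":         {"30"},
		"labels[env]": {"prod"}, // bracket form handled by valuesKeyRegexp
	}
	// An empty DoubleArray filters nothing; fields already bound from the
	// path or body would normally be listed here so they are skipped.
	if err := runtime.PopulateQueryParameters(&msg, values, utilities.NewDoubleArray(nil)); err != nil {
		return nil, err
	}
	return &msg, nil
}
```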
+func PopulateFieldFromPath(msg proto.Message, fieldPathString string, value string) error { + fieldPath := strings.Split(fieldPathString, ".") + return populateFieldValueFromPath(msg, fieldPath, []string{value}) +} + +func populateFieldValueFromPath(msg proto.Message, fieldPath []string, values []string) error { + m := reflect.ValueOf(msg) + if m.Kind() != reflect.Ptr { + return fmt.Errorf("unexpected type %T: %v", msg, msg) + } + var props *proto.Properties + m = m.Elem() + for i, fieldName := range fieldPath { + isLast := i == len(fieldPath)-1 + if !isLast && m.Kind() != reflect.Struct { + return fmt.Errorf("non-aggregate type in the mid of path: %s", strings.Join(fieldPath, ".")) + } + var f reflect.Value + var err error + f, props, err = fieldByProtoName(m, fieldName) + if err != nil { + return err + } else if !f.IsValid() { + grpclog.Infof("field not found in %T: %s", msg, strings.Join(fieldPath, ".")) + return nil + } + + switch f.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, reflect.String, reflect.Uint32, reflect.Uint64: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + m = f + case reflect.Slice: + if !isLast { + return fmt.Errorf("unexpected repeated field in %s", strings.Join(fieldPath, ".")) + } + // Handle []byte + if f.Type().Elem().Kind() == reflect.Uint8 { + m = f + break + } + return populateRepeatedField(f, values, props) + case reflect.Ptr: + if f.IsNil() { + m = reflect.New(f.Type().Elem()) + f.Set(m.Convert(f.Type())) + } + m = f.Elem() + continue + case reflect.Struct: + m = f + continue + case reflect.Map: + if !isLast { + return fmt.Errorf("unexpected nested field %s in %s", fieldPath[i+1], strings.Join(fieldPath[:i+1], ".")) + } + return populateMapField(f, values, props) + default: + return fmt.Errorf("unexpected type %s in %T", f.Type(), msg) + } + } + switch len(values) { + case 0: + return fmt.Errorf("no value of field: %s", strings.Join(fieldPath, ".")) + case 1: + default: + grpclog.Infof("too many field values: %s", strings.Join(fieldPath, ".")) + } + return populateField(m, values[0], props) +} + +// fieldByProtoName looks up a field whose corresponding protobuf field name is "name". +// "m" must be a struct value. It returns zero reflect.Value if no such field found. 
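The same path-walking powers `PopulateFieldFromPath`. A sketch reusing the hypothetical `pb.Person` type, assuming it carries a nested `address` message with a `city` string field:

```go
package main

import (
	"fmt"
	"log"

	"github.com/grpc-ecosystem/grpc-gateway/runtime"

	pb "example.com/gen/person" // hypothetical generated package
)

func main() {
	// The nil intermediate *pb.Address is allocated as the path is walked.
	var p pb.Person
	if err := runtime.PopulateFieldFromPath(&p, "address.city", "Berlin"); err != nil {
		log.Fatal(err)
	}
	fmt.Println(p.GetAddress().GetCity()) // Berlin
}
```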
+func fieldByProtoName(m reflect.Value, name string) (reflect.Value, *proto.Properties, error) { + props := proto.GetProperties(m.Type()) + + // look up field name in oneof map + for _, op := range props.OneofTypes { + if name == op.Prop.OrigName || name == op.Prop.JSONName { + v := reflect.New(op.Type.Elem()) + field := m.Field(op.Field) + if !field.IsNil() { + return reflect.Value{}, nil, fmt.Errorf("field already set for %s oneof", props.Prop[op.Field].OrigName) + } + field.Set(v) + return v.Elem().Field(0), op.Prop, nil + } + } + + for _, p := range props.Prop { + if p.OrigName == name { + return m.FieldByName(p.Name), p, nil + } + if p.JSONName == name { + return m.FieldByName(p.Name), p, nil + } + } + return reflect.Value{}, nil, nil +} + +func populateMapField(f reflect.Value, values []string, props *proto.Properties) error { + if len(values) != 2 { + return fmt.Errorf("more than one value provided for key %s in map %s", values[0], props.Name) + } + + key, value := values[0], values[1] + keyType := f.Type().Key() + valueType := f.Type().Elem() + if f.IsNil() { + f.Set(reflect.MakeMap(f.Type())) + } + + keyConv, ok := convFromType[keyType.Kind()] + if !ok { + return fmt.Errorf("unsupported key type %s in map %s", keyType, props.Name) + } + valueConv, ok := convFromType[valueType.Kind()] + if !ok { + return fmt.Errorf("unsupported value type %s in map %s", valueType, props.Name) + } + + keyV := keyConv.Call([]reflect.Value{reflect.ValueOf(key)}) + if err := keyV[1].Interface(); err != nil { + return err.(error) + } + valueV := valueConv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := valueV[1].Interface(); err != nil { + return err.(error) + } + + f.SetMapIndex(keyV[0].Convert(keyType), valueV[0].Convert(valueType)) + + return nil +} + +func populateRepeatedField(f reflect.Value, values []string, props *proto.Properties) error { + elemType := f.Type().Elem() + + // is the destination field a slice of an enumeration type? + if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnumRepeated(f, values, enumValMap) + } + + conv, ok := convFromType[elemType.Kind()] + if !ok { + return fmt.Errorf("unsupported field type %s", elemType) + } + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result := conv.Call([]reflect.Value{reflect.ValueOf(v)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Index(i).Set(result[0].Convert(f.Index(i).Type())) + } + return nil +} + +func populateField(f reflect.Value, value string, props *proto.Properties) error { + i := f.Addr().Interface() + + // Handle protobuf well known types + var name string + switch m := i.(type) { + case interface{ XXX_WellKnownType() string }: + name = m.XXX_WellKnownType() + case proto.Message: + const wktPrefix = "google.protobuf." 
+		if fullName := proto.MessageName(m); strings.HasPrefix(fullName, wktPrefix) {
+			name = fullName[len(wktPrefix):]
+		}
+	}
+	switch name {
+	case "Timestamp":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+
+		t, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		f.FieldByName("Seconds").SetInt(int64(t.Unix()))
+		f.FieldByName("Nanos").SetInt(int64(t.Nanosecond()))
+		return nil
+	case "Duration":
+		if value == "null" {
+			f.FieldByName("Seconds").SetInt(0)
+			f.FieldByName("Nanos").SetInt(0)
+			return nil
+		}
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+
+		ns := d.Nanoseconds()
+		s := ns / 1e9
+		ns %= 1e9
+		f.FieldByName("Seconds").SetInt(s)
+		f.FieldByName("Nanos").SetInt(ns)
+		return nil
+	case "DoubleValue":
+		fallthrough
+	case "FloatValue":
+		float64Val, err := strconv.ParseFloat(value, 64)
+		if err != nil {
+			return fmt.Errorf("bad DoubleValue: %s", value)
+		}
+		f.FieldByName("Value").SetFloat(float64Val)
+		return nil
+	case "Int64Value":
+		fallthrough
+	case "Int32Value":
+		int64Val, err := strconv.ParseInt(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad Int64Value: %s", value)
+		}
+		f.FieldByName("Value").SetInt(int64Val)
+		return nil
+	case "UInt64Value":
+		fallthrough
+	case "UInt32Value":
+		uint64Val, err := strconv.ParseUint(value, 10, 64)
+		if err != nil {
+			return fmt.Errorf("bad UInt64Value: %s", value)
+		}
+		f.FieldByName("Value").SetUint(uint64Val)
+		return nil
+	case "BoolValue":
+		if value == "true" {
+			f.FieldByName("Value").SetBool(true)
+		} else if value == "false" {
+			f.FieldByName("Value").SetBool(false)
+		} else {
+			return fmt.Errorf("bad BoolValue: %s", value)
+		}
+		return nil
+	case "StringValue":
+		f.FieldByName("Value").SetString(value)
+		return nil
+	case "BytesValue":
+		bytesVal, err := base64.StdEncoding.DecodeString(value)
+		if err != nil {
+			return fmt.Errorf("bad BytesValue: %s", value)
+		}
+		f.FieldByName("Value").SetBytes(bytesVal)
+		return nil
+	case "FieldMask":
+		p := f.FieldByName("Paths")
+		for _, v := range strings.Split(value, ",") {
+			if v != "" {
+				p.Set(reflect.Append(p, reflect.ValueOf(v)))
+			}
+		}
+		return nil
+	}
+
+	// Handle Time and Duration stdlib types
+	switch t := i.(type) {
+	case *time.Time:
+		pt, err := time.Parse(time.RFC3339Nano, value)
+		if err != nil {
+			return fmt.Errorf("bad Timestamp: %v", err)
+		}
+		*t = pt
+		return nil
+	case *time.Duration:
+		d, err := time.ParseDuration(value)
+		if err != nil {
+			return fmt.Errorf("bad Duration: %v", err)
+		}
+		*t = d
+		return nil
+	}
+
+	// is the destination field an enumeration type?
+ if enumValMap := proto.EnumValueMap(props.Enum); enumValMap != nil { + return populateFieldEnum(f, value, enumValMap) + } + + conv, ok := convFromType[f.Kind()] + if !ok { + return fmt.Errorf("field type %T is not supported in query parameters", i) + } + result := conv.Call([]reflect.Value{reflect.ValueOf(value)}) + if err := result[1].Interface(); err != nil { + return err.(error) + } + f.Set(result[0].Convert(f.Type())) + return nil +} + +func convertEnum(value string, t reflect.Type, enumValMap map[string]int32) (reflect.Value, error) { + // see if it's an enumeration string + if enumVal, ok := enumValMap[value]; ok { + return reflect.ValueOf(enumVal).Convert(t), nil + } + + // check for an integer that matches an enumeration value + eVal, err := strconv.Atoi(value) + if err != nil { + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) + } + for _, v := range enumValMap { + if v == int32(eVal) { + return reflect.ValueOf(eVal).Convert(t), nil + } + } + return reflect.Value{}, fmt.Errorf("%s is not a valid %s", value, t) +} + +func populateFieldEnum(f reflect.Value, value string, enumValMap map[string]int32) error { + cval, err := convertEnum(value, f.Type(), enumValMap) + if err != nil { + return err + } + f.Set(cval) + return nil +} + +func populateFieldEnumRepeated(f reflect.Value, values []string, enumValMap map[string]int32) error { + elemType := f.Type().Elem() + f.Set(reflect.MakeSlice(f.Type(), len(values), len(values)).Convert(f.Type())) + for i, v := range values { + result, err := convertEnum(v, elemType, enumValMap) + if err != nil { + return err + } + f.Index(i).Set(result) + } + return nil +} + +var ( + convFromType = map[reflect.Kind]reflect.Value{ + reflect.String: reflect.ValueOf(String), + reflect.Bool: reflect.ValueOf(Bool), + reflect.Float64: reflect.ValueOf(Float64), + reflect.Float32: reflect.ValueOf(Float32), + reflect.Int64: reflect.ValueOf(Int64), + reflect.Int32: reflect.ValueOf(Int32), + reflect.Uint64: reflect.ValueOf(Uint64), + reflect.Uint32: reflect.ValueOf(Uint32), + reflect.Slice: reflect.ValueOf(Bytes), + } +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel new file mode 100644 index 0000000000..7109d79323 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/BUILD.bazel @@ -0,0 +1,21 @@ +load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test") + +package(default_visibility = ["//visibility:public"]) + +go_library( + name = "go_default_library", + srcs = [ + "doc.go", + "pattern.go", + "readerfactory.go", + "trie.go", + ], + importpath = "github.com/grpc-ecosystem/grpc-gateway/utilities", +) + +go_test( + name = "go_default_test", + size = "small", + srcs = ["trie_test.go"], + embed = [":go_default_library"], +) diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go new file mode 100644 index 0000000000..cf79a4d588 --- /dev/null +++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/doc.go @@ -0,0 +1,2 @@ +// Package utilities provides members for internal use in grpc-gateway. 
+package utilities
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
new file mode 100644
index 0000000000..dfe7de4864
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/pattern.go
@@ -0,0 +1,22 @@
+package utilities
+
+// An OpCode is an opcode of compiled path patterns.
+type OpCode int
+
+// These constants are the valid values of OpCode.
+const (
+	// OpNop does nothing
+	OpNop = OpCode(iota)
+	// OpPush pushes a component onto the stack
+	OpPush
+	// OpLitPush pushes a component onto the stack if it matches the literal
+	OpLitPush
+	// OpPushM concatenates the remaining components and pushes the result onto the stack
+	OpPushM
+	// OpConcatN pops N items from the stack, concatenates them and pushes the result back onto the stack
+	OpConcatN
+	// OpCapture pops an item and binds it to the variable
+	OpCapture
+	// OpEnd is the least positive invalid opcode.
+	OpEnd
+)
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
new file mode 100644
index 0000000000..6dd3854665
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/readerfactory.go
@@ -0,0 +1,20 @@
+package utilities
+
+import (
+	"bytes"
+	"io"
+	"io/ioutil"
+)
+
+// IOReaderFactory takes in an io.Reader and returns a function that will allow you to create a new reader that begins
+// at the start of the stream.
+func IOReaderFactory(r io.Reader) (func() io.Reader, error) {
+	b, err := ioutil.ReadAll(r)
+	if err != nil {
+		return nil, err
+	}
+
+	return func() io.Reader {
+		return bytes.NewReader(b)
+	}, nil
+}
diff --git a/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
new file mode 100644
index 0000000000..c2b7b30dd9
--- /dev/null
+++ b/vendor/github.com/grpc-ecosystem/grpc-gateway/utilities/trie.go
@@ -0,0 +1,177 @@
+package utilities
+
+import (
+	"sort"
+)
+
+// DoubleArray is a Double Array implementation of trie on sequences of strings.
+type DoubleArray struct {
+	// Encoding keeps an encoding from string to int
+	Encoding map[string]int
+	// Base is the base array of Double Array
+	Base []int
+	// Check is the check array of Double Array
+	Check []int
+}
+
+// NewDoubleArray builds a DoubleArray from a set of sequences of strings.
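Two small usage sketches for the helpers above, ahead of the implementation below. The field-path filter is the same shape `PopulateQueryParameters` expects; the example values are assumptions:

```go
package main

import (
	"fmt"
	"io/ioutil"
	"strings"

	"github.com/grpc-ecosystem/grpc-gateway/utilities"
)

func main() {
	// A trie over field paths: any registered sequence that prefixes the
	// query counts as a hit.
	filter := utilities.NewDoubleArray([][]string{{"user", "name"}})
	fmt.Println(filter.HasCommonPrefix([]string{"user", "name", "first"})) // true
	fmt.Println(filter.HasCommonPrefix([]string{"user"}))                  // false

	// IOReaderFactory buffers a stream once and hands out fresh readers.
	newReader, err := utilities.IOReaderFactory(strings.NewReader("payload"))
	if err != nil {
		panic(err)
	}
	b1, _ := ioutil.ReadAll(newReader())
	b2, _ := ioutil.ReadAll(newReader()) // both reads see the full payload
	fmt.Println(string(b1), string(b2))  // payload payload
}
```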
+func NewDoubleArray(seqs [][]string) *DoubleArray { + da := &DoubleArray{Encoding: make(map[string]int)} + if len(seqs) == 0 { + return da + } + + encoded := registerTokens(da, seqs) + sort.Sort(byLex(encoded)) + + root := node{row: -1, col: -1, left: 0, right: len(encoded)} + addSeqs(da, encoded, 0, root) + + for i := len(da.Base); i > 0; i-- { + if da.Check[i-1] != 0 { + da.Base = da.Base[:i] + da.Check = da.Check[:i] + break + } + } + return da +} + +func registerTokens(da *DoubleArray, seqs [][]string) [][]int { + var result [][]int + for _, seq := range seqs { + var encoded []int + for _, token := range seq { + if _, ok := da.Encoding[token]; !ok { + da.Encoding[token] = len(da.Encoding) + } + encoded = append(encoded, da.Encoding[token]) + } + result = append(result, encoded) + } + for i := range result { + result[i] = append(result[i], len(da.Encoding)) + } + return result +} + +type node struct { + row, col int + left, right int +} + +func (n node) value(seqs [][]int) int { + return seqs[n.row][n.col] +} + +func (n node) children(seqs [][]int) []*node { + var result []*node + lastVal := int(-1) + last := new(node) + for i := n.left; i < n.right; i++ { + if lastVal == seqs[i][n.col+1] { + continue + } + last.right = i + last = &node{ + row: i, + col: n.col + 1, + left: i, + } + result = append(result, last) + } + last.right = n.right + return result +} + +func addSeqs(da *DoubleArray, seqs [][]int, pos int, n node) { + ensureSize(da, pos) + + children := n.children(seqs) + var i int + for i = 1; ; i++ { + ok := func() bool { + for _, child := range children { + code := child.value(seqs) + j := i + code + ensureSize(da, j) + if da.Check[j] != 0 { + return false + } + } + return true + }() + if ok { + break + } + } + da.Base[pos] = i + for _, child := range children { + code := child.value(seqs) + j := i + code + da.Check[j] = pos + 1 + } + terminator := len(da.Encoding) + for _, child := range children { + code := child.value(seqs) + if code == terminator { + continue + } + j := i + code + addSeqs(da, seqs, j, *child) + } +} + +func ensureSize(da *DoubleArray, i int) { + for i >= len(da.Base) { + da.Base = append(da.Base, make([]int, len(da.Base)+1)...) + da.Check = append(da.Check, make([]int, len(da.Check)+1)...) + } +} + +type byLex [][]int + +func (l byLex) Len() int { return len(l) } +func (l byLex) Swap(i, j int) { l[i], l[j] = l[j], l[i] } +func (l byLex) Less(i, j int) bool { + si := l[i] + sj := l[j] + var k int + for k = 0; k < len(si) && k < len(sj); k++ { + if si[k] < sj[k] { + return true + } + if si[k] > sj[k] { + return false + } + } + if k < len(sj) { + return true + } + return false +} + +// HasCommonPrefix determines if any sequence in the DoubleArray is a prefix of the given sequence. +func (da *DoubleArray) HasCommonPrefix(seq []string) bool { + if len(da.Base) == 0 { + return false + } + + var i int + for _, t := range seq { + code, ok := da.Encoding[t] + if !ok { + break + } + j := da.Base[i] + code + if len(da.Check) <= j || da.Check[j] != i+1 { + break + } + i = j + } + j := da.Base[i] + len(da.Encoding) + if len(da.Check) <= j || da.Check[j] != i+1 { + return false + } + return true +} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE b/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE deleted file mode 100644 index abe5fe170b..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2016, gRPC Ecosystem -All rights reserved. 
- -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -* Neither the name of grpc-opentracing nor the names of its - contributors may be used to endorse or promote products derived from - this software without specific prior written permission. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS b/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS deleted file mode 100644 index 5cfe0175ee..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/PATENTS +++ /dev/null @@ -1,23 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the GRPC project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of GRPC, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of GRPC. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of GRPC or any code incorporated within this -implementation of GRPC constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of GRPC -shall terminate as of the date such litigation is filed. -Status API Training Shop Blog About diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md deleted file mode 100644 index 78c49dbbea..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/README.md +++ /dev/null @@ -1,57 +0,0 @@ -# OpenTracing support for gRPC in Go - -The `otgrpc` package makes it easy to add OpenTracing support to gRPC-based -systems in Go. 
- -## Installation - -``` -go get github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc -``` - -## Documentation - -See the basic usage examples below and the [package documentation on -godoc.org](https://godoc.org/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc). - -## Client-side usage example - -Wherever you call `grpc.Dial`: - -```go -// You must have some sort of OpenTracing Tracer instance on hand. -var tracer opentracing.Tracer = ... -... - -// Set up a connection to the server peer. -conn, err := grpc.Dial( - address, - ... // other options - grpc.WithUnaryInterceptor( - otgrpc.OpenTracingClientInterceptor(tracer)), - grpc.WithStreamInterceptor( - otgrpc.OpenTracingStreamClientInterceptor(tracer))) - -// All future RPC activity involving `conn` will be automatically traced. -``` - -## Server-side usage example - -Wherever you call `grpc.NewServer`: - -```go -// You must have some sort of OpenTracing Tracer instance on hand. -var tracer opentracing.Tracer = ... -... - -// Initialize the gRPC server. -s := grpc.NewServer( - ... // other options - grpc.UnaryInterceptor( - otgrpc.OpenTracingServerInterceptor(tracer)), - grpc.StreamInterceptor( - otgrpc.OpenTracingStreamServerInterceptor(tracer))) - -// All future RPC activity involving `s` will be automatically traced. -``` - diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go deleted file mode 100644 index 3414e55cb1..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/client.go +++ /dev/null @@ -1,239 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" - "io" - "runtime" - "sync/atomic" -) - -// OpenTracingClientInterceptor returns a grpc.UnaryClientInterceptor suitable -// for use in a grpc.Dial call. -// -// For example: -// -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithUnaryInterceptor(otgrpc.OpenTracingClientInterceptor(tracer))) -// -// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -// metadata; they will also look in the context.Context for an active -// in-process parent Span and establish a ChildOf reference if such a parent -// Span could be found. -func OpenTracingClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryClientInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - method string, - req, resp interface{}, - cc *grpc.ClientConn, - invoker grpc.UnaryInvoker, - opts ...grpc.CallOption, - ) error { - var err error - var parentCtx opentracing.SpanContext - if parent := opentracing.SpanFromContext(ctx); parent != nil { - parentCtx = parent.Context() - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(parentCtx, method, req, resp) { - return invoker(ctx, method, req, resp, cc, opts...) - } - clientSpan := tracer.StartSpan( - method, - opentracing.ChildOf(parentCtx), - ext.SpanKindRPCClient, - gRPCComponentTag, - ) - defer clientSpan.Finish() - ctx = injectSpanContext(ctx, tracer, clientSpan) - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object("gRPC request", req)) - } - err = invoker(ctx, method, req, resp, cc, opts...) 
- if err == nil { - if otgrpcOpts.logPayloads { - clientSpan.LogFields(log.Object("gRPC response", resp)) - } - } else { - SetSpanTags(clientSpan, err, true) - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(clientSpan, method, req, resp, err) - } - return err - } -} - -// OpenTracingStreamClientInterceptor returns a grpc.StreamClientInterceptor suitable -// for use in a grpc.Dial call. The interceptor instruments streaming RPCs by creating -// a single span to correspond to the lifetime of the RPC's stream. -// -// For example: -// -// conn, err := grpc.Dial( -// address, -// ..., // (existing DialOptions) -// grpc.WithStreamInterceptor(otgrpc.OpenTracingStreamClientInterceptor(tracer))) -// -// All gRPC client spans will inject the OpenTracing SpanContext into the gRPC -// metadata; they will also look in the context.Context for an active -// in-process parent Span and establish a ChildOf reference if such a parent -// Span could be found. -func OpenTracingStreamClientInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamClientInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - desc *grpc.StreamDesc, - cc *grpc.ClientConn, - method string, - streamer grpc.Streamer, - opts ...grpc.CallOption, - ) (grpc.ClientStream, error) { - var err error - var parentCtx opentracing.SpanContext - if parent := opentracing.SpanFromContext(ctx); parent != nil { - parentCtx = parent.Context() - } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(parentCtx, method, nil, nil) { - return streamer(ctx, desc, cc, method, opts...) - } - - clientSpan := tracer.StartSpan( - method, - opentracing.ChildOf(parentCtx), - ext.SpanKindRPCClient, - gRPCComponentTag, - ) - ctx = injectSpanContext(ctx, tracer, clientSpan) - cs, err := streamer(ctx, desc, cc, method, opts...) - if err != nil { - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - SetSpanTags(clientSpan, err, true) - clientSpan.Finish() - return cs, err - } - return newOpenTracingClientStream(cs, method, desc, clientSpan, otgrpcOpts), nil - } -} - -func newOpenTracingClientStream(cs grpc.ClientStream, method string, desc *grpc.StreamDesc, clientSpan opentracing.Span, otgrpcOpts *options) grpc.ClientStream { - finishChan := make(chan struct{}) - - isFinished := new(int32) - *isFinished = 0 - finishFunc := func(err error) { - // The current OpenTracing specification forbids finishing a span more than - // once. Since we have multiple code paths that could concurrently call - // `finishFunc`, we need to add some sort of synchronization to guard against - // multiple finishing. - if !atomic.CompareAndSwapInt32(isFinished, 0, 1) { - return - } - close(finishChan) - defer clientSpan.Finish() - if err != nil { - clientSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - SetSpanTags(clientSpan, err, true) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(clientSpan, method, nil, nil, err) - } - } - go func() { - select { - case <-finishChan: - // The client span is being finished by another code path; hence, no - // action is necessary. 
- case <-cs.Context().Done(): - finishFunc(cs.Context().Err()) - } - }() - otcs := &openTracingClientStream{ - ClientStream: cs, - desc: desc, - finishFunc: finishFunc, - } - - // The `ClientStream` interface allows one to omit calling `Recv` if it's - // known that the result will be `io.EOF`. See - // http://stackoverflow.com/q/42915337 - // In such cases, there's nothing that triggers the span to finish. We, - // therefore, set a finalizer so that the span and the context goroutine will - // at least be cleaned up when the garbage collector is run. - runtime.SetFinalizer(otcs, func(otcs *openTracingClientStream) { - otcs.finishFunc(nil) - }) - return otcs -} - -type openTracingClientStream struct { - grpc.ClientStream - desc *grpc.StreamDesc - finishFunc func(error) -} - -func (cs *openTracingClientStream) Header() (metadata.MD, error) { - md, err := cs.ClientStream.Header() - if err != nil { - cs.finishFunc(err) - } - return md, err -} - -func (cs *openTracingClientStream) SendMsg(m interface{}) error { - err := cs.ClientStream.SendMsg(m) - if err != nil { - cs.finishFunc(err) - } - return err -} - -func (cs *openTracingClientStream) RecvMsg(m interface{}) error { - err := cs.ClientStream.RecvMsg(m) - if err == io.EOF { - cs.finishFunc(nil) - return err - } else if err != nil { - cs.finishFunc(err) - return err - } - if !cs.desc.ServerStreams { - cs.finishFunc(nil) - } - return err -} - -func (cs *openTracingClientStream) CloseSend() error { - err := cs.ClientStream.CloseSend() - if err != nil { - cs.finishFunc(err) - } - return err -} - -func injectSpanContext(ctx context.Context, tracer opentracing.Tracer, clientSpan opentracing.Span) context.Context { - md, ok := metadata.FromOutgoingContext(ctx) - if !ok { - md = metadata.New(nil) - } else { - md = md.Copy() - } - mdWriter := metadataReaderWriter{md} - err := tracer.Inject(clientSpan.Context(), opentracing.HTTPHeaders, mdWriter) - // We have no better place to record an error than the Span itself :-/ - if err != nil { - clientSpan.LogFields(log.String("event", "Tracer.Inject() failed"), log.Error(err)) - } - return metadata.NewOutgoingContext(ctx, md) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go deleted file mode 100644 index 41a6346f25..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/errors.go +++ /dev/null @@ -1,69 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" -) - -// A Class is a set of types of outcomes (including errors) that will often -// be handled in the same way. -type Class string - -const ( - Unknown Class = "0xx" - // Success represents outcomes that achieved the desired results. - Success Class = "2xx" - // ClientError represents errors that were the client's fault. - ClientError Class = "4xx" - // ServerError represents errors that were the server's fault. 
- ServerError Class = "5xx" -) - -// ErrorClass returns the class of the given error -func ErrorClass(err error) Class { - if s, ok := status.FromError(err); ok { - switch s.Code() { - // Success or "success" - case codes.OK, codes.Canceled: - return Success - - // Client errors - case codes.InvalidArgument, codes.NotFound, codes.AlreadyExists, - codes.PermissionDenied, codes.Unauthenticated, codes.FailedPrecondition, - codes.OutOfRange: - return ClientError - - // Server errors - case codes.DeadlineExceeded, codes.ResourceExhausted, codes.Aborted, - codes.Unimplemented, codes.Internal, codes.Unavailable, codes.DataLoss: - return ServerError - - // Not sure - case codes.Unknown: - fallthrough - default: - return Unknown - } - } - return Unknown -} - -// SetSpanTags sets one or more tags on the given span according to the -// error. -func SetSpanTags(span opentracing.Span, err error, client bool) { - c := ErrorClass(err) - code := codes.Unknown - if s, ok := status.FromError(err); ok { - code = s.Code() - } - span.SetTag("response_code", code) - span.SetTag("response_class", c) - if err == nil { - return - } - if client || c == ServerError { - ext.Error.Set(span, true) - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go deleted file mode 100644 index 903e8382e3..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/options.go +++ /dev/null @@ -1,76 +0,0 @@ -package otgrpc - -import "github.com/opentracing/opentracing-go" - -// Option instances may be used in OpenTracing(Server|Client)Interceptor -// initialization. -// -// See this post about the "functional options" pattern: -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type Option func(o *options) - -// LogPayloads returns an Option that tells the OpenTracing instrumentation to -// try to log application payloads in both directions. -func LogPayloads() Option { - return func(o *options) { - o.logPayloads = true - } -} - -// SpanInclusionFunc provides an optional mechanism to decide whether or not -// to trace a given gRPC call. Return true to create a Span and initiate -// tracing, false to not create a Span and not trace. -// -// parentSpanCtx may be nil if no parent could be extraction from either the Go -// context.Context (on the client) or the RPC (on the server). -type SpanInclusionFunc func( - parentSpanCtx opentracing.SpanContext, - method string, - req, resp interface{}) bool - -// IncludingSpans binds a IncludeSpanFunc to the options -func IncludingSpans(inclusionFunc SpanInclusionFunc) Option { - return func(o *options) { - o.inclusionFunc = inclusionFunc - } -} - -// SpanDecoratorFunc provides an (optional) mechanism for otgrpc users to add -// arbitrary tags/logs/etc to the opentracing.Span associated with client -// and/or server RPCs. -type SpanDecoratorFunc func( - span opentracing.Span, - method string, - req, resp interface{}, - grpcError error) - -// SpanDecorator binds a function that decorates gRPC Spans. -func SpanDecorator(decorator SpanDecoratorFunc) Option { - return func(o *options) { - o.decorator = decorator - } -} - -// The internal-only options struct. Obviously overkill at the moment; but will -// scale well as production use dictates other configuration and tuning -// parameters. -type options struct { - logPayloads bool - decorator SpanDecoratorFunc - // May be nil. 
- inclusionFunc SpanInclusionFunc -} - -// newOptions returns the default options. -func newOptions() *options { - return &options{ - logPayloads: false, - inclusionFunc: nil, - } -} - -func (o *options) apply(opts ...Option) { - for _, opt := range opts { - opt(o) - } -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go deleted file mode 100644 index 4ff3d19978..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/package.go +++ /dev/null @@ -1,5 +0,0 @@ -// Package otgrpc provides OpenTracing support for any gRPC client or server. -// -// See the README for simple usage examples: -// https://github.com/grpc-ecosystem/grpc-opentracing/blob/master/go/otgrpc/README.md -package otgrpc diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go deleted file mode 100644 index 62cf54d221..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/server.go +++ /dev/null @@ -1,141 +0,0 @@ -package otgrpc - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" - "golang.org/x/net/context" - "google.golang.org/grpc" - "google.golang.org/grpc/metadata" -) - -// OpenTracingServerInterceptor returns a grpc.UnaryServerInterceptor suitable -// for use in a grpc.NewServer call. -// -// For example: -// -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.UnaryInterceptor(otgrpc.OpenTracingServerInterceptor(tracer))) -// -// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -// metadata; if found, the server span will act as the ChildOf that RPC -// SpanContext. -// -// Root or not, the server Span will be embedded in the context.Context for the -// application-specific gRPC handler(s) to access. -func OpenTracingServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.UnaryServerInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func( - ctx context.Context, - req interface{}, - info *grpc.UnaryServerInfo, - handler grpc.UnaryHandler, - ) (resp interface{}, err error) { - spanContext, err := extractSpanContext(ctx, tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. 
- } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, req, nil) { - return handler(ctx, req) - } - serverSpan := tracer.StartSpan( - info.FullMethod, - ext.RPCServerOption(spanContext), - gRPCComponentTag, - ) - defer serverSpan.Finish() - - ctx = opentracing.ContextWithSpan(ctx, serverSpan) - if otgrpcOpts.logPayloads { - serverSpan.LogFields(log.Object("gRPC request", req)) - } - resp, err = handler(ctx, req) - if err == nil { - if otgrpcOpts.logPayloads { - serverSpan.LogFields(log.Object("gRPC response", resp)) - } - } else { - SetSpanTags(serverSpan, err, false) - serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(serverSpan, info.FullMethod, req, resp, err) - } - return resp, err - } -} - -// OpenTracingStreamServerInterceptor returns a grpc.StreamServerInterceptor suitable -// for use in a grpc.NewServer call. The interceptor instruments streaming RPCs by -// creating a single span to correspond to the lifetime of the RPC's stream. -// -// For example: -// -// s := grpc.NewServer( -// ..., // (existing ServerOptions) -// grpc.StreamInterceptor(otgrpc.OpenTracingStreamServerInterceptor(tracer))) -// -// All gRPC server spans will look for an OpenTracing SpanContext in the gRPC -// metadata; if found, the server span will act as the ChildOf that RPC -// SpanContext. -// -// Root or not, the server Span will be embedded in the context.Context for the -// application-specific gRPC handler(s) to access. -func OpenTracingStreamServerInterceptor(tracer opentracing.Tracer, optFuncs ...Option) grpc.StreamServerInterceptor { - otgrpcOpts := newOptions() - otgrpcOpts.apply(optFuncs...) - return func(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { - spanContext, err := extractSpanContext(ss.Context(), tracer) - if err != nil && err != opentracing.ErrSpanContextNotFound { - // TODO: establish some sort of error reporting mechanism here. We - // don't know where to put such an error and must rely on Tracer - // implementations to do something appropriate for the time being. 
- } - if otgrpcOpts.inclusionFunc != nil && - !otgrpcOpts.inclusionFunc(spanContext, info.FullMethod, nil, nil) { - return handler(srv, ss) - } - - serverSpan := tracer.StartSpan( - info.FullMethod, - ext.RPCServerOption(spanContext), - gRPCComponentTag, - ) - defer serverSpan.Finish() - ss = &openTracingServerStream{ - ServerStream: ss, - ctx: opentracing.ContextWithSpan(ss.Context(), serverSpan), - } - err = handler(srv, ss) - if err != nil { - SetSpanTags(serverSpan, err, false) - serverSpan.LogFields(log.String("event", "error"), log.String("message", err.Error())) - } - if otgrpcOpts.decorator != nil { - otgrpcOpts.decorator(serverSpan, info.FullMethod, nil, nil, err) - } - return err - } -} - -type openTracingServerStream struct { - grpc.ServerStream - ctx context.Context -} - -func (ss *openTracingServerStream) Context() context.Context { - return ss.ctx -} - -func extractSpanContext(ctx context.Context, tracer opentracing.Tracer) (opentracing.SpanContext, error) { - md, ok := metadata.FromIncomingContext(ctx) - if !ok { - md = metadata.New(nil) - } - return tracer.Extract(opentracing.HTTPHeaders, metadataReaderWriter{md}) -} diff --git a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go b/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go deleted file mode 100644 index 9abd5eaa62..0000000000 --- a/vendor/github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc/shared.go +++ /dev/null @@ -1,42 +0,0 @@ -package otgrpc - -import ( - "strings" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "google.golang.org/grpc/metadata" -) - -var ( - // Morally a const: - gRPCComponentTag = opentracing.Tag{string(ext.Component), "gRPC"} -) - -// metadataReaderWriter satisfies both the opentracing.TextMapReader and -// opentracing.TextMapWriter interfaces. -type metadataReaderWriter struct { - metadata.MD -} - -func (w metadataReaderWriter) Set(key, val string) { - // The GRPC HPACK implementation rejects any uppercase keys here. - // - // As such, since the HTTP_HEADERS format is case-insensitive anyway, we - // blindly lowercase the key (which is guaranteed to work in the - // Inject/Extract sense per the OpenTracing spec). - key = strings.ToLower(key) - w.MD[key] = append(w.MD[key], val) -} - -func (w metadataReaderWriter) ForeachKey(handler func(key, val string) error) error { - for k, vals := range w.MD { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - - return nil -} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml b/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml deleted file mode 100644 index 1a0bbea6c7..0000000000 --- a/vendor/github.com/hashicorp/go-immutable-radix/.travis.yml +++ /dev/null @@ -1,3 +0,0 @@ -language: go -go: - - tip diff --git a/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md new file mode 100644 index 0000000000..86c6d03fba --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/CHANGELOG.md @@ -0,0 +1,23 @@ +# UNRELEASED + +# 1.3.0 (September 17th, 2020) + +FEATURES + +* Add reverse tree traversal [[GH-30](https://github.com/hashicorp/go-immutable-radix/pull/30)] + +# 1.2.0 (March 18th, 2020) + +FEATURES + +* Adds a `Clone` method to `Txn` allowing transactions to be split either into two independently mutable trees. 
[[GH-26](https://github.com/hashicorp/go-immutable-radix/pull/26)]
+
+# 1.1.0 (May 22nd, 2019)
+
+FEATURES
+
+* Add `SeekLowerBound` to allow for range scans. [[GH-24](https://github.com/hashicorp/go-immutable-radix/pull/24)]
+
+# 1.0.0 (August 30th, 2018)
+
+* go mod adopted
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/README.md b/vendor/github.com/hashicorp/go-immutable-radix/README.md
index 8910fcc035..aca15a6421 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/README.md
+++ b/vendor/github.com/hashicorp/go-immutable-radix/README.md
@@ -1,4 +1,4 @@
-go-immutable-radix [![Build Status](https://travis-ci.org/hashicorp/go-immutable-radix.png)](https://travis-ci.org/hashicorp/go-immutable-radix)
+go-immutable-radix [![CircleCI](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master.svg?style=svg)](https://circleci.com/gh/hashicorp/go-immutable-radix/tree/master)
 =========
 
 Provides the `iradix` package that implements an immutable [radix tree](http://en.wikipedia.org/wiki/Radix_tree).
@@ -39,3 +39,28 @@ if string(m) != "foo" {
 }
 ```
 
+Here is an example of performing a range scan of the keys.
+
+```go
+// Create a tree
+r := iradix.New()
+r, _, _ = r.Insert([]byte("001"), 1)
+r, _, _ = r.Insert([]byte("002"), 2)
+r, _, _ = r.Insert([]byte("005"), 5)
+r, _, _ = r.Insert([]byte("010"), 10)
+r, _, _ = r.Insert([]byte("100"), 10)
+
+// Range scan over the keys that sort lexicographically between [003, 050)
+it := r.Root().Iterator()
+it.SeekLowerBound([]byte("003"))
+for key, _, ok := it.Next(); ok; key, _, ok = it.Next() {
+	if string(key) >= "050" {
+		break
+	}
+	fmt.Println(string(key))
+}
+// Output:
+// 005
+// 010
+```
+
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
index c7172c406b..168bda76df 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iradix.go
@@ -86,6 +86,20 @@ func (t *Tree) Txn() *Txn {
 	return txn
 }
 
+// Clone makes an independent copy of the transaction. The new transaction
+// does not track any nodes and has TrackMutate turned off. The cloned
+// transaction will contain any uncommitted writes in the original
+// transaction, but further mutations to either will be independent and
+// result in different radix trees on Commit. A cloned transaction may be
+// passed to another goroutine and mutated there independently; however,
+// each transaction may only be mutated in a single thread.
+func (t *Txn) Clone() *Txn {
+	// reset the writable node cache to avoid leaking future writes into the clone
+	t.writable = nil
+
+	txn := &Txn{
+		root: t.root,
+		snap: t.snap,
+		size: t.size,
+	}
+	return txn
+}
+
 // TrackMutate can be used to toggle if mutations are tracked. If this is enabled
 // then notifications will be issued for affected internal nodes and leaves when
 // the transaction is committed.
@@ -338,6 +352,11 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
 	if !n.isLeaf() {
 		return nil, nil
 	}
+	// Copy the pointer in case we are in a transaction that already
+	// modified this node since the node will be reused. Any changes
+	// made to the node will not affect returning the original leaf
+	// value.
+	oldLeaf := n.leaf
 
 	// Remove the leaf node
 	nc := t.writeNode(n, true)
@@ -347,7 +366,7 @@ func (t *Txn) delete(parent, n *Node, search []byte) (*Node, *leafNode) {
 	if n != t.root && len(nc.edges) == 1 {
 		t.mergeChild(nc)
 	}
-	return nc, n.leaf
+	return nc, oldLeaf
 }
 
 // Look for an edge
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/iter.go b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
index 9815e02538..f17d0a644f 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/iter.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/iter.go
@@ -1,6 +1,8 @@
 package iradix
 
-import "bytes"
+import (
+	"bytes"
+)
 
 // Iterator is used to iterate over a set of nodes
 // in pre-order
@@ -18,7 +20,7 @@ func (i *Iterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) {
 	watch = n.mutateCh
 	search := prefix
 	for {
-		// Check for key exhaution
+		// Check for key exhaustion
 		if len(search) == 0 {
 			i.node = n
 			return
@@ -53,12 +55,124 @@ func (i *Iterator) SeekPrefix(prefix []byte) {
 	i.SeekPrefixWatch(prefix)
 }
 
+func (i *Iterator) recurseMin(n *Node) *Node {
+	// Traverse to the minimum child
+	if n.leaf != nil {
+		return n
+	}
+	nEdges := len(n.edges)
+	if nEdges > 1 {
+		// Add all the other edges to the stack (the min node will be added as
+		// we recurse)
+		i.stack = append(i.stack, n.edges[1:])
+	}
+	if nEdges > 0 {
+		return i.recurseMin(n.edges[0].node)
+	}
+	// Shouldn't be possible
+	return nil
+}
+
+// SeekLowerBound is used to seek the iterator to the smallest key that is
+// greater or equal to the given key. There is no watch variant as it's hard to
+// predict based on the radix structure which node(s) changes might affect the
+// result.
+func (i *Iterator) SeekLowerBound(key []byte) {
+	// Wipe the stack. Unlike Prefix iteration, we need to build the stack as we
+	// go because we need only a subset of edges of many nodes in the path to the
+	// leaf with the lower bound. Note that the iterator will still recurse into
+	// children that we don't traverse on the way to the lower bound as it
+	// walks the stack.
+	i.stack = []edges{}
+	// i.node starts off in the common case as pointing to the root node of the
+	// tree. By the time we return we have either found a lower bound and setup
+	// the stack to traverse all larger keys, or we have not and the stack and
+	// node should both be nil to prevent the iterator from assuming it is just
+	// iterating the whole tree from the root node. Either way this needs to end
+	// up as nil so just set it here.
+	n := i.node
+	i.node = nil
+	search := key
+
+	found := func(n *Node) {
+		i.stack = append(i.stack, edges{edge{node: n}})
+	}
+
+	findMin := func(n *Node) {
+		n = i.recurseMin(n)
+		if n != nil {
+			found(n)
+			return
+		}
+	}
+
+	for {
+		// Compare current prefix with the search key's same-length prefix.
+		var prefixCmp int
+		if len(n.prefix) < len(search) {
+			prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)])
+		} else {
+			prefixCmp = bytes.Compare(n.prefix, search)
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger, that means the lower bound is greater than the search
+			// and from now on we need to follow the minimum path to the smallest
+			// leaf under this subtree.
+			findMin(n)
+			return
+		}
+
+		if prefixCmp < 0 {
+			// Prefix is smaller than search prefix, that means there is no lower
+			// bound
+			i.node = nil
+			return
+		}
+
+		// Prefix is equal, we are still heading for an exact match. If this is a
+		// leaf and an exact match we're done.
+		if n.leaf != nil && bytes.Equal(n.leaf.key, key) {
+			found(n)
+			return
+		}
+
+		// Consume the search prefix if the current node has one. Note that this is
+		// safe because if n.prefix is longer than the search slice prefixCmp would
+		// have been > 0 above and the method would have already returned.
+		search = search[len(n.prefix):]
+
+		if len(search) == 0 {
+			// We've exhausted the search key, but the current node is not an exact
+			// match or not a leaf. That means the leaf value, if it exists, and all
+			// child nodes must be strictly greater, so the smallest key in this
+			// subtree must be the lower bound.
+			findMin(n)
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+		if lbNode == nil {
+			return
+		}
+
+		// Create stack edges for all the strictly higher edges in this node.
+		if idx+1 < len(n.edges) {
+			i.stack = append(i.stack, n.edges[idx+1:])
+		}
+
+		// Recurse
+		n = lbNode
+	}
+}
+
 // Next returns the next node in order
 func (i *Iterator) Next() ([]byte, interface{}, bool) {
 	// Initialize our stack if needed
 	if i.stack == nil && i.node != nil {
 		i.stack = []edges{
-			edges{
+			{
 				edge{node: i.node},
 			},
 		}
diff --git a/vendor/github.com/hashicorp/go-immutable-radix/node.go b/vendor/github.com/hashicorp/go-immutable-radix/node.go
index ef494fa7ca..3598548087 100644
--- a/vendor/github.com/hashicorp/go-immutable-radix/node.go
+++ b/vendor/github.com/hashicorp/go-immutable-radix/node.go
@@ -79,6 +79,18 @@ func (n *Node) getEdge(label byte) (int, *Node) {
 	return -1, nil
 }
 
+func (n *Node) getLowerBoundEdge(label byte) (int, *Node) {
+	num := len(n.edges)
+	idx := sort.Search(num, func(i int) bool {
+		return n.edges[i].label >= label
+	})
+	// we want lower bound behavior so return even if it's not an exact match
+	if idx < num {
+		return idx, n.edges[idx].node
+	}
+	return -1, nil
+}
+
 func (n *Node) delEdge(label byte) {
 	num := len(n.edges)
 	idx := sort.Search(num, func(i int) bool {
@@ -199,6 +211,12 @@ func (n *Node) Iterator() *Iterator {
 	return &Iterator{node: n}
 }
 
+// ReverseIterator is used to return an iterator at
+// the given node to walk the tree backwards
+func (n *Node) ReverseIterator() *ReverseIterator {
+	return NewReverseIterator(n)
+}
+
 // rawIterator is used to return a raw iterator at the given node to walk the
 // tree.
func (n *Node) rawIterator() *rawIterator { @@ -212,6 +230,11 @@ func (n *Node) Walk(fn WalkFn) { recursiveWalk(n, fn) } +// WalkBackwards is used to walk the tree in reverse order +func (n *Node) WalkBackwards(fn WalkFn) { + reverseRecursiveWalk(n, fn) +} + // WalkPrefix is used to walk the tree under a prefix func (n *Node) WalkPrefix(prefix []byte, fn WalkFn) { search := prefix @@ -274,66 +297,6 @@ func (n *Node) WalkPath(path []byte, fn WalkFn) { } } -func (n *Node) Seek(prefix []byte) *Seeker { - search := prefix - p := &pos{n: n} - for { - // Check for key exhaution - if len(search) == 0 { - return &Seeker{p} - } - - num := len(n.edges) - idx := sort.Search(num, func(i int) bool { - return n.edges[i].label >= search[0] - }) - p.current = idx - if idx < len(n.edges) { - n = n.edges[idx].node - if bytes.HasPrefix(search, n.prefix) && len(n.edges) > 0 { - search = search[len(n.prefix):] - p.current++ - p = &pos{n: n, prev: p} - continue - } - } - p.current++ - return &Seeker{p} - } -} - -type Seeker struct { - *pos -} - -type pos struct { - n *Node - current int - prev *pos - isLeaf bool -} - -func (s *Seeker) Next() (k []byte, v interface{}, ok bool) { - if s.current >= len(s.n.edges) { - if s.prev == nil { - return nil, nil, false - } - s.pos = s.prev - return s.Next() - } - - edge := s.n.edges[s.current] - s.current++ - if edge.node.leaf != nil && !s.isLeaf { - s.isLeaf = true - s.current-- - return edge.node.leaf.key, edge.node.leaf.val, true - } - s.isLeaf = false - s.pos = &pos{n: edge.node, prev: s.pos} - return s.Next() -} - // recursiveWalk is used to do a pre-order walk of a node // recursively. Returns true if the walk should be aborted func recursiveWalk(n *Node, fn WalkFn) bool { @@ -350,3 +313,22 @@ func recursiveWalk(n *Node, fn WalkFn) bool { } return false } + +// reverseRecursiveWalk is used to do a reverse pre-order +// walk of a node recursively. Returns true if the walk +// should be aborted +func reverseRecursiveWalk(n *Node, fn WalkFn) bool { + // Visit the leaf values if any + if n.leaf != nil && fn(n.leaf.key, n.leaf.val) { + return true + } + + // Recurse on the children in reverse order + for i := len(n.edges) - 1; i >= 0; i-- { + e := n.edges[i] + if reverseRecursiveWalk(e.node, fn) { + return true + } + } + return false +} diff --git a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go index 04814c1323..3c6a22525c 100644 --- a/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go +++ b/vendor/github.com/hashicorp/go-immutable-radix/raw_iter.go @@ -41,7 +41,7 @@ func (i *rawIterator) Next() { // Initialize our stack if needed. if i.stack == nil && i.node != nil { i.stack = []rawStackEntry{ - rawStackEntry{ + { edges: edges{ edge{node: i.node}, }, diff --git a/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go new file mode 100644 index 0000000000..554fa7129c --- /dev/null +++ b/vendor/github.com/hashicorp/go-immutable-radix/reverse_iter.go @@ -0,0 +1,239 @@ +package iradix + +import ( + "bytes" +) + +// ReverseIterator is used to iterate over a set of nodes +// in reverse in-order +type ReverseIterator struct { + i *Iterator + + // expandedParents stores the set of parent nodes whose relevant children have + // already been pushed into the stack. This can happen during seek or during + // iteration. 
+ // + // Unlike forward iteration we need to recurse into children before we can + // output the value stored in an internal leaf since all children are greater. + // We use this to track whether we have already ensured all the children are + // in the stack. + expandedParents map[*Node]struct{} +} + +// NewReverseIterator returns a new ReverseIterator at a node +func NewReverseIterator(n *Node) *ReverseIterator { + return &ReverseIterator{ + i: &Iterator{node: n}, + } +} + +// SeekPrefixWatch is used to seek the iterator to a given prefix +// and returns the watch channel of the finest granularity +func (ri *ReverseIterator) SeekPrefixWatch(prefix []byte) (watch <-chan struct{}) { + return ri.i.SeekPrefixWatch(prefix) +} + +// SeekPrefix is used to seek the iterator to a given prefix +func (ri *ReverseIterator) SeekPrefix(prefix []byte) { + ri.i.SeekPrefixWatch(prefix) +} + +// SeekReverseLowerBound is used to seek the iterator to the largest key that is +// lower or equal to the given key. There is no watch variant as it's hard to +// predict based on the radix structure which node(s) changes might affect the +// result. +func (ri *ReverseIterator) SeekReverseLowerBound(key []byte) { + // Wipe the stack. Unlike Prefix iteration, we need to build the stack as we + // go because we need only a subset of edges of many nodes in the path to the + // leaf with the lower bound. Note that the iterator will still recurse into + // children that we don't traverse on the way to the reverse lower bound as it + // walks the stack. + ri.i.stack = []edges{} + // ri.i.node starts off in the common case as pointing to the root node of the + // tree. By the time we return we have either found a lower bound and setup + // the stack to traverse all larger keys, or we have not and the stack and + // node should both be nil to prevent the iterator from assuming it is just + // iterating the whole tree from the root node. Either way this needs to end + // up as nil so just set it here. + n := ri.i.node + ri.i.node = nil + search := key + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + found := func(n *Node) { + ri.i.stack = append(ri.i.stack, edges{edge{node: n}}) + // We need to mark this node as expanded in advance too otherwise the + // iterator will attempt to walk all of its children even though they are + // greater than the lower bound we have found. We've expanded it in the + // sense that all of its children that we want to walk are already in the + // stack (i.e. none of them). + ri.expandedParents[n] = struct{}{} + } + + for { + // Compare current prefix with the search key's same-length prefix. + var prefixCmp int + if len(n.prefix) < len(search) { + prefixCmp = bytes.Compare(n.prefix, search[0:len(n.prefix)]) + } else { + prefixCmp = bytes.Compare(n.prefix, search) + } + + if prefixCmp < 0 { + // Prefix is smaller than search prefix, that means there is no exact + // match for the search key. But we are looking in reverse, so the reverse + // lower bound will be the largest leaf under this subtree, since it is + // the value that would come right before the current search key if it + // were in the tree. So we need to follow the maximum path in this subtree + // to find it. Note that this is exactly what the iterator will already do + // if it finds a node in the stack that has _not_ been marked as expanded + // so in this one case we don't call `found` and instead let the iterator + // do the expansion and recursion through all the children. 
+			ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+			return
+		}
+
+		if prefixCmp > 0 {
+			// Prefix is larger than search prefix, or there is no prefix but we've
+			// also exhausted the search key. Either way, that means there is no
+			// reverse lower bound since nothing comes before our current search
+			// prefix.
+			return
+		}
+
+		// If this is a leaf, something needs to happen! Note that if it's a leaf
+		// and prefixCmp was zero (which it must be to get here) then the leaf value
+		// is either an exact match for the search, or it's lower. It can't be
+		// greater.
+		if n.isLeaf() {
+
+			// Firstly, if it's an exact match, we're done!
+			if bytes.Equal(n.leaf.key, key) {
+				found(n)
+				return
+			}
+
+			// It's not, so this node's leaf value must be lower and could still be a
+			// valid contender for reverse lower bound.
+
+			// If it has no children then we are also done.
+			if len(n.edges) == 0 {
+				// This leaf is the lower bound.
+				found(n)
+				return
+			}
+
+			// Finally, this leaf is internal (has children) so we'll keep searching,
+			// but we need to add it to the iterator's stack since it has a leaf value
+			// that needs to be iterated over. It needs to be added to the stack
+			// before its children below as it comes first.
+			ri.i.stack = append(ri.i.stack, edges{edge{node: n}})
+			// We also need to mark it as expanded since we'll be adding any of its
+			// relevant children below and so don't want the iterator to re-add them
+			// on its way back up the stack.
+			ri.expandedParents[n] = struct{}{}
+		}
+
+		// Consume the search prefix. Note that this is safe because if n.prefix is
+		// longer than the search slice prefixCmp would have been > 0 above and the
+		// method would have already returned.
+		search = search[len(n.prefix):]
+
+		if len(search) == 0 {
+			// We've exhausted the search key but we are not at a leaf. That means all
+			// children are greater than the search key so a reverse lower bound
+			// doesn't exist in this subtree. Note that there might still be one in
+			// the whole radix tree by following a different path somewhere further
+			// up. If that's the case then the iterator's stack will contain all the
+			// smaller nodes already and Previous will walk through them correctly.
+			return
+		}
+
+		// Otherwise, take the lower bound next edge.
+		idx, lbNode := n.getLowerBoundEdge(search[0])
+
+		// From here, we need to update the stack with all values lower than
+		// the lower bound edge. Since getLowerBoundEdge() returns -1 when the
+		// search prefix is larger than all edges, we need to place idx at the
+		// last edge index so they can all be placed in the stack, since they
+		// come before our search prefix.
+		if idx == -1 {
+			idx = len(n.edges)
+		}
+
+		// Create stack edges for all the strictly lower edges in this node.
+		if len(n.edges[:idx]) > 0 {
+			ri.i.stack = append(ri.i.stack, n.edges[:idx])
+		}
+
+		// Exit if there's no lower bound edge. The stack will have the previous
+		// nodes already.
+ if lbNode == nil { + return + } + + // Recurse + n = lbNode + } +} + +// Previous returns the previous node in reverse order +func (ri *ReverseIterator) Previous() ([]byte, interface{}, bool) { + // Initialize our stack if needed + if ri.i.stack == nil && ri.i.node != nil { + ri.i.stack = []edges{ + { + edge{node: ri.i.node}, + }, + } + } + + if ri.expandedParents == nil { + ri.expandedParents = make(map[*Node]struct{}) + } + + for len(ri.i.stack) > 0 { + // Inspect the last element of the stack + n := len(ri.i.stack) + last := ri.i.stack[n-1] + m := len(last) + elem := last[m-1].node + + _, alreadyExpanded := ri.expandedParents[elem] + + // If this is an internal node and we've not seen it already, we need to + // leave it in the stack so we can return its possible leaf value _after_ + // we've recursed through all its children. + if len(elem.edges) > 0 && !alreadyExpanded { + // record that we've seen this node! + ri.expandedParents[elem] = struct{}{} + // push child edges onto stack and skip the rest of the loop to recurse + // into the largest one. + ri.i.stack = append(ri.i.stack, elem.edges) + continue + } + + // Remove the node from the stack + if m > 1 { + ri.i.stack[n-1] = last[:m-1] + } else { + ri.i.stack = ri.i.stack[:n-1] + } + // We don't need this state any more as it's no longer in the stack so we + // won't visit it again + if alreadyExpanded { + delete(ri.expandedParents, elem) + } + + // If this is a leaf, return it + if elem.leaf != nil { + return elem.leaf.key, elem.leaf.val, true + } + + // it's not a leaf so keep walking the stack to find the previous leaf + } + return nil, nil, false +} diff --git a/vendor/github.com/klauspost/compress/README.md b/vendor/github.com/klauspost/compress/README.md index a152bdd036..9ddf39f6f3 100644 --- a/vendor/github.com/klauspost/compress/README.md +++ b/vendor/github.com/klauspost/compress/README.md @@ -17,6 +17,17 @@ This package provides various compression algorithms. # changelog +* Feb 22, 2022 (v1.14.4) + * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) + * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) + * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 + * huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) + +* Feb 17, 2022 (v1.14.3) + * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) + * flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483) + * s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486) + * Jan 25, 2022 (v1.14.2) * zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476) * zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469) @@ -61,6 +72,9 @@ This package provides various compression algorithms. 
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382) * zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380) +
+ See changes to v1.12.x + * May 25, 2021 (v1.12.3) * deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374) * deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375) @@ -82,9 +96,10 @@ This package provides various compression algorithms. * s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352) * zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346) * s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349) +
- See changes prior to v1.12.1 + See changes to v1.11.x * Mar 26, 2021 (v1.11.13) * zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345) @@ -143,7 +158,7 @@ This package provides various compression algorithms.
- See changes prior to v1.11.0 + See changes to v1.10.x * July 8, 2020 (v1.10.11) * zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278) @@ -305,11 +320,6 @@ This package provides various compression algorithms. # deflate usage -* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/). -* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/). -* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/). -* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) - The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: | old import | new import | Documentation @@ -331,6 +341,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range. If you expect to have a lot of concurrently allocated Writers consider using the stateless compress described below. +For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). + # Stateless compression This package offers stateless compression as a special option for gzip/deflate. diff --git a/vendor/github.com/klauspost/compress/huff0/bitreader.go b/vendor/github.com/klauspost/compress/huff0/bitreader.go index a4979e8868..03562db16f 100644 --- a/vendor/github.com/klauspost/compress/huff0/bitreader.go +++ b/vendor/github.com/klauspost/compress/huff0/bitreader.go @@ -8,115 +8,10 @@ package huff0 import ( "encoding/binary" "errors" + "fmt" "io" ) -// bitReader reads a bitstream in reverse. -// The last set bit indicates the start of the stream and is used -// for aligning the input. -type bitReader struct { - in []byte - off uint // next byte to read is at in[off - 1] - value uint64 - bitsRead uint8 -} - -// init initializes and resets the bit reader. -func (b *bitReader) init(in []byte) error { - if len(in) < 1 { - return errors.New("corrupt stream: too short") - } - b.in = in - b.off = uint(len(in)) - // The highest bit of the last byte indicates where to start - v := in[len(in)-1] - if v == 0 { - return errors.New("corrupt stream, did not find end of stream") - } - b.bitsRead = 64 - b.value = 0 - if len(in) >= 8 { - b.fillFastStart() - } else { - b.fill() - b.fill() - } - b.bitsRead += 8 - uint8(highBit32(uint32(v))) - return nil -} - -// peekBitsFast requires that at least one bit is requested every time. -// There are no checks if the buffer is filled. -func (b *bitReader) peekBitsFast(n uint8) uint16 { - const regMask = 64 - 1 - v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask)) - return v -} - -// fillFast() will make sure at least 32 bits are available. -// There must be at least 4 bytes available. -func (b *bitReader) fillFast() { - if b.bitsRead < 32 { - return - } - - // 2 bounds checks. - v := b.in[b.off-4 : b.off] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 -} - -func (b *bitReader) advance(n uint8) { - b.bitsRead += n -} - -// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. -func (b *bitReader) fillFastStart() { - // Do single re-slice to avoid bounds checks. 
- b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) - b.bitsRead = 0 - b.off -= 8 -} - -// fill() will make sure at least 32 bits are available. -func (b *bitReader) fill() { - if b.bitsRead < 32 { - return - } - if b.off > 4 { - v := b.in[b.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - b.value = (b.value << 32) | uint64(low) - b.bitsRead -= 32 - b.off -= 4 - return - } - for b.off > 0 { - b.value = (b.value << 8) | uint64(b.in[b.off-1]) - b.bitsRead -= 8 - b.off-- - } -} - -// finished returns true if all bits have been read from the bit stream. -func (b *bitReader) finished() bool { - return b.off == 0 && b.bitsRead >= 64 -} - -// close the bitstream and returns an error if out-of-buffer reads occurred. -func (b *bitReader) close() error { - // Release reference. - b.in = nil - if b.bitsRead > 64 { - return io.ErrUnexpectedEOF - } - return nil -} - // bitReader reads a bitstream in reverse. // The last set bit indicates the start of the stream and is used // for aligning the input. @@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderBytes) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderBytes) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } @@ -318,10 +220,17 @@ func (b *bitReaderShifted) finished() bool { return b.off == 0 && b.bitsRead >= 64 } +func (b *bitReaderShifted) remaining() uint { + return b.off*8 + uint(64-b.bitsRead) +} + // close the bitstream and returns an error if out-of-buffer reads occurred. func (b *bitReaderShifted) close() error { // Release reference. b.in = nil + if b.remaining() > 0 { + return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining()) + } if b.bitsRead > 64 { return io.ErrUnexpectedEOF } diff --git a/vendor/github.com/klauspost/compress/huff0/compress.go b/vendor/github.com/klauspost/compress/huff0/compress.go index 8323dc0538..bc95ac623b 100644 --- a/vendor/github.com/klauspost/compress/huff0/compress.go +++ b/vendor/github.com/klauspost/compress/huff0/compress.go @@ -2,6 +2,7 @@ package huff0 import ( "fmt" + "math" "runtime" "sync" ) @@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) { if err != nil { return nil, err } + if len(s.Out)-idx > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. @@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) { return nil, errs[i] } o := s.tmpOut[i] + if len(o) > math.MaxUint16 { + // We cannot store the size in the jump table + return nil, ErrIncompressible + } // Write compressed length as little endian before block. if i < 3 { // Last length is not written. 
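The `math.MaxUint16` guards just added to `compress4X`/`compress4Xp` exist because of how the 4X format frames its output: the payload is split into four independent streams, and the sizes of the first three are stored as little-endian `uint16` values in a six-byte jump table (the last length is not written; it is implied by the total size). A stream longer than 65535 bytes therefore cannot be represented, and the encoder has to report `ErrIncompressible`. The following is a rough illustrative sketch of that framing under those assumptions; `buildJumpTable` is a hypothetical helper for this aside, not part of huff0:

```go
package main

import (
	"encoding/binary"
	"errors"
	"fmt"
	"math"
)

// buildJumpTable frames four compressed streams the way the huff0 4X format
// does: a 6-byte table holding the first three stream lengths as little-endian
// uint16 values, followed by the stream bytes. The fourth length is implied by
// the total size. (Hypothetical helper, for illustration only.)
func buildJumpTable(streams [4][]byte) ([]byte, error) {
	out := make([]byte, 6)
	for i := 0; i < 3; i++ {
		if len(streams[i]) > math.MaxUint16 {
			// Mirrors the new guard in compress4X: the size cannot be
			// stored in the jump table.
			return nil, errors.New("stream too large for jump table")
		}
		binary.LittleEndian.PutUint16(out[i*2:], uint16(len(streams[i])))
	}
	for _, s := range streams {
		out = append(out, s...)
	}
	return out, nil
}

func main() {
	framed, err := buildJumpTable([4][]byte{
		{1, 2, 3}, {4, 5}, {6}, {7, 8, 9, 10},
	})
	if err != nil {
		panic(err)
	}
	// Reading it back mirrors the jump-table loop in Decompress4X further
	// down in this diff: read three uint16 lengths, then slice the streams
	// out in order; the fourth stream is whatever remains.
	start := 6
	for i := 0; i < 3; i++ {
		length := int(binary.LittleEndian.Uint16(framed[i*2:]))
		fmt.Printf("stream %d: % x\n", i, framed[start:start+length])
		start += length
	}
	fmt.Printf("stream 3: % x\n", framed[start:])
}
```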
diff --git a/vendor/github.com/klauspost/compress/huff0/decompress.go b/vendor/github.com/klauspost/compress/huff0/decompress.go index 2a06bd1a7e..3ae7d46771 100644 --- a/vendor/github.com/klauspost/compress/huff0/decompress.go +++ b/vendor/github.com/klauspost/compress/huff0/decompress.go @@ -4,6 +4,7 @@ import ( "errors" "fmt" "io" + "sync" "github.com/klauspost/compress/fse" ) @@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder { return &Decoder{ dt: s.dt, actualTableLog: s.actualTableLog, + bufs: &s.decPool, } } @@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder { type Decoder struct { dt dTable actualTableLog uint8 + bufs *sync.Pool +} + +func (d *Decoder) buffer() *[4][256]byte { + buf, ok := d.bufs.Get().(*[4][256]byte) + if ok { + return buf + } + return &[4][256]byte{} } // Decompress1X will decompress a 1X encoded stream. @@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { dt := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 for br.off >= 8 { @@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) { bitsLeft -= nBits dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 switch d.actualTableLog { @@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) @@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { if off == 0 { if len(dst)+256 > maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } dst = append(dst, buf[:]...) 
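Every early return in the hunks above now pairs with a `d.bufs.Put(bufs)`: the decode scratch table comes from a `sync.Pool` (the new `Scratch.decPool`, surfaced through `Decoder.buffer()`), and a buffer that is not put back on an error path is simply lost to the garbage collector. A minimal sketch of the same get/assert/put pattern, detached from huff0 and using made-up names:

```go
package main

import (
	"fmt"
	"sync"
)

// pool hands out fixed-size scratch tables. Get returns an interface{}, so a
// type assertion is needed; when the pool is empty we allocate a fresh table,
// which is exactly what Decoder.buffer() does in the diff above.
var pool sync.Pool

func buffer() *[4][256]byte {
	if buf, ok := pool.Get().(*[4][256]byte); ok {
		return buf
	}
	return &[4][256]byte{}
}

func work(fail bool) error {
	buf := buffer()
	if fail {
		// Error path: the buffer must still be returned to the pool,
		// mirroring the d.bufs.Put(bufs) calls before each error return.
		pool.Put(buf)
		return fmt.Errorf("simulated decode error")
	}
	buf[0][0] = 42 // use the scratch space
	pool.Put(buf)
	return nil
}

func main() {
	fmt.Println(work(false), work(true))
}
```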
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } } default: + d.bufs.Put(bufs) return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog) } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { } if len(dst) >= maxDecodedSize { br.close() + d.bufs.Put(bufs) return nil, ErrMaxDecodedSizeExceeded } v := dt[br.peekByteFast()>>shift] @@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { dt := d.dt.single[:256] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + bufs := d.buffer() + buf := &bufs[0] var off uint8 const shift = 56 @@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { off += 4 if off == 0 { if len(dst)+256 > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } if len(dst)+int(off) > maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { } } if len(dst) >= maxDecodedSize { + d.bufs.Put(bufs) br.close() return nil, ErrMaxDecodedSizeExceeded } @@ -688,6 +721,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) { bitsLeft -= int8(nBits) dst = append(dst, uint8(v.entry>>8)) } + d.bufs.Put(bufs) return dst, br.close() } @@ -707,6 +741,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { } var br [4]bitReaderShifted + // Decode "jump table" start := 6 for i := 0; i < 3; i++ { length := int(src[i*2]) | (int(src[i*2+1]) << 8) @@ -735,12 +770,12 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. 
- var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 2 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -758,8 +793,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { v2 := single[val2&tlMask] br[stream].advance(uint8(v.entry)) br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) val = br[stream].peekBitsFast(d.actualTableLog) val2 = br[stream2].peekBitsFast(d.actualTableLog) @@ -767,8 +802,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { v2 = single[val2&tlMask] br[stream].advance(uint8(v.entry)) br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) } { @@ -783,8 +818,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { v2 := single[val2&tlMask] br[stream].advance(uint8(v.entry)) br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream] = uint8(v.entry >> 8) - buf[off+bufoff*stream2] = uint8(v2.entry >> 8) + buf[stream][off] = uint8(v.entry >> 8) + buf[stream2][off] = uint8(v2.entry >> 8) val = br[stream].peekBitsFast(d.actualTableLog) val2 = br[stream2].peekBitsFast(d.actualTableLog) @@ -792,25 +827,26 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { v2 = single[val2&tlMask] br[stream].advance(uint8(v.entry)) br[stream2].advance(uint8(v2.entry)) - buf[off+bufoff*stream+1] = uint8(v.entry >> 8) - buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8) + buf[stream][off+1] = uint8(v.entry >> 8) + buf[stream2][off+1] = uint8(v2.entry >> 8) } off += 2 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -818,41 +854,31 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. 
+ remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := br.off*8 + uint(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { br.fill() - if false && br.bitsRead >= 32 { - if br.off >= 4 { - v := br.in[br.off-4:] - v = v[:4] - low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24) - br.value = (br.value << 32) | uint64(low) - br.bitsRead -= 32 - br.off -= 4 - } else { - for br.off > 0 { - br.value = (br.value << 8) | uint64(br.in[br.off-1]) - br.bitsRead -= 8 - br.off-- - } - } - } - // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -865,12 +891,17 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) { out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -916,12 +947,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -942,8 +973,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -951,8 +982,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -960,8 +991,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -969,8 +1000,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { @@ -987,8 +1018,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + buf[stream][off] = uint8(v >> 8) + 
buf[stream2][off] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -996,8 +1027,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1005,8 +1036,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) v = single[uint8(br1.value>>shift)].entry v2 = single[uint8(br2.value>>shift)].entry @@ -1014,25 +1045,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { br1.value <<= v & 63 br2.bitsRead += uint8(v2) br2.value <<= v2 & 63 - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) - buf[off+bufoff*stream+3] = uint8(v >> 8) + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1040,23 +1072,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { if off > 0 { ioff := int(off) if len(out) < dstEvery*3+ioff { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1076,7 +1116,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { } } // end inline... 
- if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1084,16 +1125,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) { v := single[uint8(br.value>>shift)].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } @@ -1135,12 +1182,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { single := d.dt.single[:tlSize] // Use temp table to avoid bound checks/append penalty. - var buf [256]byte + buf := d.buffer() var off uint8 var decoded int // Decode 4 values from each decoder/loop. - const bufoff = 256 / 4 + const bufoff = 256 for { if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 { break @@ -1150,104 +1197,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { // Interleave 2 decodes. const stream = 0 const stream2 = 1 - br[stream].fillFast() - br[stream2].fillFast() + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = 
uint8(v2 >> 8) + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } { const stream = 2 const stream2 = 3 - br[stream].fillFast() - br[stream2].fillFast() + br1 := &br[stream] + br2 := &br[stream2] + br1.fillFast() + br2.fillFast() - v := single[uint8(br[stream].value>>shift)].entry - v2 := single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream] = uint8(v >> 8) - buf[off+bufoff*stream2] = uint8(v2 >> 8) + v := single[uint8(br1.value>>shift)].entry + v2 := single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off] = uint8(v >> 8) + buf[stream2][off] = uint8(v2 >> 8) - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+1] = uint8(v >> 8) - buf[off+bufoff*stream2+1] = uint8(v2 >> 8) + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+1] = uint8(v >> 8) + buf[stream2][off+1] = uint8(v2 >> 8) - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+2] = uint8(v >> 8) - buf[off+bufoff*stream2+2] = uint8(v2 >> 8) + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+2] = uint8(v >> 8) + buf[stream2][off+2] = uint8(v2 >> 8) - v = single[uint8(br[stream].value>>shift)].entry - v2 = single[uint8(br[stream2].value>>shift)].entry - br[stream].bitsRead += uint8(v) - br[stream].value <<= v & 63 - br[stream2].bitsRead += uint8(v2) - br[stream2].value <<= v2 & 63 - buf[off+bufoff*stream+3] = uint8(v >> 8) - buf[off+bufoff*stream2+3] = uint8(v2 >> 8) + v = single[uint8(br1.value>>shift)].entry + v2 = single[uint8(br2.value>>shift)].entry + br1.bitsRead += uint8(v) + br1.value <<= v & 63 + br2.bitsRead += uint8(v2) + br2.value <<= v2 & 63 + buf[stream][off+3] = uint8(v >> 8) + buf[stream2][off+3] = uint8(v2 >> 8) } off += 4 - if off == bufoff { + if off == 0 { if bufoff > dstEvery { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 1") } - copy(out, buf[:bufoff]) - copy(out[dstEvery:], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4]) - off = 0 + copy(out, buf[0][:]) + copy(out[dstEvery:], buf[1][:]) + copy(out[dstEvery*2:], buf[2][:]) + copy(out[dstEvery*3:], buf[3][:]) out = out[bufoff:] - decoded += 256 + decoded += bufoff * 4 // There must at least be 3 buffers left. 
if len(out) < dstEvery*3 { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 2") } } @@ -1257,21 +1309,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { if len(out) < dstEvery*3+ioff { return nil, errors.New("corruption detected: stream overrun 3") } - copy(out, buf[:off]) - copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2]) - copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3]) - copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4]) + copy(out, buf[0][:off]) + copy(out[dstEvery:], buf[1][:off]) + copy(out[dstEvery*2:], buf[2][:off]) + copy(out[dstEvery*3:], buf[3][:off]) decoded += int(off) * 4 out = out[off:] } // Decode remaining. + remainBytes := dstEvery - (decoded / 4) for i := range br { offset := dstEvery * i + endsAt := offset + remainBytes + if endsAt > len(out) { + endsAt = len(out) + } br := &br[i] - bitsLeft := int(br.off*8) + int(64-br.bitsRead) + bitsLeft := br.remaining() for bitsLeft > 0 { if br.finished() { + d.bufs.Put(buf) return nil, io.ErrUnexpectedEOF } if br.bitsRead >= 56 { @@ -1291,7 +1349,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { } } // end inline... - if offset >= len(out) { + if offset >= endsAt { + d.bufs.Put(buf) return nil, errors.New("corruption detected: stream overrun 4") } @@ -1299,16 +1358,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) { v := single[br.peekByteFast()].entry nBits := uint8(v) br.advance(nBits) - bitsLeft -= int(nBits) + bitsLeft -= uint(nBits) out[offset] = uint8(v >> 8) offset++ } + if offset != endsAt { + d.bufs.Put(buf) + return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt) + } + decoded += offset - dstEvery*i err = br.close() if err != nil { + d.bufs.Put(buf) return nil, err } } + d.bufs.Put(buf) if dstSize != decoded { return nil, errors.New("corruption detected: short output block") } diff --git a/vendor/github.com/klauspost/compress/huff0/huff0.go b/vendor/github.com/klauspost/compress/huff0/huff0.go index 3ee00ecb47..e8ad17ad08 100644 --- a/vendor/github.com/klauspost/compress/huff0/huff0.go +++ b/vendor/github.com/klauspost/compress/huff0/huff0.go @@ -8,6 +8,7 @@ import ( "fmt" "math" "math/bits" + "sync" "github.com/klauspost/compress/fse" ) @@ -116,6 +117,7 @@ type Scratch struct { nodes []nodeElt tmpOut [4][]byte fse *fse.Scratch + decPool sync.Pool // *[4][256]byte buffers. huffWeight [maxSymbolValue + 1]byte } diff --git a/vendor/github.com/klauspost/compress/zstd/README.md b/vendor/github.com/klauspost/compress/zstd/README.md index c8f0f16fc1..c876c591ac 100644 --- a/vendor/github.com/klauspost/compress/zstd/README.md +++ b/vendor/github.com/klauspost/compress/zstd/README.md @@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is in the future. So if you want to limit concurrency for future updates, specify the concurrency you would like. +If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)` +which will compress input as each block is completed, blocking on writes until each has completed. + You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined compression settings can be specified. 
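The README addition above pins down the new synchronous encoder mode: with `WithEncoderConcurrency(1)`, each block is compressed on the calling goroutine and written as it completes, with no async workers spawned. A short usage sketch, assuming only the `WithEncoderConcurrency` option named in the README diff:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	var out bytes.Buffer

	// WithEncoderConcurrency(1) keeps stream compression on the caller's
	// goroutine: blocks are compressed as they complete, blocking on writes.
	enc, err := zstd.NewWriter(&out, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("synchronous stream encoding")); err != nil {
		panic(err)
	}
	// Close flushes the final block and the frame footer.
	if err := enc.Close(); err != nil {
		panic(err)
	}
	fmt.Println(out.Len(), "compressed bytes")
}
```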
@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
 For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.
 
 `EncodeAll` will encode all input in src and append it to dst.
-This function can be called concurrently, but each call will only run on a single goroutine.
+This function can be called concurrently.
+Each call will run on the same goroutine as the caller.
 
 Encoded blocks can be concatenated and the result will be the combined input stream.
 Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
 }
 ```
 
-It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
-See "Allocation-less operation" below.
+When running with default settings, it is important to use the "Close" function when you no longer need
+the Reader, so that its goroutines are stopped.
+Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.
+
+Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
+However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)`, which will decompress data
+only as it is requested.
 
 For decoding buffers, it could look something like this:
 
@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"
 
 // Create a reader that caches decompressors.
 // For this operation type we supply a nil Reader.
-var decoder, _ = zstd.NewReader(nil)
+var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))
 
 // Decompress a buffer. We don't supply a destination buffer,
 // so it will be allocated by the decoder.
@@ -303,9 +312,12 @@ func Decompress(src []byte) ([]byte, error) {
 ```
 
 Both of these cases should provide the functionality needed.
-The decoder can be used for *concurrent* decompression of multiple buffers.
+The decoder can be used for *concurrent* decompression of multiple buffers.
+By default 4 decompressors will be created.
+
 It will only allow a certain number of concurrent operations to run.
-To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
+It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
 
 ### Dictionaries
 
@@ -357,19 +369,21 @@ In this case no unneeded allocations should be made.
 The buffer decoder does everything on the same goroutine and does nothing concurrently.
 It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.
 
-The stream decoder operates on
+The stream decoder will create goroutines that:
 
-* One goroutine reads input and splits the input to several block decoders.
-* A number of decoders will decode blocks.
-* A goroutine coordinates these blocks and sends history from one to the next.
+1) Read input and split it into blocks.
+2) Decompress literals.
+3) Decompress sequences.
+4) Reconstruct the output stream.
 
 So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.
 
+For streams, the concurrency level determines how many blocks ahead the decompression will start.
+
 Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.
-In practice this means that concurrency is often limited to utilizing about 2 cores effectively.
-
-
+In practice this means that concurrency is often limited to utilizing about 3 cores effectively.
+
 ### Benchmarks
 
 These are some examples of performance compared to [datadog cgo library](https://github.com/DataDog/zstd).
 
diff --git a/vendor/github.com/klauspost/compress/zstd/bitreader.go b/vendor/github.com/klauspost/compress/zstd/bitreader.go
index 753d17df63..d7cd15ba29 100644
--- a/vendor/github.com/klauspost/compress/zstd/bitreader.go
+++ b/vendor/github.com/klauspost/compress/zstd/bitreader.go
@@ -7,6 +7,7 @@ package zstd
 import (
 	"encoding/binary"
 	"errors"
+	"fmt"
 	"io"
 	"math/bits"
 )
@@ -132,6 +133,9 @@ func (b *bitReader) remain() uint {
 func (b *bitReader) close() error {
 	// Release reference.
 	b.in = nil
+	if !b.finished() {
+		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
+	}
 	if b.bitsRead > 64 {
 		return io.ErrUnexpectedEOF
 	}
diff --git a/vendor/github.com/klauspost/compress/zstd/blockdec.go b/vendor/github.com/klauspost/compress/zstd/blockdec.go
index dc587b2c94..607b62ee37 100644
--- a/vendor/github.com/klauspost/compress/zstd/blockdec.go
+++ b/vendor/github.com/klauspost/compress/zstd/blockdec.go
@@ -76,16 +76,25 @@ type blockDec struct {
 	// Window size of the block.
 	WindowSize uint64
 
-	history chan *history
-	input   chan struct{}
-	result  chan decodeOutput
-	err     error
-	decWG   sync.WaitGroup
+	err error
+
+	// Check against this crc
+	checkCRC []byte
 
 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.
 	localFrame *frameDec
 
+	sequence []seqVals
+
+	async struct {
+		newHist  *history
+		literals []byte
+		seqData  []byte
+		seqSize  int // Size of uncompressed sequences
+		fcs      uint64
+	}
+
 	// Block is RLE, this is the size.
 	RLESize uint32
 	tmp     [4]byte
@@ -108,13 +117,8 @@ func (b *blockDec) String() string {
 
 func newBlockDec(lowMem bool) *blockDec {
 	b := blockDec{
-		lowMem:  lowMem,
-		result:  make(chan decodeOutput, 1),
-		input:   make(chan struct{}, 1),
-		history: make(chan *history, 1),
+		lowMem: lowMem,
 	}
-	b.decWG.Add(1)
-	go b.startDecoder()
 	return &b
 }
 
@@ -137,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	case blockTypeReserved:
 		return ErrReservedBlockType
 	case blockTypeRLE:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
 		b.RLESize = uint32(cSize)
 		if b.lowMem {
 			maxSize = cSize
@@ -158,6 +168,13 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 		return ErrCompressedSizeTooBig
 	}
 	case blockTypeRaw:
+		if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
+			if debugDecoder {
+				printf("raw block too big: csize:%d block: %+v\n", uint64(cSize), b)
+			}
+			return ErrWindowSizeExceeded
+		}
+
 		b.RLESize = 0
 		// We do not need a destination for raw blocks.
 		maxSize = -1
@@ -192,85 +209,14 @@ func (b *blockDec) sendErr(err error) {
 	b.Last = true
 	b.Type = blockTypeReserved
 	b.err = err
-	b.input <- struct{}{}
 }
 
 // Close will release resources.
 // Closed blockDec cannot be reset.
 func (b *blockDec) Close() {
-	close(b.input)
-	close(b.history)
-	close(b.result)
-	b.decWG.Wait()
 }
 
-// decodeAsync will prepare decoding the block when it receives input.
-// This will separate output and history.
-func (b *blockDec) startDecoder() { - defer b.decWG.Done() - for range b.input { - //println("blockDec: Got block input") - switch b.Type { - case blockTypeRLE: - if cap(b.dst) < int(b.RLESize) { - if b.lowMem { - b.dst = make([]byte, b.RLESize) - } else { - b.dst = make([]byte, maxBlockSize) - } - } - o := decodeOutput{ - d: b, - b: b.dst[:b.RLESize], - err: nil, - } - v := b.data[0] - for i := range o.b { - o.b[i] = v - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeRaw: - o := decodeOutput{ - d: b, - b: b.data, - err: nil, - } - hist := <-b.history - hist.append(o.b) - b.result <- o - case blockTypeCompressed: - b.dst = b.dst[:0] - err := b.decodeCompressed(nil) - o := decodeOutput{ - d: b, - b: b.dst, - err: err, - } - if debugDecoder { - println("Decompressed to", len(b.dst), "bytes, error:", err) - } - b.result <- o - case blockTypeReserved: - // Used for returning errors. - <-b.history - b.result <- decodeOutput{ - d: b, - b: nil, - err: b.err, - } - default: - panic("Invalid block type") - } - if debugDecoder { - println("blockDec: Finished block") - } - } -} - -// decodeAsync will prepare decoding the block when it receives the history. -// If history is provided, it will not fetch it from the channel. +// decodeBuf func (b *blockDec) decodeBuf(hist *history) error { switch b.Type { case blockTypeRLE: @@ -293,14 +239,23 @@ func (b *blockDec) decodeBuf(hist *history) error { return nil case blockTypeCompressed: saved := b.dst - b.dst = hist.b - hist.b = nil + // Append directly to history + if hist.ignoreBuffer == 0 { + b.dst = hist.b + hist.b = nil + } else { + b.dst = b.dst[:0] + } err := b.decodeCompressed(hist) if debugDecoder { println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err) } - hist.b = b.dst - b.dst = saved + if hist.ignoreBuffer == 0 { + hist.b = b.dst + b.dst = saved + } else { + hist.appendKeep(b.dst) + } return err case blockTypeReserved: // Used for returning errors. @@ -310,30 +265,18 @@ func (b *blockDec) decodeBuf(hist *history) error { } } -// decodeCompressed will start decompressing a block. -// If no history is supplied the decoder will decodeAsync as much as possible -// before fetching from blockDec.history -func (b *blockDec) decodeCompressed(hist *history) error { - in := b.data - delayedHistory := hist == nil - - if delayedHistory { - // We must always grab history. - defer func() { - if hist == nil { - <-b.history - } - }() - } +func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) { // There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header if len(in) < 2 { - return ErrBlockTooSmall + return in, ErrBlockTooSmall } + litType := literalsBlockType(in[0] & 3) var litRegenSize int var litCompSize int sizeFormat := (in[0] >> 2) & 3 var fourStreams bool + var literals []byte switch litType { case literalsBlockRaw, literalsBlockRLE: switch sizeFormat { @@ -349,7 +292,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes. if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12) in = in[3:] @@ -360,7 +303,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { // Both Regenerated_Size and Compressed_Size use 10 bits (0-1023). 
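To make the bit layout parsed just below concrete, here is a hedged round-trip sketch for this size format: the low 4 bits of the first header byte hold the literals type and size format, followed by two 10-bit sizes packed in little-endian bit order. Helper names and values are invented for illustration:

```go
package main

import "fmt"

// unpackSizes extracts Regenerated_Size and Compressed_Size from a
// 3-byte literals section header of this size format (10 bits each),
// mirroring the shift/mask arithmetic used by the decoder.
func unpackSizes(h [3]byte) (regen, comp int) {
	n := uint64(h[0]>>4) + uint64(h[1])<<4 + uint64(h[2])<<12
	return int(n & 1023), int(n >> 10)
}

func main() {
	// 20 bits total: regen=300 in the low 10 bits, comp=700 above them.
	n := uint64(300) | uint64(700)<<10
	h := [3]byte{byte(n<<4) & 0xf0, byte(n >> 4), byte(n >> 12)}
	regen, comp := unpackSizes(h)
	fmt.Println(regen, comp) // 300 700
}
```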
if len(in) < 3 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) litRegenSize = int(n & 1023) @@ -371,7 +314,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 4 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) litRegenSize = int(n & 16383) @@ -381,7 +324,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { fourStreams = true if len(in) < 5 { println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in)) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28) litRegenSize = int(n & 262143) @@ -392,13 +335,15 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams) } - var literals []byte - var huff *huff0.Scratch + if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize { + return in, ErrWindowSizeExceeded + } + switch litType { case literalsBlockRaw: if len(in) < litRegenSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } literals = in[:litRegenSize] in = in[litRegenSize:] @@ -406,7 +351,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockRLE: if len(in) < 1 { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } if cap(b.literalBuf) < litRegenSize { if b.lowMem { @@ -417,7 +362,6 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.literalBuf = make([]byte, litRegenSize) } else { b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize) - } } } @@ -433,7 +377,7 @@ func (b *blockDec) decodeCompressed(hist *history) error { case literalsBlockTreeless: if len(in) < litCompSize { println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + return in, ErrBlockTooSmall } // Store compressed literals, so we defer decoding until we get history. literals = in[:litCompSize] @@ -441,15 +385,10 @@ func (b *blockDec) decodeCompressed(hist *history) error { if debugDecoder { printf("Found %d compressed literals\n", litCompSize) } - case literalsBlockCompressed: - if len(in) < litCompSize { - println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) - return ErrBlockTooSmall + huff := hist.huffTree + if huff == nil { + return in, errors.New("literal block was treeless, but no history was defined") } - literals = in[:litCompSize] - in = in[litCompSize:] - huff = huffDecoderPool.Get().(*huff0.Scratch) - var err error // Ensure we have space to store it. 
if cap(b.literalBuf) < litRegenSize { if b.lowMem { @@ -458,14 +397,53 @@ func (b *blockDec) decodeCompressed(hist *history) error { b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) } } - if huff == nil { - huff = &huff0.Scratch{} + var err error + // Use our out buffer. + huff.MaxDecodedSize = maxCompressedBlockSize + if fourStreams { + literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) + } else { + literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) } + // Make sure we don't leak our literals buffer + if err != nil { + println("decompressing literals:", err) + return in, err + } + if len(literals) != litRegenSize { + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + } + + case literalsBlockCompressed: + if len(in) < litCompSize { + println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize) + return in, ErrBlockTooSmall + } + literals = in[:litCompSize] + in = in[litCompSize:] + // Ensure we have space to store it. + if cap(b.literalBuf) < litRegenSize { + if b.lowMem { + b.literalBuf = make([]byte, 0, litRegenSize) + } else { + b.literalBuf = make([]byte, 0, maxCompressedBlockSize) + } + } + huff := hist.huffTree + if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) { + huff = huffDecoderPool.Get().(*huff0.Scratch) + if huff == nil { + huff = &huff0.Scratch{} + } + } + var err error huff, literals, err = huff0.ReadTable(literals, huff) if err != nil { println("reading huffman table:", err) - return err + return in, err } + hist.huffTree = huff + huff.MaxDecodedSize = maxCompressedBlockSize // Use our out buffer. if fourStreams { literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) @@ -474,24 +452,52 @@ func (b *blockDec) decodeCompressed(hist *history) error { } if err != nil { println("decoding compressed literals:", err) - return err + return in, err } // Make sure we don't leak our literals buffer if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) + return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) } if debugDecoder { printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize) } } + hist.decoders.literals = literals + return in, nil +} +// decodeCompressed will start decompressing a block. +func (b *blockDec) decodeCompressed(hist *history) error { + in := b.data + in, err := b.decodeLiterals(in, hist) + if err != nil { + return err + } + err = b.prepareSequences(in, hist) + if err != nil { + return err + } + if hist.decoders.nSeqs == 0 { + b.dst = append(b.dst, hist.decoders.literals...) 
+ return nil + } + err = hist.decoders.decodeSync(hist) + if err != nil { + return err + } + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset + return nil +} + +func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) { // Decode Sequences // https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section if len(in) < 1 { return ErrBlockTooSmall } + var nSeqs int seqHeader := in[0] - nSeqs := 0 switch { case seqHeader == 0: in = in[1:] @@ -512,7 +518,8 @@ func (b *blockDec) decodeCompressed(hist *history) error { in = in[3:] } - var seqs = &sequenceDecs{} + var seqs = &hist.decoders + seqs.nSeqs = nSeqs if nSeqs > 0 { if len(in) < 1 { return ErrBlockTooSmall @@ -541,6 +548,9 @@ func (b *blockDec) decodeCompressed(hist *history) error { } switch mode { case compModePredefined: + if seq.fse != nil && !seq.fse.preDefined { + fseDecoderPool.Put(seq.fse) + } seq.fse = &fsePredef[i] case compModeRLE: if br.remain() < 1 { @@ -548,34 +558,36 @@ func (b *blockDec) decodeCompressed(hist *history) error { } v := br.Uint8() br.advance(1) - dec := fseDecoderPool.Get().(*fseDecoder) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } symb, err := decSymbolValue(v, symbolTableX[i]) if err != nil { printf("RLE Transform table (%v) error: %v", tableIndex(i), err) return err } - dec.setRLE(symb) - seq.fse = dec + seq.fse.setRLE(symb) if debugDecoder { printf("RLE set to %+v, code: %v", symb, v) } case compModeFSE: println("Reading table for", tableIndex(i)) - dec := fseDecoderPool.Get().(*fseDecoder) - err := dec.readNCount(&br, uint16(maxTableSymbol[i])) + if seq.fse == nil || seq.fse.preDefined { + seq.fse = fseDecoderPool.Get().(*fseDecoder) + } + err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i])) if err != nil { println("Read table error:", err) return err } - err = dec.transform(symbolTableX[i]) + err = seq.fse.transform(symbolTableX[i]) if err != nil { println("Transform table error:", err) return err } if debugDecoder { - println("Read table ok", "symbolLen:", dec.symbolLen) + println("Read table ok", "symbolLen:", seq.fse.symbolLen) } - seq.fse = dec case compModeRepeat: seq.repeat = true } @@ -585,140 +597,88 @@ func (b *blockDec) decodeCompressed(hist *history) error { } in = br.unread() } - - // Wait for history. - // All time spent after this is critical since it is strictly sequential. - if hist == nil { - hist = <-b.history - if hist.error { - return ErrDecoderClosed - } - } - - // Decode treeless literal block. - if litType == literalsBlockTreeless { - // TODO: We could send the history early WITHOUT the stream history. - // This would allow decoding treeless literals before the byte history is available. - // Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless. - // So not much obvious gain here. - - if hist.huffTree == nil { - return errors.New("literal block was treeless, but no history was defined") - } - // Ensure we have space to store it. - if cap(b.literalBuf) < litRegenSize { - if b.lowMem { - b.literalBuf = make([]byte, 0, litRegenSize) - } else { - b.literalBuf = make([]byte, 0, maxCompressedLiteralSize) - } - } - var err error - // Use our out buffer. 
- huff = hist.huffTree - if fourStreams { - literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals) - } else { - literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals) - } - // Make sure we don't leak our literals buffer - if err != nil { - println("decompressing literals:", err) - return err - } - if len(literals) != litRegenSize { - return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals)) - } - } else { - if hist.huffTree != nil && huff != nil { - if hist.dict == nil || hist.dict.litEnc != hist.huffTree { - huffDecoderPool.Put(hist.huffTree) - } - hist.huffTree = nil - } - } - if huff != nil { - hist.huffTree = huff - } if debugDecoder { - println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.") + println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.") } if nSeqs == 0 { - // Decompressed content is defined entirely as Literals Section content. - b.dst = append(b.dst, literals...) - if delayedHistory { - hist.append(literals) + if len(b.sequence) > 0 { + b.sequence = b.sequence[:0] } return nil } - - seqs, err := seqs.mergeHistory(&hist.decoders) - if err != nil { - return err + br := seqs.br + if br == nil { + br = &bitReader{} } - if debugDecoder { - println("History merged ok") - } - br := &bitReader{} if err := br.init(in); err != nil { return err } - // TODO: Investigate if sending history without decoders are faster. - // This would allow the sequences to be decoded async and only have to construct stream history. - // If only recent offsets were not transferred, this would be an obvious win. - // Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded. + if err := seqs.initialize(br, hist, b.dst); err != nil { + println("initializing sequences:", err) + return err + } + return nil +} +func (b *blockDec) decodeSequences(hist *history) error { + if cap(b.sequence) < hist.decoders.nSeqs { + if b.lowMem { + b.sequence = make([]seqVals, 0, hist.decoders.nSeqs) + } else { + b.sequence = make([]seqVals, 0, 0x7F00+0xffff) + } + } + b.sequence = b.sequence[:hist.decoders.nSeqs] + if hist.decoders.nSeqs == 0 { + hist.decoders.seqSize = len(hist.decoders.literals) + return nil + } + hist.decoders.prevOffset = hist.recentOffsets + err := hist.decoders.decode(b.sequence) + hist.recentOffsets = hist.decoders.prevOffset + return err +} + +func (b *blockDec) executeSequences(hist *history) error { hbytes := hist.b if len(hbytes) > hist.windowSize { hbytes = hbytes[len(hbytes)-hist.windowSize:] - // We do not need history any more. + // We do not need history anymore. if hist.dict != nil { hist.dict.content = nil } } - - if err := seqs.initialize(br, hist, literals, b.dst); err != nil { - println("initializing sequences:", err) - return err - } - - err = seqs.decode(nSeqs, br, hbytes) + hist.decoders.windowSize = hist.windowSize + hist.decoders.out = b.dst[:0] + err := hist.decoders.execute(b.sequence, hbytes) if err != nil { return err } - if !br.finished() { - return fmt.Errorf("%d extra bits on block, should be 0", br.remain()) - } + return b.updateHistory(hist) +} - err = br.close() - if err != nil { - printf("Closing sequences: %v, %+v\n", err, *br) - } +func (b *blockDec) updateHistory(hist *history) error { if len(b.data) > maxCompressedBlockSize { return fmt.Errorf("compressed block size too large (%d)", len(b.data)) } // Set output and release references. 
- b.dst = seqs.out - seqs.out, seqs.literals, seqs.hist = nil, nil, nil + b.dst = hist.decoders.out + hist.recentOffsets = hist.decoders.prevOffset - if !delayedHistory { - // If we don't have delayed history, no need to update. - hist.recentOffsets = seqs.prevOffset - return nil - } if b.Last { // if last block we don't care about history. println("Last block, no history returned") hist.b = hist.b[:0] return nil + } else { + hist.append(b.dst) + if debugDecoder { + println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b)) + } } - hist.append(b.dst) - hist.recentOffsets = seqs.prevOffset - if debugDecoder { - println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.") - } + hist.decoders.out, hist.decoders.literals = nil, nil return nil } diff --git a/vendor/github.com/klauspost/compress/zstd/bytebuf.go b/vendor/github.com/klauspost/compress/zstd/bytebuf.go index aab71c6cf8..b80191e4b1 100644 --- a/vendor/github.com/klauspost/compress/zstd/bytebuf.go +++ b/vendor/github.com/klauspost/compress/zstd/bytebuf.go @@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) { func (r *readerWrapper) readByte() (byte, error) { n2, err := r.r.Read(r.tmp[:1]) if err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } return 0, err } if n2 != 1 { diff --git a/vendor/github.com/klauspost/compress/zstd/decoder.go b/vendor/github.com/klauspost/compress/zstd/decoder.go index f430f58b57..a93dfaf100 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder.go @@ -5,9 +5,13 @@ package zstd import ( - "errors" + "bytes" + "context" + "encoding/binary" "io" "sync" + + "github.com/klauspost/compress/zstd/internal/xxhash" ) // Decoder provides decoding of zstandard streams. @@ -22,12 +26,19 @@ type Decoder struct { // Unreferenced decoders, ready for use. decoders chan *blockDec - // Streams ready to be decoded. - stream chan decodeStream - // Current read position used for Reader functionality. current decoderState + // sync stream decoding + syncStream struct { + decodedFrame uint64 + br readerWrapper + enabled bool + inFrame bool + } + + frame *frameDec + // Custom dictionaries. // Always uses copies. dicts map[uint32]dict @@ -46,7 +57,10 @@ type decoderState struct { output chan decodeOutput // cancel remaining output. - cancel chan struct{} + cancel context.CancelFunc + + // crc of current frame + crc *xxhash.Digest flushed bool } @@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) { return nil, err } } - d.current.output = make(chan decodeOutput, d.o.concurrent) + d.current.crc = xxhash.New() d.current.flushed = true if r == nil { @@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) { break } if !d.nextBlock(n == 0) { - return n, nil + return n, d.current.err } } } @@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error { d.drainOutput() + d.syncStream.br.r = nil if r == nil { d.current.err = ErrDecoderNilInput if len(d.current.b) > 0 { @@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error { } return nil } - - if d.stream == nil { - d.stream = make(chan decodeStream, 1) - d.streamWg.Add(1) - go d.startStreamDecoder(d.stream) - } - // Remove current block. 
+ d.stashDecoder() d.current.decodeOutput = decodeOutput{} d.current.err = nil - d.current.cancel = make(chan struct{}) d.current.flushed = false d.current.d = nil - d.stream <- decodeStream{ - r: r, - output: d.current.output, - cancel: d.current.cancel, + // Ensure no-one else is still running... + d.streamWg.Wait() + if d.frame == nil { + d.frame = newFrameDec(d.o) } + + if d.o.concurrent == 1 { + return d.startSyncDecoder(r) + } + + d.current.output = make(chan decodeOutput, d.o.concurrent) + ctx, cancel := context.WithCancel(context.Background()) + d.current.cancel = cancel + d.streamWg.Add(1) + go d.startStreamDecoder(ctx, r, d.current.output) + return nil } // drainOutput will drain the output until errEndOfStream is sent. func (d *Decoder) drainOutput() { if d.current.cancel != nil { - println("cancelling current") - close(d.current.cancel) + if debugDecoder { + println("cancelling current") + } + d.current.cancel() d.current.cancel = nil } if d.current.d != nil { @@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() { } d.decoders <- v.d } - if v.err == errEndOfStream { - println("current flushed") - d.current.flushed = true - return - } } + d.current.output = nil + d.current.flushed = true } // WriteTo writes data to w until there's no more data to write or when an error occurs. @@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) { // DecodeAll can be used concurrently. // The Decoder concurrency limits will be respected. func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { - if d.current.err == ErrDecoderClosed { + if d.decoders == nil { return dst, ErrDecoderClosed } @@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { } frame.rawInput = nil frame.bBuf = nil + if frame.history.decoders.br != nil { + frame.history.decoders.br.in = nil + } d.decoders <- block }() frame.bBuf = input @@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { for { frame.history.reset() err := frame.reset(&frame.bBuf) - if err == io.EOF { - if debugDecoder { - println("frame reset return EOF") + if err != nil { + if err == io.EOF { + if debugDecoder { + println("frame reset return EOF") + } + return dst, nil } - return dst, nil + return dst, err } if frame.DictionaryID != nil { dict, ok := d.dicts[*frame.DictionaryID] if !ok { return nil, ErrUnknownDictionary } + if debugDecoder { + println("setting dict", frame.DictionaryID) + } frame.history.setDict(&dict) } - if err != nil { - return dst, err - } + if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) { return dst, ErrDecoderSizeExceeded } if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 { - // Never preallocate moe than 1 GB up front. + // Never preallocate more than 1 GB up front. if cap(dst)-len(dst) < int(frame.FrameContentSize) { dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize)) copy(dst2, dst) @@ -368,6 +393,161 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) { // If non-blocking mode is used the returned boolean will be false // if no data was available without blocking. func (d *Decoder) nextBlock(blocking bool) (ok bool) { + if d.current.err != nil { + // Keep error state. 
+ return false + } + d.current.b = d.current.b[:0] + + // SYNC: + if d.syncStream.enabled { + if !blocking { + return false + } + ok = d.nextBlockSync() + if !ok { + d.stashDecoder() + } + return ok + } + + //ASYNC: + d.stashDecoder() + if blocking { + d.current.decodeOutput, ok = <-d.current.output + } else { + select { + case d.current.decodeOutput, ok = <-d.current.output: + default: + return false + } + } + if !ok { + // This should not happen, so signal error state... + d.current.err = io.ErrUnexpectedEOF + return false + } + next := d.current.decodeOutput + if next.d != nil && next.d.async.newHist != nil { + d.current.crc.Reset() + } + if debugDecoder { + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b))) + println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp) + } + + if len(next.b) > 0 { + n, err := d.current.crc.Write(next.b) + if err == nil { + if n != len(next.b) { + d.current.err = io.ErrShortWrite + } + } + } + if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 { + got := d.current.crc.Sum64() + var tmp [4]byte + binary.LittleEndian.PutUint32(tmp[:], uint32(got)) + if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC { + if debugDecoder { + println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)") + } + d.current.err = ErrCRCMismatch + } else { + if debugDecoder { + println("CRC ok", tmp[:]) + } + } + } + + return true +} + +func (d *Decoder) nextBlockSync() (ok bool) { + if d.current.d == nil { + d.current.d = <-d.decoders + } + for len(d.current.b) == 0 { + if !d.syncStream.inFrame { + d.frame.history.reset() + d.current.err = d.frame.reset(&d.syncStream.br) + if d.current.err != nil { + return false + } + if d.frame.DictionaryID != nil { + dict, ok := d.dicts[*d.frame.DictionaryID] + if !ok { + d.current.err = ErrUnknownDictionary + return false + } else { + d.frame.history.setDict(&dict) + } + } + if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize { + d.current.err = ErrDecoderSizeExceeded + return false + } + + d.syncStream.decodedFrame = 0 + d.syncStream.inFrame = true + } + d.current.err = d.frame.next(d.current.d) + if d.current.err != nil { + return false + } + d.frame.history.ensureBlock() + if debugDecoder { + println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame) + } + histBefore := len(d.frame.history.b) + d.current.err = d.current.d.decodeBuf(&d.frame.history) + + if d.current.err != nil { + println("error after:", d.current.err) + return false + } + d.current.b = d.frame.history.b[histBefore:] + if debugDecoder { + println("history after:", len(d.frame.history.b)) + } + + // Check frame size (before CRC) + d.syncStream.decodedFrame += uint64(len(d.current.b)) + if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeExceeded + return false + } + + // Check FCS + if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize { + if debugDecoder { + printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize) + } + d.current.err = ErrFrameSizeMismatch + return false + } + + // Update/Check CRC + if d.frame.HasCheckSum { + d.frame.crc.Write(d.current.b) + if d.current.d.Last { + d.current.err = 
d.frame.checkCRC() + if d.current.err != nil { + println("CRC error:", d.current.err) + return false + } + } + } + d.syncStream.inFrame = !d.current.d.Last + } + return true +} + +func (d *Decoder) stashDecoder() { if d.current.d != nil { if debugDecoder { printf("re-adding current decoder %p", d.current.d) @@ -375,24 +555,6 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) { d.decoders <- d.current.d d.current.d = nil } - if d.current.err != nil { - // Keep error state. - return blocking - } - - if blocking { - d.current.decodeOutput = <-d.current.output - } else { - select { - case d.current.decodeOutput = <-d.current.output: - default: - return false - } - } - if debugDecoder { - println("got", len(d.current.b), "bytes, error:", d.current.err) - } - return true } // Close will release all resources. @@ -402,10 +564,10 @@ func (d *Decoder) Close() { return } d.drainOutput() - if d.stream != nil { - close(d.stream) + if d.current.cancel != nil { + d.current.cancel() d.streamWg.Wait() - d.stream = nil + d.current.cancel = nil } if d.decoders != nil { close(d.decoders) @@ -456,100 +618,306 @@ type decodeOutput struct { err error } -type decodeStream struct { - r io.Reader - - // Blocks ready to be written to output. - output chan decodeOutput - - // cancel reading from the input - cancel chan struct{} +func (d *Decoder) startSyncDecoder(r io.Reader) error { + d.frame.history.reset() + d.syncStream.br = readerWrapper{r: r} + d.syncStream.inFrame = false + d.syncStream.enabled = true + d.syncStream.decodedFrame = 0 + return nil } -// errEndOfStream indicates that everything from the stream was read. -var errEndOfStream = errors.New("end-of-stream") - // Create Decoder: -// Spawn n block decoders. These accept tasks to decode a block. -// Create goroutine that handles stream processing, this will send history to decoders as they are available. -// Decoders update the history as they decode. -// When a block is returned: -// a) history is sent to the next decoder, -// b) content written to CRC. -// c) return data to WRITER. -// d) wait for next block to return data. -// Once WRITTEN, the decoders reused by the writer frame decoder for re-use. -func (d *Decoder) startStreamDecoder(inStream chan decodeStream) { +// ASYNC: +// Spawn 4 go routines. +// 0: Read frames and decode blocks. +// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree. +// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets. +// 3: Wait for stream history, execute sequences, send stream history. +func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) { defer d.streamWg.Done() - frame := newFrameDec(d.o) - for stream := range inStream { - if debugDecoder { - println("got new stream") + br := readerWrapper{r: r} + + var seqPrepare = make(chan *blockDec, d.o.concurrent) + var seqDecode = make(chan *blockDec, d.o.concurrent) + var seqExecute = make(chan *blockDec, d.o.concurrent) + + // Async 1: Prepare blocks... 
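As an editorial aside, the staged layout described in the numbered comment above can be illustrated with a self-contained sketch. Stage names, the payload type, and buffer sizes are illustrative only, echoing the shape of the `seqPrepare` → `seqDecode` → `seqExecute` chain rather than the library's actual types:

```go
package main

import "fmt"

// A toy pipeline in the same shape as the decoder's staged channels:
// each stage owns one goroutine, receives work on one channel, and
// forwards results on the next.
func main() {
	const inflight = 4 // plays the role of the concurrency setting
	prepare := make(chan int, inflight)
	decode := make(chan int, inflight)
	execute := make(chan int, inflight)

	go func() { // stage 1: "prepare"
		for v := range prepare {
			decode <- v + 1
		}
		close(decode)
	}()
	go func() { // stage 2: "decode"
		for v := range decode {
			execute <- v * 2
		}
		close(execute)
	}()

	for i := 0; i < 8; i++ {
		prepare <- i
	}
	close(prepare)
	for v := range execute { // stage 3 runs on the caller
		fmt.Println(v)
	}
}
```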
+ go func() { + var hist history + var hasErr bool + for block := range seqPrepare { + if hasErr { + if block != nil { + seqDecode <- block + } + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 1: new history") + } + hist.reset() + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqDecode <- block + continue + } + + remain, err := block.decodeLiterals(block.data, &hist) + block.err = err + hasErr = block.err != nil + if err == nil { + block.async.literals = hist.decoders.literals + block.async.seqData = remain + } else if debugDecoder { + println("decodeLiterals error:", err) + } + seqDecode <- block } - br := readerWrapper{r: stream.r} - decodeStream: - for { - frame.history.reset() - err := frame.reset(&br) - if debugDecoder && err != nil { - println("Frame decoder returned", err) + close(seqDecode) + }() + + // Async 2: Decode sequences... + go func() { + var hist history + var hasErr bool + + for block := range seqDecode { + if hasErr { + if block != nil { + seqExecute <- block + } + continue } - if err == nil && frame.DictionaryID != nil { - dict, ok := d.dicts[*frame.DictionaryID] - if !ok { - err = ErrUnknownDictionary + if block.async.newHist != nil { + if debugDecoder { + println("Async 2: new history, recent:", block.async.newHist.recentOffsets) + } + hist.decoders = block.async.newHist.decoders + hist.recentOffsets = block.async.newHist.recentOffsets + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + } + if block.err != nil || block.Type != blockTypeCompressed { + hasErr = block.err != nil + seqExecute <- block + continue + } + + hist.decoders.literals = block.async.literals + block.err = block.prepareSequences(block.async.seqData, &hist) + if debugDecoder && block.err != nil { + println("prepareSequences returned:", block.err) + } + hasErr = block.err != nil + if block.err == nil { + block.err = block.decodeSequences(&hist) + if debugDecoder && block.err != nil { + println("decodeSequences returned:", block.err) + } + hasErr = block.err != nil + // block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs] + block.async.seqSize = hist.decoders.seqSize + } + seqExecute <- block + } + close(seqExecute) + }() + + var wg sync.WaitGroup + wg.Add(1) + + // Async 3: Execute sequences... 
+ frameHistCache := d.frame.history.b + go func() { + var hist history + var decodedFrame uint64 + var fcs uint64 + var hasErr bool + for block := range seqExecute { + out := decodeOutput{err: block.err, d: block} + if block.err != nil || hasErr { + hasErr = true + output <- out + continue + } + if block.async.newHist != nil { + if debugDecoder { + println("Async 3: new history") + } + hist.windowSize = block.async.newHist.windowSize + hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer + if block.async.newHist.dict != nil { + hist.setDict(block.async.newHist.dict) + } + + if cap(hist.b) < hist.allocFrameBuffer { + if cap(frameHistCache) >= hist.allocFrameBuffer { + hist.b = frameHistCache + } else { + hist.b = make([]byte, 0, hist.allocFrameBuffer) + println("Alloc history sized", hist.allocFrameBuffer) + } + } + hist.b = hist.b[:0] + fcs = block.async.fcs + decodedFrame = 0 + } + do := decodeOutput{err: block.err, d: block} + switch block.Type { + case blockTypeRLE: + if debugDecoder { + println("add rle block length:", block.RLESize) + } + + if cap(block.dst) < int(block.RLESize) { + if block.lowMem { + block.dst = make([]byte, block.RLESize) + } else { + block.dst = make([]byte, maxBlockSize) + } + } + block.dst = block.dst[:block.RLESize] + v := block.data[0] + for i := range block.dst { + block.dst[i] = v + } + hist.append(block.dst) + do.b = block.dst + case blockTypeRaw: + if debugDecoder { + println("add raw block length:", len(block.data)) + } + hist.append(block.data) + do.b = block.data + case blockTypeCompressed: + if debugDecoder { + println("execute with history length:", len(hist.b), "window:", hist.windowSize) + } + hist.decoders.seqSize = block.async.seqSize + hist.decoders.literals = block.async.literals + do.err = block.executeSequences(&hist) + hasErr = do.err != nil + if debugDecoder && hasErr { + println("executeSequences returned:", do.err) + } + do.b = block.dst + } + if !hasErr { + decodedFrame += uint64(len(do.b)) + if fcs > 0 && decodedFrame > fcs { + println("fcs exceeded", block.Last, fcs, decodedFrame) + do.err = ErrFrameSizeExceeded + hasErr = true + } else if block.Last && fcs > 0 && decodedFrame != fcs { + do.err = ErrFrameSizeMismatch + hasErr = true } else { - frame.history.setDict(&dict) + if debugDecoder { + println("fcs ok", block.Last, fcs, decodedFrame) + } } } - if err != nil { - stream.output <- decodeOutput{ - err: err, + output <- do + } + close(output) + frameHistCache = hist.b + wg.Done() + if debugDecoder { + println("decoder goroutines finished") + } + }() + +decodeStream: + for { + frame := d.frame + if debugDecoder { + println("New frame...") + } + var historySent bool + frame.history.reset() + err := frame.reset(&br) + if debugDecoder && err != nil { + println("Frame decoder returned", err) + } + if err == nil && frame.DictionaryID != nil { + dict, ok := d.dicts[*frame.DictionaryID] + if !ok { + err = ErrUnknownDictionary + } else { + frame.history.setDict(&dict) + } + } + if err == nil && d.frame.WindowSize > d.o.maxWindowSize { + err = ErrDecoderSizeExceeded + } + if err != nil { + select { + case <-ctx.Done(): + case dec := <-d.decoders: + dec.sendErr(err) + seqPrepare <- dec + } + break decodeStream + } + + // Go through all blocks of the frame. + for { + var dec *blockDec + select { + case <-ctx.Done(): + break decodeStream + case dec = <-d.decoders: + // Once we have a decoder, we MUST return it. 
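The invariant in the comment just above, that every decoder taken from the `d.decoders` channel must eventually be sent back, is the classic channel-as-pool pattern. A minimal standalone sketch, with all names invented for illustration:

```go
package main

import "fmt"

type work struct{ id int }

// pool hands out workers over a buffered channel; the capacity bounds
// concurrency, and every worker taken MUST be returned, or the pool
// permanently shrinks.
type pool struct{ free chan *work }

func newPool(n int) *pool {
	p := &pool{free: make(chan *work, n)}
	for i := 0; i < n; i++ {
		p.free <- &work{id: i}
	}
	return p
}

func (p *pool) do(f func(*work)) {
	w := <-p.free                  // blocks until a worker is available
	defer func() { p.free <- w }() // always return it, even on panic
	f(w)
}

func main() {
	p := newPool(2)
	p.do(func(w *work) { fmt.Println("using worker", w.id) })
}
```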
+ } + err := frame.next(dec) + if !historySent { + h := frame.history + if debugDecoder { + println("Alloc History:", h.allocFrameBuffer) } + dec.async.newHist = &h + dec.async.fcs = frame.FrameContentSize + historySent = true + } else { + dec.async.newHist = nil + } + if debugDecoder && err != nil { + println("next block returned error:", err) + } + dec.err = err + dec.checkCRC = nil + if dec.Last && frame.HasCheckSum && err == nil { + crc, err := frame.rawInput.readSmall(4) + if err != nil { + println("CRC missing?", err) + dec.err = err + } + var tmp [4]byte + copy(tmp[:], crc) + dec.checkCRC = tmp[:] + if debugDecoder { + println("found crc to check:", dec.checkCRC) + } + } + err = dec.err + last := dec.Last + seqPrepare <- dec + if err != nil { + break decodeStream + } + if last { break } - if debugDecoder { - println("starting frame decoder") - } - - // This goroutine will forward history between frames. - frame.frameDone.Add(1) - frame.initAsync() - - go frame.startDecoder(stream.output) - decodeFrame: - // Go through all blocks of the frame. - for { - dec := <-d.decoders - select { - case <-stream.cancel: - if !frame.sendErr(dec, io.EOF) { - // To not let the decoder dangle, send it back. - stream.output <- decodeOutput{d: dec} - } - break decodeStream - default: - } - err := frame.next(dec) - switch err { - case io.EOF: - // End of current frame, no error - println("EOF on next block") - break decodeFrame - case nil: - continue - default: - println("block decoder returned", err) - break decodeStream - } - } - // All blocks have started decoding, check if there are more frames. - println("waiting for done") - frame.frameDone.Wait() - println("done waiting...") } - frame.frameDone.Wait() - println("Sending EOS") - stream.output <- decodeOutput{err: errEndOfStream} } + close(seqPrepare) + wg.Wait() + d.frame.history.b = frameHistCache } diff --git a/vendor/github.com/klauspost/compress/zstd/decoder_options.go b/vendor/github.com/klauspost/compress/zstd/decoder_options.go index 95cc9b8b81..fd05c9bb01 100644 --- a/vendor/github.com/klauspost/compress/zstd/decoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/decoder_options.go @@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() { concurrent: runtime.GOMAXPROCS(0), maxWindowSize: MaxWindowSize, } + if o.concurrent > 4 { + o.concurrent = 4 + } o.maxDecodedSize = 1 << 63 } @@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption { return func(o *decoderOptions) error { o.lowMem = b; return nil } } -// WithDecoderConcurrency will set the concurrency, -// meaning the maximum number of decoders to run concurrently. -// The value supplied must be at least 1. -// By default this will be set to GOMAXPROCS. +// WithDecoderConcurrency sets the number of created decoders. +// When decoding block with DecodeAll, this will limit the number +// of possible concurrently running decodes. +// When decoding streams, this will limit the number of +// inflight blocks. +// When decoding streams and setting maximum to 1, +// no async decoding will be done. +// When a value of 0 is provided GOMAXPROCS will be used. +// By default this will be set to 4 or GOMAXPROCS, whatever is lower. 
func WithDecoderConcurrency(n int) DOption { return func(o *decoderOptions) error { - if n <= 0 { + if n < 0 { return errors.New("concurrency must be at least 1") } - o.concurrent = n + if n == 0 { + o.concurrent = runtime.GOMAXPROCS(0) + } else { + o.concurrent = n + } return nil } } diff --git a/vendor/github.com/klauspost/compress/zstd/encoder.go b/vendor/github.com/klauspost/compress/zstd/encoder.go index e6e315969b..dcc987a7cb 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder.go @@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) { if cap(s.filling) == 0 { s.filling = make([]byte, 0, e.o.blockSize) } - if cap(s.current) == 0 { - s.current = make([]byte, 0, e.o.blockSize) - } - if cap(s.previous) == 0 { - s.previous = make([]byte, 0, e.o.blockSize) + if e.o.concurrent > 1 { + if cap(s.current) == 0 { + s.current = make([]byte, 0, e.o.blockSize) + } + if cap(s.previous) == 0 { + s.previous = make([]byte, 0, e.o.blockSize) + } + s.current = s.current[:0] + s.previous = s.previous[:0] + if s.writing == nil { + s.writing = &blockEnc{lowMem: e.o.lowMem} + s.writing.init() + } + s.writing.initNewEncode() } if s.encoder == nil { s.encoder = e.o.encoder() } - if s.writing == nil { - s.writing = &blockEnc{lowMem: e.o.lowMem} - s.writing.init() - } - s.writing.initNewEncode() s.filling = s.filling[:0] - s.current = s.current[:0] - s.previous = s.previous[:0] s.encoder.Reset(e.o.dict, false) s.headerWritten = false s.eofWritten = false @@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error { return s.err } + // SYNC: + if e.o.concurrent == 1 { + src := s.filling + s.nInput += int64(len(s.filling)) + if debugEncoder { + println("Adding sync block,", len(src), "bytes, final:", final) + } + enc := s.encoder + blk := enc.Block() + blk.reset(nil) + enc.Encode(blk, src) + blk.last = final + if final { + s.eofWritten = true + } + + err := errIncompressible + // If we got the exact same number of literals as input, + // assume the literals cannot be compressed. + if len(src) != len(blk.literals) || len(src) != e.o.blockSize { + err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy) + } + switch err { + case errIncompressible: + if debugEncoder { + println("Storing incompressible block as raw") + } + blk.encodeRaw(src) + // In fast mode, we do not transfer offsets, so we don't have to deal with changing the. + case nil: + default: + s.err = err + return err + } + _, s.err = s.w.Write(blk.output) + s.nWritten += int64(len(blk.output)) + s.filling = s.filling[:0] + return s.err + } + // Move blocks forward. s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current s.nInput += int64(len(s.current)) diff --git a/vendor/github.com/klauspost/compress/zstd/encoder_options.go b/vendor/github.com/klauspost/compress/zstd/encoder_options.go index 5f2e1d020e..44d8dbd199 100644 --- a/vendor/github.com/klauspost/compress/zstd/encoder_options.go +++ b/vendor/github.com/klauspost/compress/zstd/encoder_options.go @@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption { // WithEncoderConcurrency will set the concurrency, // meaning the maximum number of encoders to run concurrently. // The value supplied must be at least 1. +// For streams, setting a value of 1 will disable async compression. // By default this will be set to GOMAXPROCS. 
func WithEncoderConcurrency(n int) EOption { return func(o *encoderOptions) error { diff --git a/vendor/github.com/klauspost/compress/zstd/framedec.go b/vendor/github.com/klauspost/compress/zstd/framedec.go index 989c79f8c3..29c3176b05 100644 --- a/vendor/github.com/klauspost/compress/zstd/framedec.go +++ b/vendor/github.com/klauspost/compress/zstd/framedec.go @@ -8,23 +8,17 @@ import ( "bytes" "encoding/hex" "errors" - "hash" "io" - "sync" "github.com/klauspost/compress/zstd/internal/xxhash" ) type frameDec struct { - o decoderOptions - crc hash.Hash64 - offset int64 + o decoderOptions + crc *xxhash.Digest WindowSize uint64 - // In order queue of blocks being decoded. - decoding chan *blockDec - // Frame history passed between blocks history history @@ -34,15 +28,10 @@ type frameDec struct { bBuf byteBuf FrameContentSize uint64 - frameDone sync.WaitGroup DictionaryID *uint32 HasCheckSum bool SingleSegment bool - - // asyncRunning indicates whether the async routine processes input on 'decoding'. - asyncRunningMu sync.Mutex - asyncRunning bool } const ( @@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error { d.FrameContentSize = uint64(d1) | (uint64(d2) << 32) } if debugDecoder { - println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize) + println("Read FCS:", d.FrameContentSize) } } + // Move this to shared. d.HasCheckSum = fhd&(1<<2) != 0 if d.HasCheckSum { @@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error { } d.history.windowSize = int(d.WindowSize) if d.o.lowMem && d.history.windowSize < maxBlockSize { - d.history.maxSize = d.history.windowSize * 2 + d.history.allocFrameBuffer = d.history.windowSize * 2 + // TODO: Maybe use FrameContent size } else { - d.history.maxSize = d.history.windowSize + maxBlockSize + d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize } + + if debugDecoder { + println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum) + } + // history contains input - maybe we do something d.rawInput = br return nil @@ -276,49 +272,18 @@ func (d *frameDec) reset(br byteBuffer) error { // next will start decoding the next block from stream. func (d *frameDec) next(block *blockDec) error { if debugDecoder { - printf("decoding new block %p:%p", block, block.data) + println("decoding new block") } err := block.reset(d.rawInput, d.WindowSize) if err != nil { println("block error:", err) // Signal the frame decoder we have a problem. - d.sendErr(block, err) + block.sendErr(err) return err } - block.input <- struct{}{} - if debugDecoder { - println("next block:", block) - } - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return nil - } - if block.Last { - // We indicate the frame is done by sending io.EOF - d.decoding <- block - return io.EOF - } - d.decoding <- block return nil } -// sendEOF will queue an error block on the frame. -// This will cause the frame decoder to return when it encounters the block. -// Returns true if the decoder was added. -func (d *frameDec) sendErr(block *blockDec, err error) bool { - d.asyncRunningMu.Lock() - defer d.asyncRunningMu.Unlock() - if !d.asyncRunning { - return false - } - - println("sending error", err.Error()) - block.sendErr(err) - d.decoding <- block - return true -} - // checkCRC will check the checksum if the frame has one. 
// Will return ErrCRCMismatch if crc check failed, otherwise nil. func (d *frameDec) checkCRC() error { @@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error { return err } - if !bytes.Equal(tmp[:], want) { + if !bytes.Equal(tmp[:], want) && !ignoreCRC { if debugDecoder { println("CRC Check Failed:", tmp[:], "!=", want) } @@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error { return nil } -func (d *frameDec) initAsync() { - if !d.o.lowMem && !d.SingleSegment { - // set max extra size history to 2MB. - d.history.maxSize = d.history.windowSize + maxBlockSize - } - // re-alloc if more than one extra block size. - if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.history.b) < d.history.maxSize { - d.history.b = make([]byte, 0, d.history.maxSize) - } - if cap(d.decoding) < d.o.concurrent { - d.decoding = make(chan *blockDec, d.o.concurrent) - } - if debugDecoder { - h := d.history - printf("history init. len: %d, cap: %d", len(h.b), cap(h.b)) - } - d.asyncRunningMu.Lock() - d.asyncRunning = true - d.asyncRunningMu.Unlock() -} - -// startDecoder will start decoding blocks and write them to the writer. -// The decoder will stop as soon as an error occurs or at end of frame. -// When the frame has finished decoding the *bufio.Reader -// containing the remaining input will be sent on frameDec.frameDone. -func (d *frameDec) startDecoder(output chan decodeOutput) { - written := int64(0) - - defer func() { - d.asyncRunningMu.Lock() - d.asyncRunning = false - d.asyncRunningMu.Unlock() - - // Drain the currently decoding. - d.history.error = true - flushdone: - for { - select { - case b := <-d.decoding: - b.history <- &d.history - output <- <-b.result - default: - break flushdone - } - } - println("frame decoder done, signalling done") - d.frameDone.Done() - }() - // Get decoder for first block. - block := <-d.decoding - block.history <- &d.history - for { - var next *blockDec - // Get result - r := <-block.result - if r.err != nil { - println("Result contained error", r.err) - output <- r - return - } - if debugDecoder { - println("got result, from ", d.offset, "to", d.offset+int64(len(r.b))) - d.offset += int64(len(r.b)) - } - if !block.Last { - // Send history to next block - select { - case next = <-d.decoding: - if debugDecoder { - println("Sending ", len(d.history.b), "bytes as history") - } - next.history <- &d.history - default: - // Wait until we have sent the block, so - // other decoders can potentially get the decoder. - next = nil - } - } - - // Add checksum, async to decoding. - if d.HasCheckSum { - n, err := d.crc.Write(r.b) - if err != nil { - r.err = err - if n != len(r.b) { - r.err = io.ErrShortWrite - } - output <- r - return - } - } - written += int64(len(r.b)) - if d.SingleSegment && uint64(written) > d.FrameContentSize { - println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize) - r.err = ErrFrameSizeExceeded - output <- r - return - } - if block.Last { - r.err = d.checkCRC() - output <- r - return - } - output <- r - if next == nil { - // There was no decoder available, we wait for one now that we have sent to the writer. - if debugDecoder { - println("Sending ", len(d.history.b), " bytes as history") - } - next = <-d.decoding - next.history <- &d.history - } - block = next - } -} - // runDecoder will create a sync decoder that will decode a block of data. 
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { saved := d.history.b // We use the history for output to avoid copying it. d.history.b = dst + d.history.ignoreBuffer = len(dst) // Store input length, so we only check new data. crcStart := len(dst) var err error @@ -489,7 +336,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { println("next block:", dec) } err = dec.decodeBuf(&d.history) - if err != nil || dec.Last { + if err != nil { break } if uint64(len(d.history.b)) > d.o.maxDecodedSize { @@ -501,10 +348,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) { err = ErrFrameSizeExceeded break } + if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize { + println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize) + err = ErrFrameSizeExceeded + break + } + if dec.Last { + break + } + if debugDecoder && d.FrameContentSize > 0 { + println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize) + } } dst = d.history.b if err == nil { - if d.HasCheckSum { + if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize { + err = ErrFrameSizeMismatch + } else if d.HasCheckSum { var n int n, err = d.crc.Write(dst[crcStart:]) if err == nil { diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz.go b/vendor/github.com/klauspost/compress/zstd/fuzz.go new file mode 100644 index 0000000000..fda8a74228 --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz.go @@ -0,0 +1,11 @@ +//go:build gofuzz +// +build gofuzz + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = true diff --git a/vendor/github.com/klauspost/compress/zstd/fuzz_none.go b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go new file mode 100644 index 0000000000..0515b201cc --- /dev/null +++ b/vendor/github.com/klauspost/compress/zstd/fuzz_none.go @@ -0,0 +1,11 @@ +//go:build !gofuzz +// +build !gofuzz + +// Copyright 2019+ Klaus Post. All rights reserved. +// License information can be found in the LICENSE file. +// Based on work by Yann Collet, released under BSD License. + +package zstd + +// ignoreCRC can be used for fuzz testing to ignore CRC values... +const ignoreCRC = false diff --git a/vendor/github.com/klauspost/compress/zstd/history.go b/vendor/github.com/klauspost/compress/zstd/history.go index f783e32d25..28b40153cc 100644 --- a/vendor/github.com/klauspost/compress/zstd/history.go +++ b/vendor/github.com/klauspost/compress/zstd/history.go @@ -10,20 +10,31 @@ import ( // history contains the information transferred between blocks. type history struct { - b []byte - huffTree *huff0.Scratch - recentOffsets [3]int + // Literal decompression + huffTree *huff0.Scratch + + // Sequence decompression decoders sequenceDecs - windowSize int - maxSize int - error bool - dict *dict + recentOffsets [3]int + + // History buffer... + b []byte + + // ignoreBuffer is meant to ignore a number of bytes + // when checking for matches in history + ignoreBuffer int + + windowSize int + allocFrameBuffer int // needed? + error bool + dict *dict } // reset will reset the history to initial state of a frame. // The history must already have been initialized to the desired size. 
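For intuition about the history buffer `b` described in the struct above: it holds at most `windowSize` bytes of decoded output, discarding the oldest data as new blocks are appended. A hedged standalone sketch of that sliding behavior, with names invented rather than taken from the library:

```go
package main

import "fmt"

// slidingHistory mimics the role of history.b: it retains at most
// window bytes of the most recently decoded output.
type slidingHistory struct {
	window int
	b      []byte
}

func (h *slidingHistory) append(p []byte) {
	if len(p) >= h.window {
		// The block is at least a full window; keep only its tail.
		h.b = append(h.b[:0], p[len(p)-h.window:]...)
		return
	}
	keep := h.window - len(p)
	if len(h.b) > keep {
		// Discard the oldest bytes so everything fits in the window.
		h.b = append(h.b[:0], h.b[len(h.b)-keep:]...)
	}
	h.b = append(h.b, p...)
}

func main() {
	h := slidingHistory{window: 8}
	for _, s := range []string{"abcd", "efgh", "ijkl"} {
		h.append([]byte(s))
	}
	fmt.Println(string(h.b)) // "efghijkl"
}
```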
func (h *history) reset() { h.b = h.b[:0] + h.ignoreBuffer = 0 h.error = false h.recentOffsets = [3]int{1, 4, 8} if f := h.decoders.litLengths.fse; f != nil && !f.preDefined { @@ -35,7 +46,7 @@ func (h *history) reset() { if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined { fseDecoderPool.Put(f) } - h.decoders = sequenceDecs{} + h.decoders = sequenceDecs{br: h.decoders.br} if h.huffTree != nil { if h.dict == nil || h.dict.litEnc != h.huffTree { huffDecoderPool.Put(h.huffTree) @@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) { h.decoders.litLengths = dict.llDec h.decoders.offsets = dict.ofDec h.decoders.matchLengths = dict.mlDec + h.decoders.dict = dict.content h.recentOffsets = dict.offsets h.huffTree = dict.litEnc } @@ -83,6 +95,24 @@ func (h *history) append(b []byte) { copy(h.b[h.windowSize-len(b):], b) } +// ensureBlock will ensure there is space for at least one block... +func (h *history) ensureBlock() { + if cap(h.b) < h.allocFrameBuffer { + h.b = make([]byte, 0, h.allocFrameBuffer) + return + } + + avail := cap(h.b) - len(h.b) + if avail >= h.windowSize || avail > maxCompressedBlockSize { + return + } + // Move data down so we only have window size left. + // We know we have less than window size in b at this point. + discard := len(h.b) - h.windowSize + copy(h.b, h.b[discard:]) + h.b = h.b[:h.windowSize] +} + // append bytes to history without ever discarding anything. func (h *history) appendKeep(b []byte) { h.b = append(h.b, b...) diff --git a/vendor/github.com/klauspost/compress/zstd/seqdec.go b/vendor/github.com/klauspost/compress/zstd/seqdec.go index bc731e4cb6..213736ad77 100644 --- a/vendor/github.com/klauspost/compress/zstd/seqdec.go +++ b/vendor/github.com/klauspost/compress/zstd/seqdec.go @@ -20,6 +20,10 @@ type seq struct { llCode, mlCode, ofCode uint8 } +type seqVals struct { + ll, ml, mo int +} + func (s seq) String() string { if s.offset <= 3 { if s.offset == 0 { @@ -61,16 +65,18 @@ type sequenceDecs struct { offsets sequenceDec matchLengths sequenceDec prevOffset [3]int - hist []byte dict []byte literals []byte out []byte + nSeqs int + br *bitReader + seqSize int windowSize int maxBits uint8 } // initialize all 3 decoders from the stream input. -func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error { +func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error { if err := s.litLengths.init(br); err != nil { return errors.New("litLengths:" + err.Error()) } @@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] if err := s.matchLengths.init(br); err != nil { return errors.New("matchLengths:" + err.Error()) } - s.literals = literals - s.hist = hist.b + s.br = br s.prevOffset = hist.recentOffsets s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits s.windowSize = hist.windowSize @@ -94,11 +99,254 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out [] } // decode sequences from the stream with the provided history. -func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { +func (s *sequenceDecs) decode(seqs []seqVals) error { + br := s.br + + // Grab full sizes tables, to avoid bounds checks. 
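The "grab full-size tables" trick on the next line is a common Go bounds-check-elimination idiom: reslicing to a constant length once lets the compiler prove that later indexing with a smaller-width value is always in range. A minimal illustration under that assumption, with invented names:

```go
package main

import "fmt"

const tableSize = 256

// sum256 reslices dt to a fixed size up front, so the compiler can
// prove every dt access below is in range and drop per-access
// bounds checks inside the hot loop.
func sum256(dt []uint16, idx []byte) (s int) {
	tbl := dt[:tableSize] // panics early if dt is too short
	for _, v := range idx {
		s += int(tbl[v]) // v is a byte, so tbl[v] needs no bounds check
	}
	return s
}

func main() {
	dt := make([]uint16, tableSize)
	for i := range dt {
		dt[i] = uint16(i)
	}
	fmt.Println(sum256(dt, []byte{1, 2, 250})) // 253
}
```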
+ llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] + llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + s.seqSize = 0 + litRemain := len(s.literals) + + for i := range seqs { + var ll, mo, ml int + if br.off > 4+((maxOffsetBits+16+16)>>3) { + // inlined function: + // ll, mo, ml = s.nextFast(br, llState, mlState, ofState) + + // Final will not read from stream. + var llB, mlB, moB uint8 + ll, llB = llState.final() + ml, mlB = mlState.final() + mo, moB = ofState.final() + + // extra bits are stored in reverse order. + br.fillFast() + mo += br.getBits(moB) + if s.maxBits > 32 { + br.fillFast() + } + ml += br.getBits(mlB) + ll += br.getBits(llB) + + if moB > 1 { + s.prevOffset[2] = s.prevOffset[1] + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = mo + } else { + // mo = s.adjustOffset(mo, ll, moB) + // Inlined for rather big speedup + if ll == 0 { + // There is an exception though, when current sequence's literals_length = 0. + // In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2, + // an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte. + mo++ + } + + if mo == 0 { + mo = s.prevOffset[0] + } else { + var temp int + if mo == 3 { + temp = s.prevOffset[0] - 1 + } else { + temp = s.prevOffset[mo] + } + + if temp == 0 { + // 0 is not valid; input is corrupted; force offset to 1 + println("WARNING: temp was 0") + temp = 1 + } + + if mo != 1 { + s.prevOffset[2] = s.prevOffset[1] + } + s.prevOffset[1] = s.prevOffset[0] + s.prevOffset[0] = temp + mo = temp + } + } + br.fillFast() + } else { + if br.overread() { + if debugDecoder { + printf("reading sequence %d, exceeded available data\n", i) + } + return io.ErrUnexpectedEOF + } + ll, mo, ml = s.next(br, llState, mlState, ofState) + br.fill() + } + + if debugSequences { + println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml) + } + // Evaluate. + // We might be doing this async, so do it early. + if mo == 0 && ml > 0 { + return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) + } + if ml > maxMatchLen { + return fmt.Errorf("match len (%d) bigger than max allowed length", ml) + } + s.seqSize += ll + ml + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size", s.seqSize) + } + litRemain -= ll + if litRemain < 0 { + return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll) + } + seqs[i] = seqVals{ + ll: ll, + ml: ml, + mo: mo, + } + if i == len(seqs)-1 { + // This is the last sequence, so we shouldn't update state. + break + } + + // Manually inlined, ~ 5-20% faster + // Update all 3 states at once. Approx 20% faster. 
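// The single get32BitsFast read below services all three FSE states at once;
// this is safe because the combined transition bits always fit in one refill
// (the updateStates comment further down requires "at least 27 bits"). The
// word is consumed low-to-high: offset-state bits first, then match-length,
// then literal-length. A hedged decomposition (mask is shorthand for the
// bitMask lookups used below):
//
//	bits := br.get32BitsFast(nBits)                     // one read, all 3 states
//	of := bits & mask(ofState.nbBits())                 // low bits
//	ml := (bits >> ofState.nbBits()) & mask(mlState.nbBits())
//	ll := bits >> (ofState.nbBits() + mlState.nbBits()) // high bits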
+ nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits() + if nBits == 0 { + llState = llTable[llState.newState()&maxTableMask] + mlState = mlTable[mlState.newState()&maxTableMask] + ofState = ofTable[ofState.newState()&maxTableMask] + } else { + bits := br.get32BitsFast(nBits) + lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31)) + llState = llTable[(llState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits >> (ofState.nbBits() & 31)) + lowBits &= bitMask[mlState.nbBits()&15] + mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask] + + lowBits = uint16(bits) & bitMask[ofState.nbBits()&15] + ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask] + } + } + s.seqSize += litRemain + if s.seqSize > maxBlockSize { + return fmt.Errorf("output (%d) bigger than max block size", s.seqSize) + } + err := br.close() + if err != nil { + printf("Closing sequences: %v, %+v\n", err, *br) + } + return err +} + +// execute will execute the decoded sequence with the provided history. +// The sequence must be evaluated before being sent. +func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error { + // Ensure we have enough output size... + if len(s.out)+s.seqSize > cap(s.out) { + addBytes := s.seqSize + len(s.out) + s.out = append(s.out, make([]byte, addBytes)...) + s.out = s.out[:len(s.out)-addBytes] + } + + if debugDecoder { + printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize) + } + + var t = len(s.out) + out := s.out[:t+s.seqSize] + + for _, seq := range seqs { + // Add literals + copy(out[t:], s.literals[:seq.ll]) + t += seq.ll + s.literals = s.literals[seq.ll:] + + // Copy from dictionary... + if seq.mo > t+len(hist) || seq.mo > s.windowSize { + if len(s.dict) == 0 { + return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist)) + } + + // we may be in dictionary. + dictO := len(s.dict) - (seq.mo - (t + len(hist))) + if dictO < 0 || dictO >= len(s.dict) { + return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict)) + } + end := dictO + seq.ml + if end > len(s.dict) { + n := len(s.dict) - dictO + copy(out[t:], s.dict[dictO:]) + t += n + seq.ml -= n + } else { + copy(out[t:], s.dict[dictO:end]) + t += end - dictO + continue + } + } + + // Copy from history. + if v := seq.mo - t; v > 0 { + // v is the start position in history from end. + start := len(hist) - v + if seq.ml > v { + // Some goes into current block. + // Copy remainder of history + copy(out[t:], hist[start:]) + t += v + seq.ml -= v + } else { + copy(out[t:], hist[start:start+seq.ml]) + t += seq.ml + continue + } + } + // We must be in current buffer now + if seq.ml > 0 { + start := t - seq.mo + if seq.ml <= t-start { + // No overlap + copy(out[t:], out[start:start+seq.ml]) + t += seq.ml + continue + } else { + // Overlapping copy + // Extend destination slice and copy one byte at the time. + src := out[start : start+seq.ml] + dst := out[t:] + dst = dst[:len(src)] + t += len(src) + // Destination is the space we just added. + for i := range src { + dst[i] = src[i] + } + } + } + } + // Add final literals + copy(out[t:], s.literals) + if debugDecoder { + t += len(s.literals) + if t != len(out) { + panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize)) + } + } + s.out = out + + return nil +} + +// decode sequences from the stream with the provided history. 
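// decodeSync, below, is the single-goroutine path: it interleaves sequence
// decoding with output generation, appending straight to s.out. The
// decode/execute pair above is the split alternative: decode gathers
// validated (ll, ml, mo) triples plus a total output size, and execute then
// materializes them from the dictionary, the history, or the output built so
// far. A hedged sketch of chaining the split path, assuming the block decoder
// has already set s.literals, s.nSeqs, and s.br:
//
//	seqs := make([]seqVals, s.nSeqs)
//	if err := s.decode(seqs); err != nil {
//		return err
//	}
//	if err := s.execute(seqs, hist); err != nil {
//		return err
//	}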
+func (s *sequenceDecs) decodeSync(history *history) error { + br := s.br + seqs := s.nSeqs startSize := len(s.out) // Grab full sizes tables, to avoid bounds checks. llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize] llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state + hist := history.b[history.ignoreBuffer:] + out := s.out for i := seqs - 1; i >= 0; i-- { if br.overread() { @@ -151,7 +399,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if temp == 0 { // 0 is not valid; input is corrupted; force offset to 1 - println("temp was 0") + println("WARNING: temp was 0") temp = 1 } @@ -176,51 +424,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { if ll > len(s.literals) { return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals)) } - size := ll + ml + len(s.out) + size := ll + ml + len(out) if size-startSize > maxBlockSize { return fmt.Errorf("output (%d) bigger than max block size", size) } - if size > cap(s.out) { + if size > cap(out) { // Not enough size, which can happen under high volume block streaming conditions // but could be if destination slice is too small for sync operations. // over-allocating here can create a large amount of GC pressure so we try to keep // it as contained as possible - used := len(s.out) - startSize + used := len(out) - startSize addBytes := 256 + ll + ml + used>>2 // Clamp to max block size. if used+addBytes > maxBlockSize { addBytes = maxBlockSize - used } - s.out = append(s.out, make([]byte, addBytes)...) - s.out = s.out[:len(s.out)-addBytes] + out = append(out, make([]byte, addBytes)...) + out = out[:len(out)-addBytes] } if ml > maxMatchLen { return fmt.Errorf("match len (%d) bigger than max allowed length", ml) } // Add literals - s.out = append(s.out, s.literals[:ll]...) + out = append(out, s.literals[:ll]...) s.literals = s.literals[ll:] - out := s.out if mo == 0 && ml > 0 { return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml) } - if mo > len(s.out)+len(hist) || mo > s.windowSize { + if mo > len(out)+len(hist) || mo > s.windowSize { if len(s.dict) == 0 { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) } // we may be in dictionary. - dictO := len(s.dict) - (mo - (len(s.out) + len(hist))) + dictO := len(s.dict) - (mo - (len(out) + len(hist))) if dictO < 0 || dictO >= len(s.dict) { - return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist)) + return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist)) } end := dictO + ml if end > len(s.dict) { out = append(out, s.dict[dictO:]...) - mo -= len(s.dict) - dictO ml -= len(s.dict) - dictO } else { out = append(out, s.dict[dictO:end]...) @@ -231,26 +477,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { // Copy from history. // TODO: Blocks without history could be made to ignore this completely. - if v := mo - len(s.out); v > 0 { + if v := mo - len(out); v > 0 { // v is the start position in history from end. - start := len(s.hist) - v + start := len(hist) - v if ml > v { // Some goes into current block. // Copy remainder of history - out = append(out, s.hist[start:]...) 
- mo -= v + out = append(out, hist[start:]...) ml -= v } else { - out = append(out, s.hist[start:start+ml]...) + out = append(out, hist[start:start+ml]...) ml = 0 } } // We must be in current buffer now if ml > 0 { - start := len(s.out) - mo - if ml <= len(s.out)-start { + start := len(out) - mo + if ml <= len(out)-start { // No overlap - out = append(out, s.out[start:start+ml]...) + out = append(out, out[start:start+ml]...) } else { // Overlapping copy // Extend destination slice and copy one byte at the time. @@ -264,7 +509,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } } } - s.out = out if i == 0 { // This is the last sequence, so we shouldn't update state. break @@ -292,8 +536,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error { } // Add final literals - s.out = append(s.out, s.literals...) - return nil + s.out = append(out, s.literals...) + return br.close() } // update states, at least 27 bits must be available. @@ -457,36 +701,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int { s.prevOffset[0] = temp return temp } - -// mergeHistory will merge history. -func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) { - for i := uint(0); i < 3; i++ { - var sNew, sHist *sequenceDec - switch i { - default: - // same as "case 0": - sNew = &s.litLengths - sHist = &hist.litLengths - case 1: - sNew = &s.offsets - sHist = &hist.offsets - case 2: - sNew = &s.matchLengths - sHist = &hist.matchLengths - } - if sNew.repeat { - if sHist.fse == nil { - return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i) - } - continue - } - if sNew.fse == nil { - return nil, fmt.Errorf("sequence stream %d, no fse found", i) - } - if sHist.fse != nil && !sHist.fse.preDefined { - fseDecoderPool.Put(sHist.fse) - } - sHist.fse = sNew.fse - } - return hist, nil -} diff --git a/vendor/github.com/klauspost/compress/zstd/zstd.go b/vendor/github.com/klauspost/compress/zstd/zstd.go index ef1d49a009..0b0c2571dd 100644 --- a/vendor/github.com/klauspost/compress/zstd/zstd.go +++ b/vendor/github.com/klauspost/compress/zstd/zstd.go @@ -75,6 +75,10 @@ var ( // This is only returned if SingleSegment is specified on the frame. ErrFrameSizeExceeded = errors.New("frame size exceeded") + // ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size. + // This is only returned if SingleSegment is specified on the frame. + ErrFrameSizeMismatch = errors.New("frame size does not match size on stream") + // ErrCRCMismatch is returned if CRC mismatches. ErrCRCMismatch = errors.New("CRC check failed") diff --git a/vendor/github.com/mitchellh/hashstructure/LICENSE b/vendor/github.com/mitchellh/hashstructure/v2/LICENSE similarity index 100% rename from vendor/github.com/mitchellh/hashstructure/LICENSE rename to vendor/github.com/mitchellh/hashstructure/v2/LICENSE diff --git a/vendor/github.com/mitchellh/hashstructure/README.md b/vendor/github.com/mitchellh/hashstructure/v2/README.md similarity index 65% rename from vendor/github.com/mitchellh/hashstructure/README.md rename to vendor/github.com/mitchellh/hashstructure/v2/README.md index 28ce45a3e1..21f36be193 100644 --- a/vendor/github.com/mitchellh/hashstructure/README.md +++ b/vendor/github.com/mitchellh/hashstructure/v2/README.md @@ -17,20 +17,31 @@ sending data across the network, caching values locally (de-dup), and so on. 
doesn't affect the hash code but the field itself is still taken into account to create the hash value. - * Optionally specify a custom hash function to optimize for speed, collision + * Optionally, specify a custom hash function to optimize for speed, collision avoidance for your data set, etc. - - * Optionally hash the output of `.String()` on structs that implement fmt.Stringer, + + * Optionally, hash the output of `.String()` on structs that implement fmt.Stringer, allowing effective hashing of time.Time + * Optionally, override the hashing process by implementing `Hashable`. + ## Installation Standard `go get`: ``` -$ go get github.com/mitchellh/hashstructure +$ go get github.com/mitchellh/hashstructure/v2 ``` +**Note on v2:** It is highly recommended you use the "v2" release since this +fixes some significant hash collisions issues from v1. In practice, we used +v1 for many years in real projects at HashiCorp and never had issues, but it +is highly dependent on the shape of the data you're hashing and how you use +those hashes. + +When using v2+, you can still generate weaker v1 hashes by using the +`FormatV1` format when calling `Hash`. + ## Usage & Example For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure). @@ -54,7 +65,7 @@ v := ComplexStruct{ }, } -hash, err := hashstructure.Hash(v, nil) +hash, err := hashstructure.Hash(v, hashstructure.FormatV2, nil) if err != nil { panic(err) } diff --git a/vendor/github.com/mitchellh/hashstructure/v2/errors.go b/vendor/github.com/mitchellh/hashstructure/v2/errors.go new file mode 100644 index 0000000000..44b8951478 --- /dev/null +++ b/vendor/github.com/mitchellh/hashstructure/v2/errors.go @@ -0,0 +1,22 @@ +package hashstructure + +import ( + "fmt" +) + +// ErrNotStringer is returned when there's an error with hash:"string" +type ErrNotStringer struct { + Field string +} + +// Error implements error for ErrNotStringer +func (ens *ErrNotStringer) Error() string { + return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) +} + +// ErrFormat is returned when an invalid format is given to the Hash function. +type ErrFormat struct{} + +func (*ErrFormat) Error() string { + return "format must be one of the defined Format values in the hashstructure library" +} diff --git a/vendor/github.com/mitchellh/hashstructure/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/v2/hashstructure.go similarity index 63% rename from vendor/github.com/mitchellh/hashstructure/hashstructure.go rename to vendor/github.com/mitchellh/hashstructure/v2/hashstructure.go index ea13a1583c..3dc0eb74e0 100644 --- a/vendor/github.com/mitchellh/hashstructure/hashstructure.go +++ b/vendor/github.com/mitchellh/hashstructure/v2/hashstructure.go @@ -6,18 +6,9 @@ import ( "hash" "hash/fnv" "reflect" + "time" ) -// ErrNotStringer is returned when there's an error with hash:"string" -type ErrNotStringer struct { - Field string -} - -// Error implements error for ErrNotStringer -func (ens *ErrNotStringer) Error() string { - return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field) -} - // HashOptions are options that are available for hashing. type HashOptions struct { // Hasher is the hash function to use. If this isn't set, it will @@ -31,8 +22,42 @@ type HashOptions struct { // ZeroNil is flag determining if nil pointer should be treated equal // to a zero value of pointed type. By default this is false. 
ZeroNil bool + + // IgnoreZeroValue is determining if zero value fields should be + // ignored for hash calculation. + IgnoreZeroValue bool + + // SlicesAsSets assumes that a `set` tag is always present for slices. + // Default is false (in which case the tag is used instead) + SlicesAsSets bool + + // UseStringer will attempt to use fmt.Stringer always. If the struct + // doesn't implement fmt.Stringer, it'll fall back to trying usual tricks. + // If this is true, and the "string" tag is also set, the tag takes + // precedence (meaning that if the type doesn't implement fmt.Stringer, we + // panic) + UseStringer bool } +// Format specifies the hashing process used. Different formats typically +// generate different hashes for the same value and have different properties. +type Format uint + +const ( + // To disallow the zero value + formatInvalid Format = iota + + // FormatV1 is the format used in v1.x of this library. This has the + // downsides noted in issue #18 but allows simultaneous v1/v2 usage. + FormatV1 + + // FormatV2 is the current recommended format and fixes the issues + // noted in FormatV1. + FormatV2 + + formatMax // so we can easily find the end +) + // Hash returns the hash value of an arbitrary value. // // If opts is nil, then default options will be used. See HashOptions @@ -40,6 +65,11 @@ type HashOptions struct { // concurrently. None of the values within a *HashOptions struct are // safe to read/write while hashing is being done. // +// The "format" is required and must be one of the format values defined +// by this library. You should probably just use "FormatV2". This allows +// generated hashes uses alternate logic to maintain compatibility with +// older versions. +// // Notes on the value: // // * Unexported fields on structs are ignored and do not affect the @@ -65,7 +95,12 @@ type HashOptions struct { // * "string" - The field will be hashed as a string, only works when the // field implements fmt.Stringer // -func Hash(v interface{}, opts *HashOptions) (uint64, error) { +func Hash(v interface{}, format Format, opts *HashOptions) (uint64, error) { + // Validate our format + if format <= formatInvalid || format >= formatMax { + return 0, &ErrFormat{} + } + // Create default options if opts == nil { opts = &HashOptions{} @@ -82,17 +117,25 @@ func Hash(v interface{}, opts *HashOptions) (uint64, error) { // Create our walker and walk the structure w := &walker{ - h: opts.Hasher, - tag: opts.TagName, - zeronil: opts.ZeroNil, + format: format, + h: opts.Hasher, + tag: opts.TagName, + zeronil: opts.ZeroNil, + ignorezerovalue: opts.IgnoreZeroValue, + sets: opts.SlicesAsSets, + stringer: opts.UseStringer, } return w.visit(reflect.ValueOf(v), nil) } type walker struct { - h hash.Hash64 - tag string - zeronil bool + format Format + h hash.Hash64 + tag string + zeronil bool + ignorezerovalue bool + sets bool + stringer bool } type visitOpts struct { @@ -104,6 +147,8 @@ type visitOpts struct { StructField string } +var timeType = reflect.TypeOf(time.Time{}) + func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { t := reflect.TypeOf(0) @@ -159,6 +204,18 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return w.h.Sum64(), err } + switch v.Type() { + case timeType: + w.h.Reset() + b, err := v.Interface().(time.Time).MarshalBinary() + if err != nil { + return 0, err + } + + err = binary.Write(w.h, binary.LittleEndian, b) + return w.h.Sum64(), err + } + switch k { case reflect.Array: var h uint64 @@ -211,6 +268,11 @@ func (w 
*walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { h = hashUpdateUnordered(h, fieldHash) } + if w.format != FormatV1 { + // Important: read the docs for hashFinishUnordered + h = hashFinishUnordered(w.h, h) + } + return h, nil case reflect.Struct: @@ -220,6 +282,24 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { include = impl } + if impl, ok := parent.(Hashable); ok { + return impl.Hash() + } + + // If we can address this value, check if the pointer value + // implements our interfaces and use that if so. + if v.CanAddr() { + vptr := v.Addr() + parentptr := vptr.Interface() + if impl, ok := parentptr.(Includable); ok { + include = impl + } + + if impl, ok := parentptr.(Hashable); ok { + return impl.Hash() + } + } + t := v.Type() h, err := w.visit(reflect.ValueOf(t.Name()), nil) if err != nil { @@ -242,11 +322,19 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { continue } + if w.ignorezerovalue { + if innerV.IsZero() { + continue + } + } + // if string is set, use the string value - if tag == "string" { + if tag == "string" || w.stringer { if impl, ok := innerV.Interface().(fmt.Stringer); ok { innerV = reflect.ValueOf(impl.String()) - } else { + } else if tag == "string" { + // We only show this error if the tag explicitly + // requests a stringer. return 0, &ErrNotStringer{ Field: v.Type().Field(i).Name, } @@ -286,6 +374,11 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { fieldHash := hashUpdateOrdered(w.h, kh, vh) h = hashUpdateUnordered(h, fieldHash) } + + if w.format != FormatV1 { + // Important: read the docs for hashFinishUnordered + h = hashFinishUnordered(w.h, h) + } } return h, nil @@ -306,13 +399,18 @@ func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) { return 0, err } - if set { + if set || w.sets { h = hashUpdateUnordered(h, current) } else { h = hashUpdateOrdered(w.h, h, current) } } + if set && w.format != FormatV1 { + // Important: read the docs for hashFinishUnordered + h = hashFinishUnordered(w.h, h) + } + return h, nil case reflect.String: @@ -349,6 +447,32 @@ func hashUpdateUnordered(a, b uint64) uint64 { return a ^ b } +// After mixing a group of unique hashes with hashUpdateUnordered, it's always +// necessary to call hashFinishUnordered. Why? Because hashUpdateUnordered +// is a simple XOR, and calling hashUpdateUnordered on hashes produced by +// hashUpdateUnordered can effectively cancel out a previous change to the hash +// result if the same hash value appears later on. For example, consider: +// +// hashUpdateUnordered(hashUpdateUnordered("A", "B"), hashUpdateUnordered("A", "C")) = +// H("A") ^ H("B")) ^ (H("A") ^ H("C")) = +// (H("A") ^ H("A")) ^ (H("B") ^ H(C)) = +// H(B) ^ H(C) = +// hashUpdateUnordered(hashUpdateUnordered("Z", "B"), hashUpdateUnordered("Z", "C")) +// +// hashFinishUnordered "hardens" the result, so that encountering partially +// overlapping input data later on in a different context won't cancel out. 
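//
// A hedged sketch of the full pattern (hashUpdateUnordered is the plain XOR
// defined above; foldUnordered is illustrative, not part of the library):
//
//	func foldUnordered(h hash.Hash64, parts []uint64) uint64 {
//		var acc uint64
//		for _, p := range parts {
//			acc = hashUpdateUnordered(acc, p) // XOR-combine, order-free
//		}
//		return hashFinishUnordered(h, acc) // re-hash so later XORs cannot cancel
//	}
//
// Note that visit only applies this finishing step when the format is not
// FormatV1, which is how v1-compatible hashes remain reproducible.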
+func hashFinishUnordered(h hash.Hash64, a uint64) uint64 { + h.Reset() + + // We just panic if the writes fail + e1 := binary.Write(h, binary.LittleEndian, a) + if e1 != nil { + panic(e1) + } + + return h.Sum64() +} + // visitFlag is used as a bitmask for affecting visit behavior type visitFlag uint diff --git a/vendor/github.com/mitchellh/hashstructure/include.go b/vendor/github.com/mitchellh/hashstructure/v2/include.go similarity index 68% rename from vendor/github.com/mitchellh/hashstructure/include.go rename to vendor/github.com/mitchellh/hashstructure/v2/include.go index b6289c0bee..702d35415d 100644 --- a/vendor/github.com/mitchellh/hashstructure/include.go +++ b/vendor/github.com/mitchellh/hashstructure/v2/include.go @@ -13,3 +13,10 @@ type Includable interface { type IncludableMap interface { HashIncludeMap(field string, k, v interface{}) (bool, error) } + +// Hashable is an interface that can optionally be implemented by a struct +// to override the hash value. This value will override the hash value for +// the entire struct. Entries in the struct will not be hashed. +type Hashable interface { + Hash() (uint64, error) +} diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go index d020e8df77..939f2c2ca7 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.pb.go +++ b/vendor/github.com/moby/buildkit/api/services/control/control.pb.go @@ -205,13 +205,14 @@ type UsageRecord struct { Mutable bool `protobuf:"varint,2,opt,name=Mutable,proto3" json:"Mutable,omitempty"` InUse bool `protobuf:"varint,3,opt,name=InUse,proto3" json:"InUse,omitempty"` Size_ int64 `protobuf:"varint,4,opt,name=Size,proto3" json:"Size,omitempty"` - Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` + Parent string `protobuf:"bytes,5,opt,name=Parent,proto3" json:"Parent,omitempty"` // Deprecated: Do not use. CreatedAt time.Time `protobuf:"bytes,6,opt,name=CreatedAt,proto3,stdtime" json:"CreatedAt"` LastUsedAt *time.Time `protobuf:"bytes,7,opt,name=LastUsedAt,proto3,stdtime" json:"LastUsedAt,omitempty"` UsageCount int64 `protobuf:"varint,8,opt,name=UsageCount,proto3" json:"UsageCount,omitempty"` Description string `protobuf:"bytes,9,opt,name=Description,proto3" json:"Description,omitempty"` RecordType string `protobuf:"bytes,10,opt,name=RecordType,proto3" json:"RecordType,omitempty"` Shared bool `protobuf:"varint,11,opt,name=Shared,proto3" json:"Shared,omitempty"` + Parents []string `protobuf:"bytes,12,rep,name=Parents,proto3" json:"Parents,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -278,6 +279,7 @@ func (m *UsageRecord) GetSize_() int64 { return 0 } +// Deprecated: Do not use. 
func (m *UsageRecord) GetParent() string { if m != nil { return m.Parent @@ -327,6 +329,13 @@ func (m *UsageRecord) GetShared() bool { return false } +func (m *UsageRecord) GetParents() []string { + if m != nil { + return m.Parents + } + return nil +} + type SolveRequest struct { Ref string `protobuf:"bytes,1,opt,name=Ref,proto3" json:"Ref,omitempty"` Definition *pb.Definition `protobuf:"bytes,2,opt,name=Definition,proto3" json:"Definition,omitempty"` @@ -683,12 +692,13 @@ func (m *StatusRequest) GetRef() string { } type StatusResponse struct { - Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes,proto3" json:"vertexes,omitempty"` - Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses,proto3" json:"statuses,omitempty"` - Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs,proto3" json:"logs,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + Vertexes []*Vertex `protobuf:"bytes,1,rep,name=vertexes,proto3" json:"vertexes,omitempty"` + Statuses []*VertexStatus `protobuf:"bytes,2,rep,name=statuses,proto3" json:"statuses,omitempty"` + Logs []*VertexLog `protobuf:"bytes,3,rep,name=logs,proto3" json:"logs,omitempty"` + Warnings []*VertexWarning `protobuf:"bytes,4,rep,name=warnings,proto3" json:"warnings,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *StatusResponse) Reset() { *m = StatusResponse{} } @@ -745,6 +755,13 @@ func (m *StatusResponse) GetLogs() []*VertexLog { return nil } +func (m *StatusResponse) GetWarnings() []*VertexWarning { + if m != nil { + return m.Warnings + } + return nil +} + type Vertex struct { Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` Inputs []github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,rep,name=inputs,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"inputs"` @@ -753,6 +770,7 @@ type Vertex struct { Started *time.Time `protobuf:"bytes,5,opt,name=started,proto3,stdtime" json:"started,omitempty"` Completed *time.Time `protobuf:"bytes,6,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` Error string `protobuf:"bytes,7,opt,name=error,proto3" json:"error,omitempty"` + ProgressGroup *pb.ProgressGroup `protobuf:"bytes,8,opt,name=progressGroup,proto3" json:"progressGroup,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -826,19 +844,25 @@ func (m *Vertex) GetError() string { return "" } +func (m *Vertex) GetProgressGroup() *pb.ProgressGroup { + if m != nil { + return m.ProgressGroup + } + return nil +} + type VertexStatus struct { - ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` - Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` - Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` - Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` - Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` - // TODO: add started, completed - Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` - Started *time.Time `protobuf:"bytes,7,opt,name=started,proto3,stdtime" json:"started,omitempty"` - Completed *time.Time 
`protobuf:"bytes,8,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` - XXX_NoUnkeyedLiteral struct{} `json:"-"` - XXX_unrecognized []byte `json:"-"` - XXX_sizecache int32 `json:"-"` + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,2,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Name string `protobuf:"bytes,3,opt,name=name,proto3" json:"name,omitempty"` + Current int64 `protobuf:"varint,4,opt,name=current,proto3" json:"current,omitempty"` + Total int64 `protobuf:"varint,5,opt,name=total,proto3" json:"total,omitempty"` + Timestamp time.Time `protobuf:"bytes,6,opt,name=timestamp,proto3,stdtime" json:"timestamp"` + Started *time.Time `protobuf:"bytes,7,opt,name=started,proto3,stdtime" json:"started,omitempty"` + Completed *time.Time `protobuf:"bytes,8,opt,name=completed,proto3,stdtime" json:"completed,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` } func (m *VertexStatus) Reset() { *m = VertexStatus{} } @@ -987,6 +1011,94 @@ func (m *VertexLog) GetMsg() []byte { return nil } +type VertexWarning struct { + Vertex github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=vertex,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"vertex"` + Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` + Short []byte `protobuf:"bytes,3,opt,name=short,proto3" json:"short,omitempty"` + Detail [][]byte `protobuf:"bytes,4,rep,name=detail,proto3" json:"detail,omitempty"` + Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` + Info *pb.SourceInfo `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` + Ranges []*pb.Range `protobuf:"bytes,7,rep,name=ranges,proto3" json:"ranges,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *VertexWarning) Reset() { *m = VertexWarning{} } +func (m *VertexWarning) String() string { return proto.CompactTextString(m) } +func (*VertexWarning) ProtoMessage() {} +func (*VertexWarning) Descriptor() ([]byte, []int) { + return fileDescriptor_0c5120591600887d, []int{13} +} +func (m *VertexWarning) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *VertexWarning) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_VertexWarning.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *VertexWarning) XXX_Merge(src proto.Message) { + xxx_messageInfo_VertexWarning.Merge(m, src) +} +func (m *VertexWarning) XXX_Size() int { + return m.Size() +} +func (m *VertexWarning) XXX_DiscardUnknown() { + xxx_messageInfo_VertexWarning.DiscardUnknown(m) +} + +var xxx_messageInfo_VertexWarning proto.InternalMessageInfo + +func (m *VertexWarning) GetLevel() int64 { + if m != nil { + return m.Level + } + return 0 +} + +func (m *VertexWarning) GetShort() []byte { + if m != nil { + return m.Short + } + return nil +} + +func (m *VertexWarning) GetDetail() [][]byte { + if m != nil { + return m.Detail + } + return nil +} + +func (m *VertexWarning) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *VertexWarning) GetInfo() *pb.SourceInfo { + if m != nil { + return m.Info + } + return nil +} + +func (m 
*VertexWarning) GetRanges() []*pb.Range { + if m != nil { + return m.Ranges + } + return nil +} + type BytesMessage struct { Data []byte `protobuf:"bytes,1,opt,name=data,proto3" json:"data,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` @@ -998,7 +1110,7 @@ func (m *BytesMessage) Reset() { *m = BytesMessage{} } func (m *BytesMessage) String() string { return proto.CompactTextString(m) } func (*BytesMessage) ProtoMessage() {} func (*BytesMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{13} + return fileDescriptor_0c5120591600887d, []int{14} } func (m *BytesMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1045,7 +1157,7 @@ func (m *ListWorkersRequest) Reset() { *m = ListWorkersRequest{} } func (m *ListWorkersRequest) String() string { return proto.CompactTextString(m) } func (*ListWorkersRequest) ProtoMessage() {} func (*ListWorkersRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{14} + return fileDescriptor_0c5120591600887d, []int{15} } func (m *ListWorkersRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1092,7 +1204,7 @@ func (m *ListWorkersResponse) Reset() { *m = ListWorkersResponse{} } func (m *ListWorkersResponse) String() string { return proto.CompactTextString(m) } func (*ListWorkersResponse) ProtoMessage() {} func (*ListWorkersResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_0c5120591600887d, []int{15} + return fileDescriptor_0c5120591600887d, []int{16} } func (m *ListWorkersResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1148,6 +1260,7 @@ func init() { proto.RegisterType((*Vertex)(nil), "moby.buildkit.v1.Vertex") proto.RegisterType((*VertexStatus)(nil), "moby.buildkit.v1.VertexStatus") proto.RegisterType((*VertexLog)(nil), "moby.buildkit.v1.VertexLog") + proto.RegisterType((*VertexWarning)(nil), "moby.buildkit.v1.VertexWarning") proto.RegisterType((*BytesMessage)(nil), "moby.buildkit.v1.BytesMessage") proto.RegisterType((*ListWorkersRequest)(nil), "moby.buildkit.v1.ListWorkersRequest") proto.RegisterType((*ListWorkersResponse)(nil), "moby.buildkit.v1.ListWorkersResponse") @@ -1156,95 +1269,104 @@ func init() { func init() { proto.RegisterFile("control.proto", fileDescriptor_0c5120591600887d) } var fileDescriptor_0c5120591600887d = []byte{ - // 1397 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x57, 0x4d, 0x6f, 0x1b, 0xc5, - 0x1b, 0xef, 0xda, 0xf1, 0xdb, 0x63, 0x27, 0x4a, 0xa7, 0xfd, 0x57, 0xab, 0xfd, 0x8b, 0xc4, 0x6c, - 0x8b, 0x64, 0x55, 0xed, 0x3a, 0x35, 0x14, 0x95, 0x08, 0x50, 0xeb, 0xb8, 0xa8, 0xa9, 0x1a, 0x51, - 0x36, 0x2d, 0x95, 0x7a, 0x40, 0x5a, 0xdb, 0x13, 0x77, 0x95, 0xf5, 0xce, 0x32, 0x33, 0x1b, 0x6a, - 0x3e, 0x00, 0x67, 0xbe, 0x03, 0x07, 0x4e, 0x9c, 0x38, 0xf0, 0x09, 0x90, 0x7a, 0xe4, 0xdc, 0x43, - 0x40, 0xb9, 0xc3, 0x9d, 0x1b, 0x9a, 0x97, 0x75, 0xc6, 0xb1, 0x9d, 0xc4, 0xe9, 0xc9, 0xf3, 0x8c, - 0x9f, 0xdf, 0x6f, 0x9f, 0xd7, 0x99, 0x79, 0x60, 0xb9, 0x47, 0x62, 0x4e, 0x49, 0xe4, 0x25, 0x94, - 0x70, 0x82, 0x56, 0x87, 0xa4, 0x3b, 0xf2, 0xba, 0x69, 0x18, 0xf5, 0xf7, 0x43, 0xee, 0x1d, 0xdc, - 0x71, 0x6e, 0x0f, 0x42, 0xfe, 0x2a, 0xed, 0x7a, 0x3d, 0x32, 0x6c, 0x0e, 0xc8, 0x80, 0x34, 0xa5, - 0x62, 0x37, 0xdd, 0x93, 0x92, 0x14, 0xe4, 0x4a, 0x11, 0x38, 0xeb, 0x03, 0x42, 0x06, 0x11, 0x3e, - 0xd6, 0xe2, 0xe1, 0x10, 0x33, 0x1e, 0x0c, 0x13, 0xad, 0x70, 0xcb, 0xe0, 0x13, 0x1f, 0x6b, 0x66, - 0x1f, 0x6b, 0x32, 0x12, 0x1d, 0x60, 0xda, 0x4c, 0xba, 0x4d, 0x92, 0x30, 0xad, 0xdd, 
0x9c, 0xab, - 0x1d, 0x24, 0x61, 0x93, 0x8f, 0x12, 0xcc, 0x9a, 0xdf, 0x11, 0xba, 0x8f, 0xa9, 0x02, 0xb8, 0x3f, - 0x58, 0x50, 0x7b, 0x4a, 0xd3, 0x18, 0xfb, 0xf8, 0xdb, 0x14, 0x33, 0x8e, 0xae, 0x41, 0x71, 0x2f, - 0x8c, 0x38, 0xa6, 0xb6, 0x55, 0xcf, 0x37, 0x2a, 0xbe, 0x96, 0xd0, 0x2a, 0xe4, 0x83, 0x28, 0xb2, - 0x73, 0x75, 0xab, 0x51, 0xf6, 0xc5, 0x12, 0x35, 0xa0, 0xb6, 0x8f, 0x71, 0xd2, 0x49, 0x69, 0xc0, - 0x43, 0x12, 0xdb, 0xf9, 0xba, 0xd5, 0xc8, 0xb7, 0x97, 0xde, 0x1c, 0xae, 0x5b, 0xfe, 0xc4, 0x3f, - 0xc8, 0x85, 0x8a, 0x90, 0xdb, 0x23, 0x8e, 0x99, 0xbd, 0x64, 0xa8, 0x1d, 0x6f, 0xbb, 0x37, 0x61, - 0xb5, 0x13, 0xb2, 0xfd, 0xe7, 0x2c, 0x18, 0x9c, 0x65, 0x8b, 0xfb, 0x18, 0x2e, 0x1b, 0xba, 0x2c, - 0x21, 0x31, 0xc3, 0xe8, 0x2e, 0x14, 0x29, 0xee, 0x11, 0xda, 0x97, 0xca, 0xd5, 0xd6, 0x7b, 0xde, - 0xc9, 0xdc, 0x78, 0x1a, 0x20, 0x94, 0x7c, 0xad, 0xec, 0xfe, 0x9b, 0x83, 0xaa, 0xb1, 0x8f, 0x56, - 0x20, 0xb7, 0xdd, 0xb1, 0xad, 0xba, 0xd5, 0xa8, 0xf8, 0xb9, 0xed, 0x0e, 0xb2, 0xa1, 0xb4, 0x93, - 0xf2, 0xa0, 0x1b, 0x61, 0xed, 0x7b, 0x26, 0xa2, 0xab, 0x50, 0xd8, 0x8e, 0x9f, 0x33, 0x2c, 0x1d, - 0x2f, 0xfb, 0x4a, 0x40, 0x08, 0x96, 0x76, 0xc3, 0xef, 0xb1, 0x72, 0xd3, 0x97, 0x6b, 0xe1, 0xc7, - 0xd3, 0x80, 0xe2, 0x98, 0xdb, 0x05, 0xc9, 0xab, 0x25, 0xd4, 0x86, 0xca, 0x16, 0xc5, 0x01, 0xc7, - 0xfd, 0x07, 0xdc, 0x2e, 0xd6, 0xad, 0x46, 0xb5, 0xe5, 0x78, 0xaa, 0x20, 0xbc, 0xac, 0x20, 0xbc, - 0x67, 0x59, 0x41, 0xb4, 0xcb, 0x6f, 0x0e, 0xd7, 0x2f, 0xfd, 0xf8, 0xa7, 0x88, 0xdb, 0x18, 0x86, - 0xee, 0x03, 0x3c, 0x09, 0x18, 0x7f, 0xce, 0x24, 0x49, 0xe9, 0x4c, 0x92, 0x25, 0x49, 0x60, 0x60, - 0xd0, 0x1a, 0x80, 0x0c, 0xc0, 0x16, 0x49, 0x63, 0x6e, 0x97, 0xa5, 0xdd, 0xc6, 0x0e, 0xaa, 0x43, - 0xb5, 0x83, 0x59, 0x8f, 0x86, 0x89, 0x4c, 0x73, 0x45, 0xba, 0x60, 0x6e, 0x09, 0x06, 0x15, 0xbd, - 0x67, 0xa3, 0x04, 0xdb, 0x20, 0x15, 0x8c, 0x1d, 0xe1, 0xff, 0xee, 0xab, 0x80, 0xe2, 0xbe, 0x5d, - 0x95, 0xa1, 0xd2, 0x92, 0xfb, 0x53, 0x11, 0x6a, 0xbb, 0xa2, 0x8a, 0xb3, 0x84, 0xaf, 0x42, 0xde, - 0xc7, 0x7b, 0x3a, 0xfa, 0x62, 0x89, 0x3c, 0x80, 0x0e, 0xde, 0x0b, 0xe3, 0x50, 0x7e, 0x3b, 0x27, - 0xdd, 0x5b, 0xf1, 0x92, 0xae, 0x77, 0xbc, 0xeb, 0x1b, 0x1a, 0xc8, 0x81, 0xf2, 0xc3, 0xd7, 0x09, - 0xa1, 0xa2, 0x68, 0xf2, 0x92, 0x66, 0x2c, 0xa3, 0x17, 0xb0, 0x9c, 0xad, 0x1f, 0x70, 0x4e, 0x45, - 0x29, 0x8a, 0x42, 0xb9, 0x33, 0x5d, 0x28, 0xa6, 0x51, 0xde, 0x04, 0xe6, 0x61, 0xcc, 0xe9, 0xc8, - 0x9f, 0xe4, 0x11, 0x35, 0xb2, 0x8b, 0x19, 0x13, 0x16, 0xaa, 0x04, 0x67, 0xa2, 0x30, 0xe7, 0x0b, - 0x4a, 0x62, 0x8e, 0xe3, 0xbe, 0x4c, 0x70, 0xc5, 0x1f, 0xcb, 0xc2, 0x9c, 0x6c, 0xad, 0xcc, 0x29, - 0x9d, 0xcb, 0x9c, 0x09, 0x8c, 0x36, 0x67, 0x62, 0x0f, 0x6d, 0x42, 0x61, 0x2b, 0xe8, 0xbd, 0xc2, - 0x32, 0x97, 0xd5, 0xd6, 0xda, 0x34, 0xa1, 0xfc, 0xfb, 0x4b, 0x99, 0x3c, 0x26, 0x5b, 0xf1, 0x92, - 0xaf, 0x20, 0xe8, 0x1b, 0xa8, 0x3d, 0x8c, 0x79, 0xc8, 0x23, 0x3c, 0xc4, 0x31, 0x67, 0x76, 0x45, - 0x34, 0x5e, 0x7b, 0xf3, 0xed, 0xe1, 0xfa, 0xc7, 0x73, 0x8f, 0x96, 0x94, 0x87, 0x51, 0x13, 0x1b, - 0x28, 0xcf, 0xa0, 0xf0, 0x27, 0xf8, 0xd0, 0x4b, 0x58, 0xc9, 0x8c, 0xdd, 0x8e, 0x93, 0x94, 0x33, - 0x1b, 0xa4, 0xd7, 0xad, 0x73, 0x7a, 0xad, 0x40, 0xca, 0xed, 0x13, 0x4c, 0xce, 0x7d, 0x40, 0xd3, - 0xb9, 0x12, 0x35, 0xb5, 0x8f, 0x47, 0x59, 0x4d, 0xed, 0xe3, 0x91, 0x68, 0xdc, 0x83, 0x20, 0x4a, - 0x55, 0x43, 0x57, 0x7c, 0x25, 0x6c, 0xe6, 0xee, 0x59, 0x82, 0x61, 0x3a, 0xbc, 0x0b, 0x31, 0x7c, - 0x05, 0x57, 0x66, 0x98, 0x3a, 0x83, 0xe2, 0x86, 0x49, 0x31, 0x5d, 0xd3, 0xc7, 0x94, 0xee, 0x2f, - 0x79, 0xa8, 0x99, 0x09, 0x43, 0x1b, 0x70, 0x45, 0xf9, 0xe9, 0xe3, 0xbd, 0x0e, 0x4e, 0x28, 0xee, - 0x89, 0xb3, 
0x40, 0x93, 0xcf, 0xfa, 0x0b, 0xb5, 0xe0, 0xea, 0xf6, 0x50, 0x6f, 0x33, 0x03, 0x92, - 0x93, 0xc7, 0xea, 0xcc, 0xff, 0x10, 0x81, 0xff, 0x29, 0x2a, 0x19, 0x09, 0x03, 0x94, 0x97, 0x09, - 0xfb, 0xe4, 0xf4, 0xaa, 0xf2, 0x66, 0x62, 0x55, 0xde, 0x66, 0xf3, 0xa2, 0xcf, 0xa0, 0xa4, 0xfe, - 0xc8, 0x1a, 0xf3, 0xfa, 0xe9, 0x9f, 0x50, 0x64, 0x19, 0x46, 0xc0, 0x95, 0x1f, 0xcc, 0x2e, 0x2c, - 0x00, 0xd7, 0x18, 0xe7, 0x11, 0x38, 0xf3, 0x4d, 0x5e, 0xa4, 0x04, 0xdc, 0x9f, 0x2d, 0xb8, 0x3c, - 0xf5, 0x21, 0x71, 0x2f, 0xc8, 0xd3, 0x51, 0x51, 0xc8, 0x35, 0xea, 0x40, 0x41, 0x75, 0x7e, 0x4e, - 0x1a, 0xec, 0x9d, 0xc3, 0x60, 0xcf, 0x68, 0x7b, 0x05, 0x76, 0xee, 0x01, 0x5c, 0xac, 0x58, 0xdd, - 0xdf, 0x2c, 0x58, 0xd6, 0x5d, 0xa6, 0x2f, 0xd1, 0x00, 0x56, 0xb3, 0x16, 0xca, 0xf6, 0xf4, 0x75, - 0x7a, 0x77, 0x6e, 0x83, 0x2a, 0x35, 0xef, 0x24, 0x4e, 0xd9, 0x38, 0x45, 0xe7, 0x6c, 0x65, 0x75, - 0x75, 0x42, 0x75, 0x21, 0xcb, 0xdf, 0x87, 0xe5, 0x5d, 0x1e, 0xf0, 0x94, 0xcd, 0xbd, 0x39, 0xdc, - 0x5f, 0x2d, 0x58, 0xc9, 0x74, 0xb4, 0x77, 0x1f, 0x41, 0xf9, 0x00, 0x53, 0x8e, 0x5f, 0x63, 0xa6, - 0xbd, 0xb2, 0xa7, 0xbd, 0xfa, 0x5a, 0x6a, 0xf8, 0x63, 0x4d, 0xb4, 0x09, 0x65, 0x26, 0x79, 0x70, - 0x96, 0xa8, 0xb5, 0x79, 0x28, 0xfd, 0xbd, 0xb1, 0x3e, 0x6a, 0xc2, 0x52, 0x44, 0x06, 0x4c, 0xf7, - 0xcc, 0xff, 0xe7, 0xe1, 0x9e, 0x90, 0x81, 0x2f, 0x15, 0xdd, 0xc3, 0x1c, 0x14, 0xd5, 0x1e, 0x7a, - 0x0c, 0xc5, 0x7e, 0x38, 0xc0, 0x8c, 0x2b, 0xaf, 0xda, 0x2d, 0x71, 0x4e, 0xbf, 0x3d, 0x5c, 0xbf, - 0x69, 0x1c, 0xc4, 0x24, 0xc1, 0xb1, 0x78, 0x91, 0x06, 0x61, 0x8c, 0x29, 0x6b, 0x0e, 0xc8, 0x6d, - 0x05, 0xf1, 0x3a, 0xf2, 0xc7, 0xd7, 0x0c, 0x82, 0x2b, 0x54, 0xc7, 0xad, 0x6c, 0xf9, 0x8b, 0x71, - 0x29, 0x06, 0x51, 0xc9, 0x71, 0x30, 0xc4, 0xfa, 0x7a, 0x95, 0x6b, 0x71, 0xc3, 0xf7, 0x44, 0xa9, - 0xf6, 0xe5, 0xbb, 0xa7, 0xec, 0x6b, 0x09, 0x6d, 0x42, 0x89, 0xf1, 0x80, 0x8a, 0x63, 0xa3, 0x70, - 0xce, 0xa7, 0x49, 0x06, 0x40, 0x9f, 0x43, 0xa5, 0x47, 0x86, 0x49, 0x84, 0x05, 0xba, 0x78, 0x4e, - 0xf4, 0x31, 0x44, 0x54, 0x0f, 0xa6, 0x94, 0x50, 0xf9, 0x28, 0xaa, 0xf8, 0x4a, 0x70, 0xff, 0xc9, - 0x41, 0xcd, 0x4c, 0xd6, 0xd4, 0x83, 0xef, 0x31, 0x14, 0x55, 0xea, 0x55, 0xd5, 0x5d, 0x2c, 0x54, - 0x8a, 0x61, 0x66, 0xa8, 0x6c, 0x28, 0xf5, 0x52, 0x2a, 0x5f, 0x83, 0xea, 0x8d, 0x98, 0x89, 0xc2, - 0x60, 0x4e, 0x78, 0x10, 0xc9, 0x50, 0xe5, 0x7d, 0x25, 0x88, 0x47, 0xe2, 0x78, 0x26, 0x58, 0xec, - 0x91, 0x38, 0x86, 0x99, 0x69, 0x28, 0xbd, 0x53, 0x1a, 0xca, 0x0b, 0xa7, 0xc1, 0xfd, 0xdd, 0x82, - 0xca, 0xb8, 0xca, 0x8d, 0xe8, 0x5a, 0xef, 0x1c, 0xdd, 0x89, 0xc8, 0xe4, 0x2e, 0x16, 0x99, 0x6b, - 0x50, 0x64, 0x9c, 0xe2, 0x60, 0xa8, 0xc6, 0x17, 0x5f, 0x4b, 0xe2, 0x3c, 0x19, 0xb2, 0x81, 0xcc, - 0x50, 0xcd, 0x17, 0x4b, 0xd7, 0x85, 0x9a, 0x9c, 0x54, 0x76, 0x30, 0x13, 0x6f, 0x63, 0x91, 0xdb, - 0x7e, 0xc0, 0x03, 0xe9, 0x47, 0xcd, 0x97, 0x6b, 0xf7, 0x16, 0xa0, 0x27, 0x21, 0xe3, 0x2f, 0xe4, - 0x84, 0xc5, 0xce, 0x1a, 0x63, 0x76, 0xe1, 0xca, 0x84, 0xb6, 0x3e, 0xa5, 0x3e, 0x3d, 0x31, 0xc8, - 0xdc, 0x98, 0x3e, 0x35, 0xe4, 0x20, 0xe7, 0x29, 0xe0, 0xe4, 0x3c, 0xd3, 0xfa, 0x3b, 0x0f, 0xa5, - 0x2d, 0x35, 0xa3, 0xa2, 0x67, 0x50, 0x19, 0xcf, 0x49, 0xc8, 0x9d, 0xa6, 0x39, 0x39, 0x70, 0x39, - 0xd7, 0x4f, 0xd5, 0xd1, 0xf6, 0x3d, 0x82, 0x82, 0x9c, 0x18, 0xd1, 0x8c, 0x63, 0xd0, 0x1c, 0x25, - 0x9d, 0xd3, 0x27, 0xb0, 0x0d, 0x4b, 0x30, 0xc9, 0x3b, 0x64, 0x16, 0x93, 0xf9, 0xfa, 0x73, 0xd6, - 0xcf, 0xb8, 0x7c, 0xd0, 0x0e, 0x14, 0x75, 0x3b, 0xcf, 0x52, 0x35, 0x6f, 0x0a, 0xa7, 0x3e, 0x5f, - 0x41, 0x91, 0x6d, 0x58, 0x68, 0x67, 0xfc, 0xa0, 0x9f, 0x65, 0x9a, 0x59, 0x06, 0xce, 0x19, 0xff, - 0x37, 0xac, 0x0d, 0x0b, 0xbd, 0x84, 
0xaa, 0x91, 0x68, 0x34, 0x23, 0xa1, 0xd3, 0x55, 0xe3, 0x7c, - 0x70, 0x86, 0x96, 0x32, 0xb6, 0x5d, 0x7b, 0x73, 0xb4, 0x66, 0xfd, 0x71, 0xb4, 0x66, 0xfd, 0x75, - 0xb4, 0x66, 0x75, 0x8b, 0xb2, 0xee, 0x3f, 0xfc, 0x2f, 0x00, 0x00, 0xff, 0xff, 0xfb, 0x65, 0x7c, - 0xd6, 0xa7, 0x10, 0x00, 0x00, + // 1543 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0xcd, 0x6f, 0x1b, 0x45, + 0x14, 0xef, 0xda, 0xf1, 0xd7, 0x8b, 0x13, 0xa5, 0xd3, 0x52, 0xad, 0x16, 0x91, 0xa4, 0xdb, 0x22, + 0x45, 0x55, 0xbb, 0x4e, 0x03, 0x85, 0x12, 0x3e, 0xd4, 0x3a, 0x2e, 0x34, 0x55, 0x23, 0xca, 0xa4, + 0xa5, 0x52, 0x0f, 0x48, 0x6b, 0x7b, 0xbc, 0x59, 0x65, 0xbd, 0xb3, 0xcc, 0xcc, 0xa6, 0x35, 0x7f, + 0x00, 0x67, 0x6e, 0xfc, 0x01, 0x1c, 0x38, 0x71, 0xe6, 0x2f, 0x40, 0xea, 0x91, 0x73, 0x0f, 0x01, + 0xf5, 0x0e, 0xe2, 0xc8, 0x11, 0xcd, 0xc7, 0x3a, 0xeb, 0xd8, 0xce, 0x57, 0x39, 0x79, 0xde, 0xcc, + 0x7b, 0xbf, 0x7d, 0x9f, 0x33, 0xef, 0x19, 0xe6, 0x3a, 0x34, 0x16, 0x8c, 0x46, 0x5e, 0xc2, 0xa8, + 0xa0, 0x68, 0xa1, 0x4f, 0xdb, 0x03, 0xaf, 0x9d, 0x86, 0x51, 0x77, 0x37, 0x14, 0xde, 0xde, 0x4d, + 0xe7, 0x46, 0x10, 0x8a, 0x9d, 0xb4, 0xed, 0x75, 0x68, 0xbf, 0x11, 0xd0, 0x80, 0x36, 0x14, 0x63, + 0x3b, 0xed, 0x29, 0x4a, 0x11, 0x6a, 0xa5, 0x01, 0x9c, 0xa5, 0x80, 0xd2, 0x20, 0x22, 0x07, 0x5c, + 0x22, 0xec, 0x13, 0x2e, 0xfc, 0x7e, 0x62, 0x18, 0xae, 0xe7, 0xf0, 0xe4, 0xc7, 0x1a, 0xd9, 0xc7, + 0x1a, 0x9c, 0x46, 0x7b, 0x84, 0x35, 0x92, 0x76, 0x83, 0x26, 0xdc, 0x70, 0x37, 0xa6, 0x72, 0xfb, + 0x49, 0xd8, 0x10, 0x83, 0x84, 0xf0, 0xc6, 0x73, 0xca, 0x76, 0x09, 0xd3, 0x02, 0xee, 0xf7, 0x16, + 0xd4, 0x1f, 0xb1, 0x34, 0x26, 0x98, 0x7c, 0x9b, 0x12, 0x2e, 0xd0, 0x25, 0x28, 0xf7, 0xc2, 0x48, + 0x10, 0x66, 0x5b, 0xcb, 0xc5, 0x95, 0x1a, 0x36, 0x14, 0x5a, 0x80, 0xa2, 0x1f, 0x45, 0x76, 0x61, + 0xd9, 0x5a, 0xa9, 0x62, 0xb9, 0x44, 0x2b, 0x50, 0xdf, 0x25, 0x24, 0x69, 0xa5, 0xcc, 0x17, 0x21, + 0x8d, 0xed, 0xe2, 0xb2, 0xb5, 0x52, 0x6c, 0xce, 0xbc, 0xdc, 0x5f, 0xb2, 0xf0, 0xc8, 0x09, 0x72, + 0xa1, 0x26, 0xe9, 0xe6, 0x40, 0x10, 0x6e, 0xcf, 0xe4, 0xd8, 0x0e, 0xb6, 0xdd, 0x6b, 0xb0, 0xd0, + 0x0a, 0xf9, 0xee, 0x13, 0xee, 0x07, 0xc7, 0xe9, 0xe2, 0x3e, 0x80, 0xf3, 0x39, 0x5e, 0x9e, 0xd0, + 0x98, 0x13, 0x74, 0x0b, 0xca, 0x8c, 0x74, 0x28, 0xeb, 0x2a, 0xe6, 0xd9, 0xb5, 0x77, 0xbc, 0xc3, + 0xb1, 0xf1, 0x8c, 0x80, 0x64, 0xc2, 0x86, 0xd9, 0xfd, 0xb1, 0x08, 0xb3, 0xb9, 0x7d, 0x34, 0x0f, + 0x85, 0xcd, 0x96, 0x6d, 0x2d, 0x5b, 0x2b, 0x35, 0x5c, 0xd8, 0x6c, 0x21, 0x1b, 0x2a, 0x5b, 0xa9, + 0xf0, 0xdb, 0x11, 0x31, 0xb6, 0x67, 0x24, 0xba, 0x08, 0xa5, 0xcd, 0xf8, 0x09, 0x27, 0xca, 0xf0, + 0x2a, 0xd6, 0x04, 0x42, 0x30, 0xb3, 0x1d, 0x7e, 0x47, 0xb4, 0x99, 0x58, 0xad, 0x91, 0x03, 0xe5, + 0x47, 0x3e, 0x23, 0xb1, 0xb0, 0x4b, 0x12, 0xb7, 0x59, 0xb0, 0x2d, 0x6c, 0x76, 0x50, 0x13, 0x6a, + 0x1b, 0x8c, 0xf8, 0x82, 0x74, 0xef, 0x0a, 0xbb, 0xbc, 0x6c, 0xad, 0xcc, 0xae, 0x39, 0x9e, 0x4e, + 0x0a, 0x2f, 0x4b, 0x0a, 0xef, 0x71, 0x96, 0x14, 0xcd, 0xea, 0xcb, 0xfd, 0xa5, 0x73, 0x3f, 0xfc, + 0x21, 0x7d, 0x37, 0x14, 0x43, 0x77, 0x00, 0x1e, 0xfa, 0x5c, 0x3c, 0xe1, 0x0a, 0xa4, 0x72, 0x2c, + 0xc8, 0x8c, 0x02, 0xc8, 0xc9, 0xa0, 0x45, 0x00, 0xe5, 0x84, 0x0d, 0x9a, 0xc6, 0xc2, 0xae, 0x2a, + 0xdd, 0x73, 0x3b, 0x68, 0x19, 0x66, 0x5b, 0x84, 0x77, 0x58, 0x98, 0xa8, 0x50, 0xd7, 0x94, 0x7b, + 0xf2, 0x5b, 0x12, 0x41, 0x7b, 0xf0, 0xf1, 0x20, 0x21, 0x36, 0x28, 0x86, 0xdc, 0x8e, 0x8c, 0xe5, + 0xf6, 0x8e, 0xcf, 0x48, 0xd7, 0x9e, 0x55, 0xee, 0x32, 0x94, 0xf4, 0xaf, 0xf6, 0x04, 0xb7, 0xeb, + 0x2a, 0xc8, 0x19, 0xe9, 0xfe, 0x54, 0x86, 0xfa, 0xb6, 0xcc, 0xf1, 0x2c, 0x1d, 
0x16, 0xa0, 0x88, + 0x49, 0xcf, 0xc4, 0x46, 0x2e, 0x91, 0x07, 0xd0, 0x22, 0xbd, 0x30, 0x0e, 0x95, 0x56, 0x05, 0x65, + 0xf8, 0xbc, 0x97, 0xb4, 0xbd, 0x83, 0x5d, 0x9c, 0xe3, 0x40, 0x0e, 0x54, 0xef, 0xbd, 0x48, 0x28, + 0x93, 0x29, 0x55, 0x54, 0x30, 0x43, 0x1a, 0x3d, 0x85, 0xb9, 0x6c, 0x7d, 0x57, 0x08, 0x26, 0x13, + 0x55, 0xa6, 0xd1, 0xcd, 0xf1, 0x34, 0xca, 0x2b, 0xe5, 0x8d, 0xc8, 0xdc, 0x8b, 0x05, 0x1b, 0xe0, + 0x51, 0x1c, 0x69, 0xe1, 0x36, 0xe1, 0x5c, 0x6a, 0xa8, 0xc2, 0x8f, 0x33, 0x52, 0xaa, 0xf3, 0x39, + 0xa3, 0xb1, 0x20, 0x71, 0x57, 0x85, 0xbe, 0x86, 0x87, 0xb4, 0x54, 0x27, 0x5b, 0x6b, 0x75, 0x2a, + 0x27, 0x52, 0x67, 0x44, 0xc6, 0xa8, 0x33, 0xb2, 0x87, 0xd6, 0xa1, 0xb4, 0xe1, 0x77, 0x76, 0x88, + 0x8a, 0xf2, 0xec, 0xda, 0xe2, 0x38, 0xa0, 0x3a, 0xfe, 0x52, 0x85, 0x95, 0xab, 0x42, 0x3d, 0x87, + 0xb5, 0x08, 0xfa, 0x06, 0xea, 0xf7, 0x62, 0x11, 0x8a, 0x88, 0xf4, 0x55, 0xc4, 0x6a, 0x32, 0x62, + 0xcd, 0xf5, 0x57, 0xfb, 0x4b, 0x1f, 0x4c, 0xbd, 0x78, 0x52, 0x11, 0x46, 0x0d, 0x92, 0x93, 0xf2, + 0x72, 0x10, 0x78, 0x04, 0x0f, 0x3d, 0x83, 0xf9, 0x4c, 0xd9, 0xcd, 0x38, 0x49, 0x05, 0xb7, 0x41, + 0x59, 0xbd, 0x76, 0x42, 0xab, 0xb5, 0x90, 0x36, 0xfb, 0x10, 0x92, 0x73, 0x07, 0xd0, 0x78, 0xac, + 0x64, 0x4e, 0xed, 0x92, 0x41, 0x96, 0x53, 0xbb, 0x64, 0x20, 0xcb, 0x7a, 0xcf, 0x8f, 0x52, 0x5d, + 0xee, 0x35, 0xac, 0x89, 0xf5, 0xc2, 0x6d, 0x4b, 0x22, 0x8c, 0xbb, 0xf7, 0x54, 0x08, 0x5f, 0xc1, + 0x85, 0x09, 0xaa, 0x4e, 0x80, 0xb8, 0x9a, 0x87, 0x18, 0xcf, 0xe9, 0x03, 0x48, 0xf7, 0x97, 0x22, + 0xd4, 0xf3, 0x01, 0x43, 0xab, 0x70, 0x41, 0xdb, 0x89, 0x49, 0xaf, 0x45, 0x12, 0x46, 0x3a, 0xf2, + 0x96, 0x30, 0xe0, 0x93, 0x8e, 0xd0, 0x1a, 0x5c, 0xdc, 0xec, 0x9b, 0x6d, 0x9e, 0x13, 0x29, 0xa8, + 0x7a, 0x9c, 0x78, 0x86, 0x28, 0xbc, 0xa5, 0xa1, 0x94, 0x27, 0x72, 0x42, 0x45, 0x15, 0xb0, 0x8f, + 0x8e, 0xce, 0x2a, 0x6f, 0xa2, 0xac, 0x8e, 0xdb, 0x64, 0x5c, 0xf4, 0x29, 0x54, 0xf4, 0x41, 0x56, + 0x98, 0x57, 0x8e, 0xfe, 0x84, 0x06, 0xcb, 0x64, 0xa4, 0xb8, 0xb6, 0x83, 0xdb, 0xa5, 0x53, 0x88, + 0x1b, 0x19, 0xe7, 0x3e, 0x38, 0xd3, 0x55, 0x3e, 0x4d, 0x0a, 0xb8, 0x3f, 0x5b, 0x70, 0x7e, 0xec, + 0x43, 0xf2, 0xd5, 0x50, 0xf7, 0xa6, 0x86, 0x50, 0x6b, 0xd4, 0x82, 0x92, 0xae, 0xfc, 0x82, 0x52, + 0xd8, 0x3b, 0x81, 0xc2, 0x5e, 0xae, 0xec, 0xb5, 0xb0, 0x73, 0x1b, 0xe0, 0x6c, 0xc9, 0xea, 0xfe, + 0x6a, 0xc1, 0x9c, 0xa9, 0x32, 0xf3, 0xc4, 0xfa, 0xb0, 0x90, 0x95, 0x50, 0xb6, 0x67, 0x1e, 0xdb, + 0x5b, 0x53, 0x0b, 0x54, 0xb3, 0x79, 0x87, 0xe5, 0xb4, 0x8e, 0x63, 0x70, 0xce, 0x46, 0x96, 0x57, + 0x87, 0x58, 0x4f, 0xa5, 0xf9, 0x65, 0x98, 0xdb, 0x16, 0xbe, 0x48, 0xf9, 0xd4, 0x97, 0xc3, 0xfd, + 0xc7, 0x82, 0xf9, 0x8c, 0xc7, 0x58, 0xf7, 0x3e, 0x54, 0xf7, 0x08, 0x13, 0xe4, 0x05, 0xe1, 0xc6, + 0x2a, 0x7b, 0xdc, 0xaa, 0xaf, 0x15, 0x07, 0x1e, 0x72, 0xa2, 0x75, 0xa8, 0x72, 0x85, 0x43, 0xb2, + 0x40, 0x2d, 0x4e, 0x93, 0x32, 0xdf, 0x1b, 0xf2, 0xa3, 0x06, 0xcc, 0x44, 0x34, 0xe0, 0xa6, 0x66, + 0xde, 0x9e, 0x26, 0xf7, 0x90, 0x06, 0x58, 0x31, 0xa2, 0x8f, 0xa1, 0xfa, 0xdc, 0x67, 0x71, 0x18, + 0x07, 0x59, 0x15, 0x2c, 0x4d, 0x13, 0x7a, 0xaa, 0xf9, 0xf0, 0x50, 0x40, 0x76, 0x3a, 0x65, 0x7d, + 0x86, 0x1e, 0x40, 0xb9, 0x1b, 0x06, 0x84, 0x0b, 0xed, 0x92, 0xe6, 0x9a, 0xbc, 0xe4, 0x5f, 0xed, + 0x2f, 0x5d, 0xcb, 0xdd, 0xe2, 0x34, 0x21, 0xb1, 0x6c, 0x76, 0xfd, 0x30, 0x26, 0x8c, 0x37, 0x02, + 0x7a, 0x43, 0x8b, 0x78, 0x2d, 0xf5, 0x83, 0x0d, 0x82, 0xc4, 0x0a, 0xf5, 0x5d, 0xad, 0xee, 0x8b, + 0xb3, 0x61, 0x69, 0x04, 0x59, 0x06, 0xb1, 0xdf, 0x27, 0xe6, 0x6d, 0x56, 0x6b, 0xd9, 0x38, 0x74, + 0x64, 0x9e, 0x77, 0x55, 0x4b, 0x55, 0xc5, 0x86, 0x42, 0xeb, 0x50, 0xe1, 0xc2, 0x67, 0xf2, 0xce, + 0x29, 
0x9d, 0xb0, 0xe3, 0xc9, 0x04, 0xd0, 0x67, 0x50, 0xeb, 0xd0, 0x7e, 0x12, 0x11, 0x29, 0x5d, + 0x3e, 0xa1, 0xf4, 0x81, 0x88, 0x4c, 0x3d, 0xc2, 0x18, 0x65, 0xaa, 0xd7, 0xaa, 0x61, 0x4d, 0xa0, + 0x0f, 0x61, 0x2e, 0x61, 0x34, 0x60, 0x84, 0xf3, 0x2f, 0x18, 0x4d, 0x13, 0xf3, 0xc2, 0x9e, 0x97, + 0x97, 0xf7, 0xa3, 0xfc, 0x01, 0x1e, 0xe5, 0x73, 0xff, 0x2e, 0x40, 0x3d, 0x9f, 0x22, 0x63, 0x4d, + 0xe8, 0x03, 0x28, 0xeb, 0x84, 0xd3, 0xb9, 0x7e, 0x36, 0x1f, 0x6b, 0x84, 0x89, 0x3e, 0xb6, 0xa1, + 0xd2, 0x49, 0x99, 0xea, 0x50, 0x75, 0xdf, 0x9a, 0x91, 0xd2, 0x52, 0x41, 0x85, 0x1f, 0x29, 0x1f, + 0x17, 0xb1, 0x26, 0x64, 0xd3, 0x3a, 0x9c, 0x53, 0x4e, 0xd7, 0xb4, 0x0e, 0xc5, 0xf2, 0xf1, 0xab, + 0xbc, 0x51, 0xfc, 0xaa, 0xa7, 0x8e, 0x9f, 0xfb, 0x9b, 0x05, 0xb5, 0x61, 0x6d, 0xe5, 0xbc, 0x6b, + 0xbd, 0xb1, 0x77, 0x47, 0x3c, 0x53, 0x38, 0x9b, 0x67, 0x2e, 0x41, 0x99, 0x0b, 0x46, 0xfc, 0xbe, + 0x1e, 0xa9, 0xb0, 0xa1, 0xe4, 0x2d, 0xd6, 0xe7, 0x81, 0x8a, 0x50, 0x1d, 0xcb, 0xa5, 0xfb, 0xaf, + 0x05, 0x73, 0x23, 0xe5, 0xfe, 0xbf, 0xda, 0x72, 0x11, 0x4a, 0x11, 0xd9, 0x23, 0x7a, 0xe8, 0x2b, + 0x62, 0x4d, 0xc8, 0x5d, 0xbe, 0x43, 0x99, 0x50, 0xca, 0xd5, 0xb1, 0x26, 0xa4, 0xce, 0x5d, 0x22, + 0xfc, 0x30, 0x52, 0xf7, 0x52, 0x1d, 0x1b, 0x4a, 0xea, 0x9c, 0xb2, 0xc8, 0x34, 0xbe, 0x72, 0x89, + 0x5c, 0x98, 0x09, 0xe3, 0x1e, 0x35, 0x69, 0xa3, 0x3a, 0x9b, 0x6d, 0x9a, 0xb2, 0x0e, 0xd9, 0x8c, + 0x7b, 0x14, 0xab, 0x33, 0x74, 0x19, 0xca, 0xcc, 0x8f, 0x03, 0x92, 0x75, 0xbd, 0x35, 0xc9, 0x85, + 0xe5, 0x0e, 0x36, 0x07, 0xae, 0x0b, 0x75, 0x35, 0x38, 0x6e, 0x11, 0x2e, 0xc7, 0x14, 0x99, 0xd6, + 0x5d, 0x5f, 0xf8, 0xca, 0xec, 0x3a, 0x56, 0x6b, 0xf7, 0x3a, 0xa0, 0x87, 0x21, 0x17, 0x4f, 0xd5, + 0xc0, 0xcb, 0x8f, 0x9b, 0x2a, 0xb7, 0xe1, 0xc2, 0x08, 0xb7, 0x79, 0x16, 0x3e, 0x39, 0x34, 0x57, + 0x5e, 0x1d, 0xbf, 0x71, 0xd5, 0x5c, 0xed, 0x69, 0xc1, 0xd1, 0xf1, 0x72, 0xed, 0xaf, 0x22, 0x54, + 0x36, 0xf4, 0x5f, 0x06, 0xe8, 0x31, 0xd4, 0x86, 0x63, 0x2b, 0x72, 0xc7, 0x61, 0x0e, 0xcf, 0xbf, + 0xce, 0x95, 0x23, 0x79, 0x8c, 0x7e, 0xf7, 0xa1, 0xa4, 0x06, 0x78, 0x34, 0xe1, 0xdd, 0xc9, 0x4f, + 0xf6, 0xce, 0xd1, 0x03, 0xf1, 0xaa, 0x25, 0x91, 0xd4, 0xa3, 0x3d, 0x09, 0x29, 0xdf, 0x6e, 0x3b, + 0x4b, 0xc7, 0xbc, 0xf6, 0x68, 0x0b, 0xca, 0xe6, 0x26, 0x9b, 0xc4, 0x9a, 0x7f, 0x9a, 0x9d, 0xe5, + 0xe9, 0x0c, 0x1a, 0x6c, 0xd5, 0x42, 0x5b, 0xc3, 0x09, 0x6a, 0x92, 0x6a, 0xf9, 0x34, 0x70, 0x8e, + 0x39, 0x5f, 0xb1, 0x56, 0x2d, 0xf4, 0x0c, 0x66, 0x73, 0x81, 0x46, 0x13, 0x02, 0x3a, 0x9e, 0x35, + 0xce, 0xbb, 0xc7, 0x70, 0x69, 0x65, 0x9b, 0xf5, 0x97, 0xaf, 0x17, 0xad, 0xdf, 0x5f, 0x2f, 0x5a, + 0x7f, 0xbe, 0x5e, 0xb4, 0xda, 0x65, 0x55, 0xf2, 0xef, 0xfd, 0x17, 0x00, 0x00, 0xff, 0xff, 0x54, + 0x8e, 0x72, 0x11, 0x36, 0x12, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. 
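// The hand-rolled marshal hunks below write each new field with a hard-coded
// wire tag computed as (field_number << 3) | wire_type, where wire type 2
// means length-delimited. A quick cross-check of the tag bytes that appear
// below (these constants are illustrative, not part of the generated file):
const (
	tagParents       = 12<<3 | 2 // 0x62: UsageRecord.Parents, field 12
	tagWarnings      = 4<<3 | 2  // 0x22: StatusResponse.Warnings, field 4
	tagProgressGroup = 8<<3 | 2  // 0x42: Vertex.ProgressGroup, field 8
)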
@@ -1751,6 +1873,15 @@ func (m *UsageRecord) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Parents) > 0 { + for iNdEx := len(m.Parents) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Parents[iNdEx]) + copy(dAtA[i:], m.Parents[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Parents[iNdEx]))) + i-- + dAtA[i] = 0x62 + } + } if m.Shared { i-- if m.Shared { @@ -2237,6 +2368,20 @@ func (m *StatusResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.Warnings) > 0 { + for iNdEx := len(m.Warnings) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Warnings[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x22 + } + } if len(m.Logs) > 0 { for iNdEx := len(m.Logs) - 1; iNdEx >= 0; iNdEx-- { { @@ -2306,6 +2451,18 @@ func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if m.ProgressGroup != nil { + { + size, err := m.ProgressGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } if len(m.Error) > 0 { i -= len(m.Error) copy(dAtA[i:], m.Error) @@ -2314,23 +2471,23 @@ func (m *Vertex) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x3a } if m.Completed != nil { - n6, err6 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err6 != nil { - return 0, err6 - } - i -= n6 - i = encodeVarintControl(dAtA, i, uint64(n6)) - i-- - dAtA[i] = 0x32 - } - if m.Started != nil { - n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + n7, err7 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) if err7 != nil { return 0, err7 } i -= n7 i = encodeVarintControl(dAtA, i, uint64(n7)) i-- + dAtA[i] = 0x32 + } + if m.Started != nil { + n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err8 != nil { + return 0, err8 + } + i -= n8 + i = encodeVarintControl(dAtA, i, uint64(n8)) + i-- dAtA[i] = 0x2a } if m.Cached { @@ -2394,31 +2551,31 @@ func (m *VertexStatus) MarshalToSizedBuffer(dAtA []byte) (int, error) { copy(dAtA[i:], m.XXX_unrecognized) } if m.Completed != nil { - n8, err8 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) - if err8 != nil { - return 0, err8 - } - i -= n8 - i = encodeVarintControl(dAtA, i, uint64(n8)) - i-- - dAtA[i] = 0x42 - } - if m.Started != nil { - n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + n9, err9 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Completed, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Completed):]) if err9 != nil { return 0, err9 } i -= n9 i = encodeVarintControl(dAtA, i, uint64(n9)) i-- + dAtA[i] = 0x42 + } + if m.Started != nil { + n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(*m.Started, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(*m.Started):]) + if err10 
!= nil { + return 0, err10 + } + i -= n10 + i = encodeVarintControl(dAtA, i, uint64(n10)) + i-- dAtA[i] = 0x3a } - n10, err10 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err10 != nil { - return 0, err10 + n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err11 != nil { + return 0, err11 } - i -= n10 - i = encodeVarintControl(dAtA, i, uint64(n10)) + i -= n11 + i = encodeVarintControl(dAtA, i, uint64(n11)) i-- dAtA[i] = 0x32 if m.Total != 0 { @@ -2491,12 +2648,12 @@ func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0x18 } - n11, err11 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) - if err11 != nil { - return 0, err11 + n12, err12 := github_com_gogo_protobuf_types.StdTimeMarshalTo(m.Timestamp, dAtA[i-github_com_gogo_protobuf_types.SizeOfStdTime(m.Timestamp):]) + if err12 != nil { + return 0, err12 } - i -= n11 - i = encodeVarintControl(dAtA, i, uint64(n11)) + i -= n12 + i = encodeVarintControl(dAtA, i, uint64(n12)) i-- dAtA[i] = 0x12 if len(m.Vertex) > 0 { @@ -2509,6 +2666,94 @@ func (m *VertexLog) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *VertexWarning) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *VertexWarning) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *VertexWarning) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintControl(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintControl(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x2a + } + if len(m.Detail) > 0 { + for iNdEx := len(m.Detail) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Detail[iNdEx]) + copy(dAtA[i:], m.Detail[iNdEx]) + i = encodeVarintControl(dAtA, i, uint64(len(m.Detail[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Short) > 0 { + i -= len(m.Short) + copy(dAtA[i:], m.Short) + i = encodeVarintControl(dAtA, i, uint64(len(m.Short))) + i-- + dAtA[i] = 0x1a + } + if m.Level != 0 { + i = encodeVarintControl(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Vertex) > 0 { + i -= len(m.Vertex) + copy(dAtA[i:], m.Vertex) + i = encodeVarintControl(dAtA, i, uint64(len(m.Vertex))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *BytesMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2737,6 +2982,12 @@ func (m *UsageRecord) Size() (n int) { if m.Shared { n += 2 } + if len(m.Parents) > 0 { + for _, s := range m.Parents { + l = len(s) 
+ n += 1 + l + sovControl(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2938,6 +3189,12 @@ func (m *StatusResponse) Size() (n int) { n += 1 + l + sovControl(uint64(l)) } } + if len(m.Warnings) > 0 { + for _, e := range m.Warnings { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -2979,6 +3236,10 @@ func (m *Vertex) Size() (n int) { if l > 0 { n += 1 + l + sovControl(uint64(l)) } + if m.ProgressGroup != nil { + l = m.ProgressGroup.Size() + n += 1 + l + sovControl(uint64(l)) + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -3050,6 +3311,49 @@ func (m *VertexLog) Size() (n int) { return n } +func (m *VertexWarning) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Vertex) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Level != 0 { + n += 1 + sovControl(uint64(m.Level)) + } + l = len(m.Short) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Detail) > 0 { + for _, b := range m.Detail { + l = len(b) + n += 1 + l + sovControl(uint64(l)) + } + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sovControl(uint64(l)) + } + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovControl(uint64(l)) + } + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovControl(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *BytesMessage) Size() (n int) { if m == nil { return 0 @@ -3741,6 +4045,38 @@ func (m *UsageRecord) Unmarshal(dAtA []byte) error { } } m.Shared = bool(v != 0) + case 12: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Parents", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Parents = append(m.Parents, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -5338,6 +5674,40 @@ func (m *StatusResponse) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Warnings", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Warnings = append(m.Warnings, &VertexWarning{}) + if err := m.Warnings[len(m.Warnings)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -5609,6 +5979,42 @@ func (m *Vertex) Unmarshal(dAtA []byte) error { } m.Error = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: 
wrong wireType = %d for field ProgressGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProgressGroup == nil { + m.ProgressGroup = &pb.ProgressGroup{} + } + if err := m.ProgressGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipControl(dAtA[iNdEx:]) @@ -6090,6 +6496,276 @@ func (m *VertexLog) Unmarshal(dAtA []byte) error { } return nil } +func (m *VertexWarning) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: VertexWarning: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: VertexWarning: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Vertex", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Vertex = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Short", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Short = append(m.Short[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Short == nil { + m.Short = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Detail = append(m.Detail, make([]byte, postIndex-iNdEx)) + copy(m.Detail[len(m.Detail)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &pb.SourceInfo{} + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowControl + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthControl + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthControl + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, &pb.Range{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipControl(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthControl + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *BytesMessage) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/moby/buildkit/api/services/control/control.proto b/vendor/github.com/moby/buildkit/api/services/control/control.proto index 84a4680c4b..a468a293af 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/control.proto +++ b/vendor/github.com/moby/buildkit/api/services/control/control.proto @@ -41,13 +41,14 @@ message UsageRecord { bool Mutable = 2; bool InUse = 3; int64 Size = 4; - string Parent = 5; + string Parent = 5 [deprecated=true]; google.protobuf.Timestamp CreatedAt = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; google.protobuf.Timestamp LastUsedAt = 7 [(gogoproto.stdtime) = true]; int64 UsageCount = 8; string Description = 9; string RecordType = 10; bool Shared = 11; + repeated string Parents = 12; } message SolveRequest { @@ -103,6 +104,7 @@ message StatusResponse { repeated Vertex vertexes = 1; repeated VertexStatus statuses = 2; repeated VertexLog logs = 3; + repeated VertexWarning warnings = 4; } message Vertex { @@ -113,6 +115,7 @@ message Vertex { google.protobuf.Timestamp started = 5 [(gogoproto.stdtime) = true ]; google.protobuf.Timestamp completed = 6 [(gogoproto.stdtime) = true ]; string error = 7; // typed errors? + pb.ProgressGroup progressGroup = 8; } message VertexStatus { @@ -121,7 +124,6 @@ message VertexStatus { string name = 3; int64 current = 4; int64 total = 5; - // TODO: add started, completed google.protobuf.Timestamp timestamp = 6 [(gogoproto.stdtime) = true, (gogoproto.nullable) = false]; google.protobuf.Timestamp started = 7 [(gogoproto.stdtime) = true ]; google.protobuf.Timestamp completed = 8 [(gogoproto.stdtime) = true ]; @@ -134,6 +136,16 @@ message VertexLog { bytes msg = 4; } +message VertexWarning { + string vertex = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + int64 level = 2; + bytes short = 3; + repeated bytes detail = 4; + string url = 5; + pb.SourceInfo info = 6; + repeated pb.Range ranges = 7; +} + message BytesMessage { bytes data = 1; } diff --git a/vendor/github.com/moby/buildkit/api/services/control/generate.go b/vendor/github.com/moby/buildkit/api/services/control/generate.go index 9a3b24613e..ea624c4e08 100644 --- a/vendor/github.com/moby/buildkit/api/services/control/generate.go +++ b/vendor/github.com/moby/buildkit/api/services/control/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1 //nolint:golint +package moby_buildkit_v1 //nolint:revive //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. control.proto diff --git a/vendor/github.com/moby/buildkit/api/types/generate.go b/vendor/github.com/moby/buildkit/api/types/generate.go index 984bb74ce1..1689e7d7f1 100644 --- a/vendor/github.com/moby/buildkit/api/types/generate.go +++ b/vendor/github.com/moby/buildkit/api/types/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1_types //nolint:golint +package moby_buildkit_v1_types //nolint:revive //go:generate protoc -I=. -I=../../vendor/ -I=../../../../../ --gogo_out=plugins=grpc:. 
worker.proto diff --git a/vendor/github.com/moby/buildkit/cache/blobs.go b/vendor/github.com/moby/buildkit/cache/blobs.go index 767462aeef..8d2beefd06 100644 --- a/vendor/github.com/moby/buildkit/cache/blobs.go +++ b/vendor/github.com/moby/buildkit/cache/blobs.go @@ -1,19 +1,28 @@ package cache import ( + "compress/gzip" "context" + "fmt" + "io" + "os" + "strconv" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/diff" + "github.com/containerd/containerd/diff/walking" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" + "github.com/klauspost/compress/zstd" "github.com/moby/buildkit/session" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/winlayers" digest "github.com/opencontainers/go-digest" imagespecidentity "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -26,12 +35,13 @@ var ErrNoBlobs = errors.Errorf("no blobs for snapshot") // computeBlobChain ensures every ref in a parent chain has an associated blob in the content store. If // a blob is missing and createIfNeeded is true, then the blob will be created, otherwise ErrNoBlobs will // be returned. Caller must hold a lease when calling this function. -func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) error { +// If forceCompression is specified but the blob of compressionType doesn't exist, this function creates it. +func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded bool, comp compression.Config, s session.Group) error { if _, ok := leases.FromContext(ctx); !ok { return errors.Errorf("missing lease requirement for computeBlobChain") } - if err := sr.Finalize(ctx, true); err != nil { + if err := sr.Finalize(ctx); err != nil { return err } @@ -39,45 +49,82 @@ func (sr *immutableRef) computeBlobChain(ctx context.Context, createIfNeeded boo ctx = winlayers.UseWindowsLayerMode(ctx) } - return computeBlobChain(ctx, sr, createIfNeeded, compressionType, s) + // filter keeps track of which layers should actually be included in the blob chain. + // This is required for diff refs, which can include only a single layer from their parent + // refs rather than every single layer present among their ancestors. 
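The layerSet helper called on the next line is not part of this diff. As a rough sketch of what the filter plausibly holds (the IDs of every ref that must end up with its own blob), assuming the layerChain accessor that computeChainMetadata uses later in this file:

// Hypothetical sketch only: the real layerSet lives elsewhere in this
// package and may differ. It collects the IDs of the refs whose blobs
// computeBlobChain must actually materialize.
func layerSetSketch(sr *immutableRef) map[string]struct{} {
	filter := make(map[string]struct{})
	for _, layer := range sr.layerChain() {
		filter[layer.ID()] = struct{}{}
	}
	return filter
}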
+ filter := sr.layerSet() + + return computeBlobChain(ctx, sr, createIfNeeded, comp, s, filter) } -func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, compressionType compression.Type, s session.Group) error { - baseCtx := ctx +type compressor func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) + +func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool, comp compression.Config, s session.Group, filter map[string]struct{}) error { eg, ctx := errgroup.WithContext(ctx) - var currentDescr ocispec.Descriptor - if sr.parent != nil { + switch sr.kind() { + case Merge: + for _, parent := range sr.mergeParents { + parent := parent + eg.Go(func() error { + return computeBlobChain(ctx, parent, createIfNeeded, comp, s, filter) + }) + } + case Diff: + if _, ok := filter[sr.ID()]; !ok && sr.diffParents.upper != nil { + // This diff is just re-using the upper blob, compute that + eg.Go(func() error { + return computeBlobChain(ctx, sr.diffParents.upper, createIfNeeded, comp, s, filter) + }) + } + case Layer: eg.Go(func() error { - return computeBlobChain(ctx, sr.parent, createIfNeeded, compressionType, s) + return computeBlobChain(ctx, sr.layerParent, createIfNeeded, comp, s, filter) }) } - eg.Go(func() error { - dp, err := g.Do(ctx, sr.ID(), func(ctx context.Context) (interface{}, error) { - refInfo := sr.Info() - if refInfo.Blob != "" { - return nil, nil - } else if !createIfNeeded { - return nil, errors.WithStack(ErrNoBlobs) - } - var mediaType string - switch compressionType { - case compression.Uncompressed: - mediaType = ocispec.MediaTypeImageLayer - case compression.Gzip: - mediaType = ocispec.MediaTypeImageLayerGzip - default: - return nil, errors.Errorf("unknown layer compression type: %q", compressionType) - } + if _, ok := filter[sr.ID()]; ok { + eg.Go(func() error { + _, err := g.Do(ctx, fmt.Sprintf("%s-%t", sr.ID(), createIfNeeded), func(ctx context.Context) (interface{}, error) { + if sr.getBlob() != "" { + return nil, nil + } + if !createIfNeeded { + return nil, errors.WithStack(ErrNoBlobs) + } - var descr ocispec.Descriptor - var err error + var mediaType string + var compressorFunc compressor + var finalize func(context.Context, content.Store) (map[string]string, error) + switch comp.Type { + case compression.Uncompressed: + mediaType = ocispecs.MediaTypeImageLayer + case compression.Gzip: + compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) { + return gzipWriter(comp)(dest) + } + mediaType = ocispecs.MediaTypeImageLayerGzip + case compression.EStargz: + compressorFunc, finalize = compressEStargz(comp) + mediaType = ocispecs.MediaTypeImageLayerGzip + case compression.Zstd: + compressorFunc = func(dest io.Writer, _ string) (io.WriteCloser, error) { + return zstdWriter(comp)(dest) + } + mediaType = ocispecs.MediaTypeImageLayer + "+zstd" + default: + return nil, errors.Errorf("unknown layer compression type: %q", comp.Type) + } - if descr.Digest == "" { - // reference needs to be committed + var lowerRef *immutableRef + switch sr.kind() { + case Diff: + lowerRef = sr.diffParents.lower + case Layer: + lowerRef = sr.layerParent + } var lower []mount.Mount - if sr.parent != nil { - m, err := sr.parent.Mount(ctx, true, s) + if lowerRef != nil { + m, err := lowerRef.Mount(ctx, true, s) if err != nil { return nil, err } @@ -90,74 +137,160 @@ func computeBlobChain(ctx context.Context, sr *immutableRef, createIfNeeded bool defer release() } } - m, err := sr.Mount(ctx, true, s) - if err != nil { - return 
nil, err - } - upper, release, err := m.Mount() - if err != nil { - return nil, err - } - if release != nil { - defer release() - } - descr, err = sr.cm.Differ.Compare(ctx, lower, upper, - diff.WithMediaType(mediaType), - diff.WithReference(sr.ID()), - ) - if err != nil { - return nil, err - } - } - if descr.Annotations == nil { - descr.Annotations = map[string]string{} - } + var upperRef *immutableRef + switch sr.kind() { + case Diff: + upperRef = sr.diffParents.upper + default: + upperRef = sr + } + var upper []mount.Mount + if upperRef != nil { + m, err := upperRef.Mount(ctx, true, s) + if err != nil { + return nil, err + } + var release func() error + upper, release, err = m.Mount() + if err != nil { + return nil, err + } + if release != nil { + defer release() + } + } - info, err := sr.cm.ContentStore.Info(ctx, descr.Digest) + var desc ocispecs.Descriptor + var err error + + // Determine differ and error/log handling according to the platform, envvar and the snapshotter. + var enableOverlay, fallback, logWarnOnErr bool + if forceOvlStr := os.Getenv("BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF"); forceOvlStr != "" && sr.kind() != Diff { + enableOverlay, err = strconv.ParseBool(forceOvlStr) + if err != nil { + return nil, errors.Wrapf(err, "invalid boolean in BUILDKIT_DEBUG_FORCE_OVERLAY_DIFF") + } + fallback = false // prohibit fallback on debug + } else if !isTypeWindows(sr) { + enableOverlay, fallback = true, true + switch sr.cm.Snapshotter.Name() { + case "overlayfs", "stargz": + // overlayfs-based snapshotters should support overlay diff except when running an arbitrary diff + // (in which case lower and upper may differ by more than one layer), so print warn log on unexpected + // failure. + logWarnOnErr = sr.kind() != Diff + case "fuse-overlayfs", "native": + // not supported with fuse-overlayfs snapshotter which doesn't provide overlayfs mounts. + // TODO: add support for fuse-overlayfs + enableOverlay = false + } + } + if enableOverlay { + computed, ok, err := sr.tryComputeOverlayBlob(ctx, lower, upper, mediaType, sr.ID(), compressorFunc) + if !ok || err != nil { + if !fallback { + if !ok { + return nil, errors.Errorf("overlay mounts not detected (lower=%+v,upper=%+v)", lower, upper) + } + if err != nil { + return nil, errors.Wrapf(err, "failed to compute overlay diff") + } + } + if logWarnOnErr { + logrus.Warnf("failed to compute blob by overlay differ (ok=%v): %v", ok, err) + } + } + if ok { + desc = computed + } + } + + if desc.Digest == "" && !isTypeWindows(sr) && (comp.Type == compression.Zstd || comp.Type == compression.EStargz) { + // These compression types aren't supported by containerd differ. So try to compute diff on buildkit side. + // This case can be happen on containerd worker + non-overlayfs snapshotter (e.g. native). 
+ // See also: https://github.com/containerd/containerd/issues/4263 + desc, err = walking.NewWalkingDiff(sr.cm.ContentStore).Compare(ctx, lower, upper, + diff.WithMediaType(mediaType), + diff.WithReference(sr.ID()), + diff.WithCompressor(compressorFunc), + ) + if err != nil { + logrus.WithError(err).Warnf("failed to compute blob by buildkit differ") + } + } + + if desc.Digest == "" { + desc, err = sr.cm.Differ.Compare(ctx, lower, upper, + diff.WithMediaType(mediaType), + diff.WithReference(sr.ID()), + diff.WithCompressor(compressorFunc), + ) + if err != nil { + return nil, err + } + } + + if desc.Annotations == nil { + desc.Annotations = map[string]string{} + } + if finalize != nil { + a, err := finalize(ctx, sr.cm.ContentStore) + if err != nil { + return nil, errors.Wrapf(err, "failed to finalize compression") + } + for k, v := range a { + desc.Annotations[k] = v + } + } + info, err := sr.cm.ContentStore.Info(ctx, desc.Digest) + if err != nil { + return nil, err + } + + if diffID, ok := info.Labels[containerdUncompressed]; ok { + desc.Annotations[containerdUncompressed] = diffID + } else if mediaType == ocispecs.MediaTypeImageLayer { + desc.Annotations[containerdUncompressed] = desc.Digest.String() + } else { + return nil, errors.Errorf("unknown layer compression type") + } + + if err := sr.setBlob(ctx, desc); err != nil { + return nil, err + } + return nil, nil + }) if err != nil { - return nil, err + return err } - if diffID, ok := info.Labels[containerdUncompressed]; ok { - descr.Annotations[containerdUncompressed] = diffID - } else if compressionType == compression.Uncompressed { - descr.Annotations[containerdUncompressed] = descr.Digest.String() - } else { - return nil, errors.Errorf("unknown layer compression type") + if comp.Force { + if err := ensureCompression(ctx, sr, comp, s); err != nil { + return errors.Wrapf(err, "failed to ensure compression type of %q", comp.Type) + } } - - return descr, nil - + return nil }) - if err != nil { - return err - } + } - if dp != nil { - currentDescr = dp.(ocispec.Descriptor) - } - return nil - }) - err := eg.Wait() - if err != nil { + if err := eg.Wait(); err != nil { return err } - if currentDescr.Digest != "" { - if err := sr.setBlob(baseCtx, currentDescr); err != nil { - return err - } - } - return nil + return sr.computeChainMetadata(ctx, filter) } // setBlob associates a blob with the cache record. 
// A lease must be held for the blob when calling this function -// Caller should call Info() for knowing what current values are actually set -func (sr *immutableRef) setBlob(ctx context.Context, desc ocispec.Descriptor) error { +func (sr *immutableRef) setBlob(ctx context.Context, desc ocispecs.Descriptor) (rerr error) { if _, ok := leases.FromContext(ctx); !ok { return errors.Errorf("missing lease requirement for setBlob") } + defer func() { + if rerr == nil { + rerr = sr.linkBlob(ctx, desc) + } + }() diffID, err := diffIDFromDescriptor(desc) if err != nil { @@ -170,26 +303,14 @@ func (sr *immutableRef) setBlob(ctx context.Context, desc ocispec.Descriptor) er sr.mu.Lock() defer sr.mu.Unlock() - if getChainID(sr.md) != "" { + if sr.getBlob() != "" { return nil } - if err := sr.finalize(ctx, true); err != nil { + if err := sr.finalize(ctx); err != nil { return err } - p := sr.parent - var parentChainID digest.Digest - var parentBlobChainID digest.Digest - if p != nil { - pInfo := p.Info() - if pInfo.ChainID == "" || pInfo.BlobChainID == "" { - return errors.Errorf("failed to set blob for reference with non-addressable parent") - } - parentChainID = pInfo.ChainID - parentBlobChainID = pInfo.BlobChainID - } - if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.ID()}, leases.Resource{ ID: desc.Digest.String(), Type: "content", @@ -197,30 +318,200 @@ func (sr *immutableRef) setBlob(ctx context.Context, desc ocispec.Descriptor) er return err } - queueDiffID(sr.md, diffID.String()) - queueBlob(sr.md, desc.Digest.String()) - chainID := diffID - blobChainID := imagespecidentity.ChainID([]digest.Digest{desc.Digest, diffID}) - if parentChainID != "" { - chainID = imagespecidentity.ChainID([]digest.Digest{parentChainID, chainID}) - blobChainID = imagespecidentity.ChainID([]digest.Digest{parentBlobChainID, blobChainID}) + sr.queueDiffID(diffID) + sr.queueBlob(desc.Digest) + sr.queueMediaType(desc.MediaType) + sr.queueBlobSize(desc.Size) + sr.appendURLs(desc.URLs) + if err := sr.commitMetadata(); err != nil { + return err } - queueChainID(sr.md, chainID.String()) - queueBlobChainID(sr.md, blobChainID.String()) - queueMediaType(sr.md, desc.MediaType) - queueBlobSize(sr.md, desc.Size) - if err := sr.md.Commit(); err != nil { + + return nil +} + +func (sr *immutableRef) computeChainMetadata(ctx context.Context, filter map[string]struct{}) error { + if _, ok := leases.FromContext(ctx); !ok { + return errors.Errorf("missing lease requirement for computeChainMetadata") + } + + sr.mu.Lock() + defer sr.mu.Unlock() + + if sr.getChainID() != "" { + return nil + } + + var chainID digest.Digest + var blobChainID digest.Digest + + switch sr.kind() { + case BaseLayer: + if _, ok := filter[sr.ID()]; !ok { + return nil + } + diffID := sr.getDiffID() + chainID = diffID + blobChainID = imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID}) + case Layer: + if _, ok := filter[sr.ID()]; !ok { + return nil + } + if _, ok := filter[sr.layerParent.ID()]; ok { + if parentChainID := sr.layerParent.getChainID(); parentChainID != "" { + chainID = parentChainID + } else { + return errors.Errorf("failed to set chain for reference with non-addressable parent %q", sr.layerParent.GetDescription()) + } + if parentBlobChainID := sr.layerParent.getBlobChainID(); parentBlobChainID != "" { + blobChainID = parentBlobChainID + } else { + return errors.Errorf("failed to set blobchain for reference with non-addressable parent %q", sr.layerParent.GetDescription()) + } + } + diffID := 
digest.Digest(sr.getDiffID()) + chainID = imagespecidentity.ChainID([]digest.Digest{chainID, diffID}) + blobID := imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID}) + blobChainID = imagespecidentity.ChainID([]digest.Digest{blobChainID, blobID}) + case Merge: + baseInput := sr.mergeParents[0] + if _, ok := filter[baseInput.ID()]; !ok { + // not enough information to compute chain at this time + return nil + } + chainID = baseInput.getChainID() + blobChainID = baseInput.getBlobChainID() + for _, mergeParent := range sr.mergeParents[1:] { + for _, layer := range mergeParent.layerChain() { + if _, ok := filter[layer.ID()]; !ok { + // not enough information to compute chain at this time + return nil + } + diffID := digest.Digest(layer.getDiffID()) + chainID = imagespecidentity.ChainID([]digest.Digest{chainID, diffID}) + blobID := imagespecidentity.ChainID([]digest.Digest{digest.Digest(layer.getBlob()), diffID}) + blobChainID = imagespecidentity.ChainID([]digest.Digest{blobChainID, blobID}) + } + } + case Diff: + if _, ok := filter[sr.ID()]; ok { + // this diff is its own blob + diffID := sr.getDiffID() + chainID = diffID + blobChainID = imagespecidentity.ChainID([]digest.Digest{digest.Digest(sr.getBlob()), diffID}) + } else { + // re-using upper blob + chainID = sr.diffParents.upper.getChainID() + blobChainID = sr.diffParents.upper.getBlobChainID() + } + } + + sr.queueChainID(chainID) + sr.queueBlobChainID(blobChainID) + if err := sr.commitMetadata(); err != nil { return err } return nil } func isTypeWindows(sr *immutableRef) bool { - if GetLayerType(sr) == "windows" { + if sr.GetLayerType() == "windows" { return true } - if parent := sr.parent; parent != nil { - return isTypeWindows(parent) + switch sr.kind() { + case Merge: + for _, p := range sr.mergeParents { + if isTypeWindows(p) { + return true + } + } + case Layer: + return isTypeWindows(sr.layerParent) } return false } + +// ensureCompression ensures the specified ref has the blob of the specified compression Type. +func ensureCompression(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) error { + _, err := g.Do(ctx, fmt.Sprintf("%s-%d", ref.ID(), comp.Type), func(ctx context.Context) (interface{}, error) { + desc, err := ref.ociDesc(ctx, ref.descHandlers, true) + if err != nil { + return nil, err + } + + // Resolve converters + layerConvertFunc, err := getConverter(ctx, ref.cm.ContentStore, desc, comp) + if err != nil { + return nil, err + } else if layerConvertFunc == nil { + if isLazy, err := ref.isLazy(ctx); err != nil { + return nil, err + } else if isLazy { + // This ref can be used as the specified compressionType. Keep it lazy. + return nil, nil + } + return nil, ref.linkBlob(ctx, desc) + } + + // First, lookup local content store + if _, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil { + return nil, nil // found the compression variant. no need to convert. 
+ } + + // Convert layer compression type + if err := (lazyRefProvider{ + ref: ref, + desc: desc, + dh: ref.descHandlers[desc.Digest], + session: s, + }).Unlazy(ctx); err != nil { + return nil, err + } + newDesc, err := layerConvertFunc(ctx, ref.cm.ContentStore, desc) + if err != nil { + return nil, errors.Wrapf(err, "failed to convert") + } + + // Start to track converted layer + if err := ref.linkBlob(ctx, *newDesc); err != nil { + return nil, errors.Wrapf(err, "failed to add compression blob") + } + return nil, nil + }) + return err +} + +func gzipWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) { + return func(dest io.Writer) (io.WriteCloser, error) { + level := gzip.DefaultCompression + if comp.Level != nil { + level = *comp.Level + } + return gzip.NewWriterLevel(dest, level) + } +} + +func zstdWriter(comp compression.Config) func(io.Writer) (io.WriteCloser, error) { + return func(dest io.Writer) (io.WriteCloser, error) { + level := zstd.SpeedDefault + if comp.Level != nil { + level = toZstdEncoderLevel(*comp.Level) + } + return zstd.NewWriter(dest, zstd.WithEncoderLevel(level)) + } +} + +func toZstdEncoderLevel(level int) zstd.EncoderLevel { + // map zstd compression levels to go-zstd levels + // once we also have c based implementation move this to helper pkg + if level < 0 { + return zstd.SpeedDefault + } else if level < 3 { + return zstd.SpeedFastest + } else if level < 7 { + return zstd.SpeedDefault + } else if level < 9 { + return zstd.SpeedBetterCompression + } + return zstd.SpeedBestCompression +} diff --git a/vendor/github.com/moby/buildkit/cache/blobs_linux.go b/vendor/github.com/moby/buildkit/cache/blobs_linux.go new file mode 100644 index 0000000000..fcb8850a02 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/blobs_linux.go @@ -0,0 +1,112 @@ +//go:build linux +// +build linux + +package cache + +import ( + "bufio" + "context" + "io" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/mount" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/overlay" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var emptyDesc = ocispecs.Descriptor{} + +// computeOverlayBlob provides overlayfs-specialized method to compute +// diff between lower and upper snapshot. If the passed mounts cannot +// be computed (e.g. because the mounts aren't overlayfs), it returns +// an error. +func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { + // Get upperdir location if mounts are overlayfs that can be processed by this differ. + upperdir, err := overlay.GetUpperdir(lower, upper) + if err != nil { + // This is not an overlayfs snapshot. This is not an error so don't return error here + // and let the caller fallback to another differ. 
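overlay.GetUpperdir is vendored elsewhere; as an illustration of the kind of check it performs, here is a simplified, assumption-based extractor that only looks for the upperdir mount option (the real helper also validates the lower mounts):

package main

import (
	"fmt"
	"strings"

	"github.com/containerd/containerd/mount"
)

// upperdirOf reports the upperdir of an overlayfs mount, or false when
// the mount is not overlayfs. Simplified stand-in, not the vendored helper.
func upperdirOf(m mount.Mount) (string, bool) {
	if m.Type != "overlay" {
		return "", false
	}
	for _, opt := range m.Options {
		if strings.HasPrefix(opt, "upperdir=") {
			return strings.TrimPrefix(opt, "upperdir="), true
		}
	}
	return "", false
}

func main() {
	m := mount.Mount{Type: "overlay", Options: []string{
		"lowerdir=/var/lib/lower", "upperdir=/var/lib/upper", "workdir=/var/lib/work",
	}}
	dir, ok := upperdirOf(m)
	fmt.Println(dir, ok) // /var/lib/upper true
}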
+ return emptyDesc, false, nil + } + + cw, err := sr.cm.ContentStore.Writer(ctx, + content.WithRef(ref), + content.WithDescriptor(ocispecs.Descriptor{ + MediaType: mediaType, // most contentstore implementations just ignore this + })) + if err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to open writer") + } + defer func() { + if cw != nil { + if cerr := cw.Close(); cerr != nil { + bklog.G(ctx).WithError(cerr).Warnf("failed to close writer %q", ref) + } + } + }() + + bufW := bufio.NewWriterSize(cw, 128*1024) + var labels map[string]string + if compressorFunc != nil { + dgstr := digest.SHA256.Digester() + compressed, err := compressorFunc(bufW, mediaType) + if err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to get compressed stream") + } + err = overlay.WriteUpperdir(ctx, io.MultiWriter(compressed, dgstr.Hash()), upperdir, lower) + compressed.Close() + if err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to write compressed diff") + } + if labels == nil { + labels = map[string]string{} + } + labels[containerdUncompressed] = dgstr.Digest().String() + } else { + if err = overlay.WriteUpperdir(ctx, bufW, upperdir, lower); err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to write diff") + } + } + + if err := bufW.Flush(); err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to flush diff") + } + var commitopts []content.Opt + if labels != nil { + commitopts = append(commitopts, content.WithLabels(labels)) + } + dgst := cw.Digest() + if err := cw.Commit(ctx, 0, dgst, commitopts...); err != nil { + if !errdefs.IsAlreadyExists(err) { + return emptyDesc, false, errors.Wrap(err, "failed to commit") + } + } + if err := cw.Close(); err != nil { + return emptyDesc, false, err + } + cw = nil + cinfo, err := sr.cm.ContentStore.Info(ctx, dgst) + if err != nil { + return emptyDesc, false, errors.Wrap(err, "failed to get info from content store") + } + if cinfo.Labels == nil { + cinfo.Labels = make(map[string]string) + } + // Set uncompressed label if digest already existed without label + if _, ok := cinfo.Labels[containerdUncompressed]; !ok { + cinfo.Labels[containerdUncompressed] = labels[containerdUncompressed] + if _, err := sr.cm.ContentStore.Update(ctx, cinfo, "labels."+containerdUncompressed); err != nil { + return emptyDesc, false, errors.Wrap(err, "error setting uncompressed label") + } + } + + return ocispecs.Descriptor{ + MediaType: mediaType, + Size: cinfo.Size, + Digest: cinfo.Digest, + }, true, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go b/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go new file mode 100644 index 0000000000..2ccee770e2 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/blobs_nolinux.go @@ -0,0 +1,16 @@ +//go:build !linux +// +build !linux + +package cache + +import ( + "context" + + "github.com/containerd/containerd/mount" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +func (sr *immutableRef) tryComputeOverlayBlob(ctx context.Context, lower, upper []mount.Mount, mediaType string, ref string, compressorFunc compressor) (_ ocispecs.Descriptor, ok bool, err error) { + return ocispecs.Descriptor{}, true, errors.Errorf("overlayfs-based diff computing is unsupported") +} diff --git a/vendor/github.com/moby/buildkit/cache/config/config.go b/vendor/github.com/moby/buildkit/cache/config/config.go new file mode 100644 index 0000000000..5fcae329c6 --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/config/config.go 
@@ -0,0 +1,8 @@ +package config + +import "github.com/moby/buildkit/util/compression" + +type RefConfig struct { + Compression compression.Config + PreferNonDistributable bool +} diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go index 1c416abdba..a59523dd29 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/checksum.go @@ -12,11 +12,9 @@ import ( "sync" "github.com/docker/docker/pkg/fileutils" - "github.com/docker/docker/pkg/idtools" iradix "github.com/hashicorp/go-immutable-radix" "github.com/hashicorp/golang-lru/simplelru" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/locker" @@ -31,8 +29,6 @@ var errNotFound = errors.Errorf("not found") var defaultManager *cacheManager var defaultManagerOnce sync.Once -const keyContentHash = "buildkit.contenthash.v0" - func getDefaultManager() *cacheManager { defaultManagerOnce.Do(func() { lru, _ := simplelru.NewLRU(20, nil) // error is impossible on positive size @@ -58,15 +54,15 @@ func Checksum(ctx context.Context, ref cache.ImmutableRef, path string, opts Che return getDefaultManager().Checksum(ctx, ref, path, opts, s) } -func GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) { - return getDefaultManager().GetCacheContext(ctx, md, idmap) +func GetCacheContext(ctx context.Context, md cache.RefMetadata) (CacheContext, error) { + return getDefaultManager().GetCacheContext(ctx, md) } -func SetCacheContext(ctx context.Context, md *metadata.StorageItem, cc CacheContext) error { +func SetCacheContext(ctx context.Context, md cache.RefMetadata, cc CacheContext) error { return getDefaultManager().SetCacheContext(ctx, md, cc) } -func ClearCacheContext(md *metadata.StorageItem) { +func ClearCacheContext(md cache.RefMetadata) { getDefaultManager().clearCacheContext(md.ID()) } @@ -79,9 +75,12 @@ type Hashed interface { Digest() digest.Digest } -type IncludedPath struct { - Path string - Record *CacheRecord +type includedPath struct { + path string + record *CacheRecord + included bool + includeMatchInfo fileutils.MatchInfo + excludeMatchInfo fileutils.MatchInfo } type cacheManager struct { @@ -91,14 +90,20 @@ type cacheManager struct { } func (cm *cacheManager) Checksum(ctx context.Context, ref cache.ImmutableRef, p string, opts ChecksumOpts, s session.Group) (digest.Digest, error) { - cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref.Metadata()), ref.IdentityMapping()) + if ref == nil { + if p == "/" { + return digest.FromBytes(nil), nil + } + return "", errors.Errorf("%s: no such file or directory", p) + } + cc, err := cm.GetCacheContext(ctx, ensureOriginMetadata(ref)) if err != nil { return "", nil } return cc.Checksum(ctx, ref, p, opts, s) } -func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.StorageItem, idmap *idtools.IdentityMapping) (CacheContext, error) { +func (cm *cacheManager) GetCacheContext(ctx context.Context, md cache.RefMetadata) (CacheContext, error) { cm.locker.Lock(md.ID()) cm.lruMu.Lock() v, ok := cm.lru.Get(md.ID()) @@ -108,7 +113,7 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.Storag v.(*cacheContext).linkMap = map[string][][]byte{} return v.(*cacheContext), nil } - cc, err := newCacheContext(md, idmap) + cc, err := 
newCacheContext(md) if err != nil { cm.locker.Unlock(md.ID()) return nil, err @@ -120,14 +125,14 @@ func (cm *cacheManager) GetCacheContext(ctx context.Context, md *metadata.Storag return cc, nil } -func (cm *cacheManager) SetCacheContext(ctx context.Context, md *metadata.StorageItem, cci CacheContext) error { +func (cm *cacheManager) SetCacheContext(ctx context.Context, md cache.RefMetadata, cci CacheContext) error { cc, ok := cci.(*cacheContext) if !ok { return errors.Errorf("invalid cachecontext: %T", cc) } if md.ID() != cc.md.ID() { cc = &cacheContext{ - md: md, + md: cacheMetadata{md}, tree: cci.(*cacheContext).tree, dirtyMap: map[string]struct{}{}, linkMap: map[string][][]byte{}, @@ -151,7 +156,7 @@ func (cm *cacheManager) clearCacheContext(id string) { type cacheContext struct { mu sync.RWMutex - md *metadata.StorageItem + md cacheMetadata tree *iradix.Tree dirty bool // needs to be persisted to disk @@ -160,7 +165,20 @@ type cacheContext struct { node *iradix.Node dirtyMap map[string]struct{} linkMap map[string][][]byte - idmap *idtools.IdentityMapping +} + +type cacheMetadata struct { + cache.RefMetadata +} + +const keyContentHash = "buildkit.contenthash.v0" + +func (md cacheMetadata) GetContentHash() ([]byte, error) { + return md.GetExternal(keyContentHash) +} + +func (md cacheMetadata) SetContentHash(dt []byte) error { + return md.SetExternal(keyContentHash, dt) } type mount struct { @@ -201,13 +219,12 @@ func (m *mount) clean() error { return nil } -func newCacheContext(md *metadata.StorageItem, idmap *idtools.IdentityMapping) (*cacheContext, error) { +func newCacheContext(md cache.RefMetadata) (*cacheContext, error) { cc := &cacheContext{ - md: md, + md: cacheMetadata{md}, tree: iradix.New(), dirtyMap: map[string]struct{}{}, linkMap: map[string][][]byte{}, - idmap: idmap, } if err := cc.load(); err != nil { return nil, err @@ -216,7 +233,7 @@ func newCacheContext(md *metadata.StorageItem, idmap *idtools.IdentityMapping) ( } func (cc *cacheContext) load() error { - dt, err := cc.md.GetExternal(keyContentHash) + dt, err := cc.md.GetContentHash() if err != nil { return nil } @@ -257,7 +274,7 @@ func (cc *cacheContext) save() error { return err } - return cc.md.SetExternal(keyContentHash, dt) + return cc.md.SetContentHash(dt) } func keyPath(p string) string { @@ -398,12 +415,12 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, if opts.FollowLinks { for i, w := range includedPaths { - if w.Record.Type == CacheRecordTypeSymlink { - dgst, err := cc.checksumFollow(ctx, m, w.Path, opts.FollowLinks) + if w.record.Type == CacheRecordTypeSymlink { + dgst, err := cc.checksumFollow(ctx, m, w.path, opts.FollowLinks) if err != nil { return "", err } - includedPaths[i].Record = &CacheRecord{Digest: dgst} + includedPaths[i].record = &CacheRecord{Digest: dgst} } } } @@ -411,8 +428,8 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, return digest.FromBytes([]byte{}), nil } - if len(includedPaths) == 1 && path.Base(p) == path.Base(includedPaths[0].Path) { - return includedPaths[0].Record.Digest, nil + if len(includedPaths) == 1 && path.Base(p) == path.Base(includedPaths[0].path) { + return includedPaths[0].record.Digest, nil } digester := digest.Canonical.Digester() @@ -420,8 +437,8 @@ func (cc *cacheContext) Checksum(ctx context.Context, mountable cache.Mountable, if i != 0 { digester.Hash().Write([]byte{0}) } - digester.Hash().Write([]byte(path.Base(w.Path))) - digester.Hash().Write([]byte(w.Record.Digest)) + 
digester.Hash().Write([]byte(path.Base(w.path))) + digester.Hash().Write([]byte(w.record.Digest)) } return digester.Digest(), nil } @@ -450,7 +467,7 @@ func (cc *cacheContext) checksumFollow(ctx context.Context, m *mount, p string, } } -func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*IncludedPath, error) { +func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, opts ChecksumOpts) ([]*includedPath, error) { cc.mu.Lock() defer cc.mu.Unlock() @@ -481,11 +498,7 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o var includePatternMatcher *fileutils.PatternMatcher if len(opts.IncludePatterns) != 0 { - rootedIncludePatterns := make([]string, len(opts.IncludePatterns)) - for i, includePattern := range opts.IncludePatterns { - rootedIncludePatterns[i] = keyPath(includePattern) - } - includePatternMatcher, err = fileutils.NewPatternMatcher(rootedIncludePatterns) + includePatternMatcher, err = fileutils.NewPatternMatcher(opts.IncludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid includepatterns: %s", opts.IncludePatterns) } @@ -493,52 +506,90 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o var excludePatternMatcher *fileutils.PatternMatcher if len(opts.ExcludePatterns) != 0 { - rootedExcludePatterns := make([]string, len(opts.ExcludePatterns)) - for i, excludePattern := range opts.ExcludePatterns { - rootedExcludePatterns[i] = keyPath(excludePattern) - } - excludePatternMatcher, err = fileutils.NewPatternMatcher(rootedExcludePatterns) + excludePatternMatcher, err = fileutils.NewPatternMatcher(opts.ExcludePatterns) if err != nil { return nil, errors.Wrapf(err, "invalid excludepatterns: %s", opts.ExcludePatterns) } } - includedPaths := make([]*IncludedPath, 0, 2) + includedPaths := make([]*includedPath, 0, 2) txn := cc.tree.Txn() root = txn.Root() var ( - updated bool - iter *iradix.Seeker - k []byte - kOk bool + updated bool + iter *iradix.Iterator + k []byte + keyOk bool + origPrefix string + resolvedPrefix string ) + iter = root.Iterator() + if opts.Wildcard { - iter = root.Seek([]byte{}) - k, _, kOk = iter.Next() - } else { - k = convertPathToKey([]byte(p)) - if _, kOk = root.Get(k); kOk { - iter = root.Seek(k) + origPrefix, k, keyOk, err = wildcardPrefix(root, p) + if err != nil { + return nil, err } + } else { + origPrefix = p + k = convertPathToKey([]byte(origPrefix)) + + // We need to resolve symlinks here, in case the base path + // involves a symlink. That will match fsutil behavior of + // calling functions such as stat and walk. + var cr *CacheRecord + k, cr, err = getFollowLinks(root, k, true) + if err != nil { + return nil, err + } + keyOk = (cr != nil) + } + + if origPrefix != "" { + if keyOk { + iter.SeekLowerBound(append(append([]byte{}, k...), 0)) + } + + resolvedPrefix = string(convertKeyToPath(k)) + } else { + k, _, keyOk = iter.Next() } var ( - parentDirHeaders []*IncludedPath + parentDirHeaders []*includedPath lastMatchedDir string ) - for kOk { + for keyOk { fn := string(convertKeyToPath(k)) + // Convert the path prefix from what we found in the prefix + // tree to what the argument specified. + // + // For example, if the original 'p' argument was /a/b and there + // is a symlink a->c, we want fn to be /a/b/foo rather than + // /c/b/foo. This is necessary to ensure correct pattern + // matching. + // + // When wildcards are enabled, this translation applies to the + // portion of 'p' before any wildcards. 
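The translation the comment above describes can be isolated into a small, runnable example (illustrative only; the real code operates on prefix-tree keys rather than plain strings):

package main

import (
	"fmt"
	"strings"
)

// translatePrefix swaps the resolved (symlink-free) prefix of a walked
// path back to the prefix the caller originally asked for.
func translatePrefix(fn, resolvedPrefix, origPrefix string) string {
	if strings.HasPrefix(fn, resolvedPrefix) {
		return origPrefix + strings.TrimPrefix(fn, resolvedPrefix)
	}
	return fn
}

func main() {
	// With a symlink /a -> /c, the walk finds /c/b/foo, but the caller
	// asked for paths under /a/b.
	fmt.Println(translatePrefix("/c/b/foo", "/c/b", "/a/b")) // /a/b/foo
}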
+ if strings.HasPrefix(fn, resolvedPrefix) { + fn = origPrefix + strings.TrimPrefix(fn, resolvedPrefix) + } + for len(parentDirHeaders) != 0 { lastParentDir := parentDirHeaders[len(parentDirHeaders)-1] - if strings.HasPrefix(fn, lastParentDir.Path+"/") { + if strings.HasPrefix(fn, lastParentDir.path+"/") { break } parentDirHeaders = parentDirHeaders[:len(parentDirHeaders)-1] } + var parentDir *includedPath + if len(parentDirHeaders) != 0 { + parentDir = parentDirHeaders[len(parentDirHeaders)-1] + } dirHeader := false if len(k) > 0 && k[len(k)-1] == byte(0) { @@ -546,33 +597,55 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o fn = fn[:len(fn)-1] if fn == p && endsInSep { // We don't include the metadata header for a source dir which ends with a separator - k, _, kOk = iter.Next() + k, _, keyOk = iter.Next() continue } } + + maybeIncludedPath := &includedPath{path: fn} + var shouldInclude bool if opts.Wildcard { - if lastMatchedDir == "" || !strings.HasPrefix(fn, lastMatchedDir+"/") { + if p != "" && (lastMatchedDir == "" || !strings.HasPrefix(fn, lastMatchedDir+"/")) { include, err := path.Match(p, fn) if err != nil { return nil, err } if !include { - k, _, kOk = iter.Next() + k, _, keyOk = iter.Next() continue } lastMatchedDir = fn } - } else if !strings.HasPrefix(fn+"/", p+"/") { - k, _, kOk = iter.Next() - continue + + shouldInclude, err = shouldIncludePath( + strings.TrimSuffix(strings.TrimPrefix(fn+"/", lastMatchedDir+"/"), "/"), + includePatternMatcher, + excludePatternMatcher, + maybeIncludedPath, + parentDir, + ) + if err != nil { + return nil, err + } + } else { + if !strings.HasPrefix(fn+"/", p+"/") { + break + } + + shouldInclude, err = shouldIncludePath( + strings.TrimSuffix(strings.TrimPrefix(fn+"/", p+"/"), "/"), + includePatternMatcher, + excludePatternMatcher, + maybeIncludedPath, + parentDir, + ) + if err != nil { + return nil, err + } } - shouldInclude, err := shouldIncludePath(p, fn, includePatternMatcher, excludePatternMatcher) - if err != nil { - return nil, err - } if !shouldInclude && !dirHeader { - k, _, kOk = iter.Next() + k, _, keyOk = iter.Next() continue } @@ -590,19 +663,28 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o // dir. shouldInclude = false } + maybeIncludedPath.record = cr - if !shouldInclude { - if cr.Type == CacheRecordTypeDirHeader { - // We keep track of non-included parent dir headers in case an - // include pattern matches a file inside one of these dirs. - parentDirHeaders = append(parentDirHeaders, &IncludedPath{Path: fn, Record: cr}) + if shouldInclude { + for _, parentDir := range parentDirHeaders { + if !parentDir.included { + includedPaths = append(includedPaths, parentDir) + parentDir.included = true + } } - } else { - includedPaths = append(includedPaths, parentDirHeaders...) - parentDirHeaders = nil - includedPaths = append(includedPaths, &IncludedPath{Path: fn, Record: cr}) + includedPaths = append(includedPaths, maybeIncludedPath) + maybeIncludedPath.included = true } - k, _, kOk = iter.Next() + + if cr.Type == CacheRecordTypeDirHeader { + // We keep track of parent dir headers whether + // they are immediately included or not, in case + // an include pattern matches a file inside one + // of these dirs. 
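shouldIncludePath below leans on fileutils.MatchesUsingParentResults, which this diff adopts in place of plain Matches: a child path can reuse the MatchInfo computed for its parent directory instead of re-evaluating every pattern segment. A small usage sketch (the pattern and paths are made up for illustration):

package main

import (
	"fmt"

	"github.com/docker/docker/pkg/fileutils"
)

func main() {
	pm, err := fileutils.NewPatternMatcher([]string{"dir/**"})
	if err != nil {
		panic(err)
	}
	// Match the parent first, then feed its MatchInfo to the children,
	// mirroring the in-order walk of the prefix tree above.
	_, dirInfo, err := pm.MatchesUsingParentResults("dir", fileutils.MatchInfo{})
	if err != nil {
		panic(err)
	}
	matched, _, err := pm.MatchesUsingParentResults("dir/sub/file.go", dirInfo)
	if err != nil {
		panic(err)
	}
	fmt.Println(matched) // true
}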
+ parentDirHeaders = append(parentDirHeaders, maybeIncludedPath) + } + + k, _, keyOk = iter.Next() } cc.tree = txn.Commit() @@ -612,26 +694,42 @@ func (cc *cacheContext) includedPaths(ctx context.Context, m *mount, p string, o } func shouldIncludePath( - p string, candidate string, includePatternMatcher *fileutils.PatternMatcher, excludePatternMatcher *fileutils.PatternMatcher, + maybeIncludedPath *includedPath, + parentDir *includedPath, ) (bool, error) { + var ( + m bool + matchInfo fileutils.MatchInfo + err error + ) if includePatternMatcher != nil { - m, err := includePatternMatcher.Matches(filepath.FromSlash(candidate)) + if parentDir != nil { + m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, parentDir.includeMatchInfo) + } else { + m, matchInfo, err = includePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{}) + } if err != nil { return false, errors.Wrap(err, "failed to match includepatterns") } + maybeIncludedPath.includeMatchInfo = matchInfo if !m { return false, nil } } if excludePatternMatcher != nil { - m, err := excludePatternMatcher.Matches(filepath.FromSlash(candidate)) + if parentDir != nil { + m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, parentDir.excludeMatchInfo) + } else { + m, matchInfo, err = excludePatternMatcher.MatchesUsingParentResults(candidate, fileutils.MatchInfo{}) + } if err != nil { return false, errors.Wrap(err, "failed to match excludepatterns") } + maybeIncludedPath.excludeMatchInfo = matchInfo if m { return false, nil } @@ -640,6 +738,82 @@ func shouldIncludePath( return true, nil } +func wildcardPrefix(root *iradix.Node, p string) (string, []byte, bool, error) { + // For consistency with what the copy implementation in fsutil + // does: split pattern into non-wildcard prefix and rest of + // pattern, then follow symlinks when resolving the non-wildcard + // prefix. + + d1, d2 := splitWildcards(p) + if d1 == "/" { + return "", nil, false, nil + } + + linksWalked := 0 + k, cr, err := getFollowLinksWalk(root, convertPathToKey([]byte(d1)), true, &linksWalked) + if err != nil { + return "", k, false, err + } + + if d2 != "" && cr != nil && cr.Type == CacheRecordTypeSymlink { + // getFollowLinks only handles symlinks in path + // components before the last component, so + // handle last component in d1 specially. + resolved := string(convertKeyToPath(k)) + for { + v, ok := root.Get(k) + + if !ok { + return d1, k, false, nil + } + if v.(*CacheRecord).Type != CacheRecordTypeSymlink { + break + } + + linksWalked++ + if linksWalked > 255 { + return "", k, false, errors.Errorf("too many links") + } + + resolved := cleanLink(resolved, v.(*CacheRecord).Linkname) + k = convertPathToKey([]byte(resolved)) + } + } + return d1, k, cr != nil, nil +} + +func splitWildcards(p string) (d1, d2 string) { + parts := strings.Split(path.Join(p), "/") + var p1, p2 []string + var found bool + for _, p := range parts { + if !found && containsWildcards(p) { + found = true + } + if p == "" { + p = "/" + } + if !found { + p1 = append(p1, p) + } else { + p2 = append(p2, p) + } + } + return filepath.Join(p1...), filepath.Join(p2...) +} + +func containsWildcards(name string) bool { + for i := 0; i < len(name); i++ { + ch := name[i] + if ch == '\\' { + i++ + } else if ch == '*' || ch == '?' 
|| ch == '[' { + return true + } + } + return false +} + func (cc *cacheContext) checksumNoFollow(ctx context.Context, m *mount, p string) (*CacheRecord, error) { p = keyPath(p) @@ -721,7 +895,7 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir return nil, false, err } if cr == nil { - return nil, false, errors.Wrapf(errNotFound, "%q not found", convertKeyToPath(origk)) + return nil, false, errors.Wrapf(errNotFound, "%q", convertKeyToPath(origk)) } if cr.Digest != "" { return cr, false, nil @@ -732,7 +906,8 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir case CacheRecordTypeDir: h := sha256.New() next := append(k, 0) - iter := root.Seek(next) + iter := root.Iterator() + iter.SeekLowerBound(append(append([]byte{}, next...), 0)) subk := next ok := true for { @@ -750,7 +925,8 @@ func (cc *cacheContext) checksum(ctx context.Context, root *iradix.Node, txn *ir if subcr.Type == CacheRecordTypeDir { // skip subfiles next := append(subk, 0, 0xff) - iter = root.Seek(next) + iter = root.Iterator() + iter.SeekLowerBound(next) } subk, _, ok = iter.Next() } @@ -919,14 +1095,8 @@ func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *i if *linksWalked > 255 { return nil, nil, errors.Errorf("too many links") } - dirPath := path.Clean(string(convertKeyToPath(dir))) - if dirPath == "." || dirPath == "/" { - dirPath = "" - } - link := path.Clean(parent.Linkname) - if !path.IsAbs(link) { - link = path.Join("/", path.Join(path.Dir(dirPath), link)) - } + + link := cleanLink(string(convertKeyToPath(dir)), parent.Linkname) return getFollowLinksWalk(root, append(convertPathToKey([]byte(link)), file...), follow, linksWalked) } } @@ -938,6 +1108,18 @@ func getFollowLinksWalk(root *iradix.Node, k []byte, follow bool, linksWalked *i return k, nil, nil } +func cleanLink(dir, linkname string) string { + dirPath := path.Clean(dir) + if dirPath == "." 
|| dirPath == "/" { + dirPath = "" + } + link := path.Clean(linkname) + if !path.IsAbs(link) { + return path.Join("/", path.Join(path.Dir(dirPath), link)) + } + return link +} + func prepareDigest(fp, p string, fi os.FileInfo) (digest.Digest, error) { h, err := NewFileHash(fp, fi) if err != nil { @@ -970,20 +1152,12 @@ func addParentToMap(d string, m map[string]struct{}) { addParentToMap(d, m) } -func ensureOriginMetadata(md *metadata.StorageItem) *metadata.StorageItem { - v := md.Get("cache.equalMutable") // TODO: const - if v == nil { - return md +func ensureOriginMetadata(md cache.RefMetadata) cache.RefMetadata { + em, ok := md.GetEqualMutable() + if !ok { + em = md } - var mutable string - if err := v.Unmarshal(&mutable); err != nil { - return md - } - si, ok := md.Storage().Get(mutable) - if ok { - return si - } - return md + return em } var pool32K = sync.Pool{ diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go index ccd2ebd2bb..99bba604f2 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package contenthash diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go index c6bfce9e61..a0e35665fe 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/filehash_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package contenthash diff --git a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go index a7f192d121..182c461184 100644 --- a/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go +++ b/vendor/github.com/moby/buildkit/cache/contenthash/tarsum.go @@ -47,7 +47,7 @@ func v1TarHeaderSelect(h *tar.Header) (orderedHeaders [][2]string) { } // Get extended attributes. - xAttrKeys := make([]string, len(h.PAXRecords)) + xAttrKeys := make([]string, 0, len(h.PAXRecords)) for k := range pax { if strings.HasPrefix(k, "SCHILY.xattr.") { k = strings.TrimPrefix(k, "SCHILY.xattr.") diff --git a/vendor/github.com/moby/buildkit/cache/converter.go b/vendor/github.com/moby/buildkit/cache/converter.go new file mode 100644 index 0000000000..a7e4df193a --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/converter.go @@ -0,0 +1,252 @@ +package cache + +import ( + "bufio" + "context" + "fmt" + "io" + "sync" + + cdcompression "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/images/converter" + "github.com/containerd/containerd/labels" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/compression" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +// needsConversion indicates whether a conversion is needed for the specified descriptor to +// be the compressionType. 
+func needsConversion(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (bool, error) { + mediaType := desc.MediaType + switch compressionType { + case compression.Uncompressed: + if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Uncompressed { + return false, nil + } + case compression.Gzip: + esgz, err := isEStargz(ctx, cs, desc.Digest) + if err != nil { + return false, err + } + if (!images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Gzip) && !esgz { + return false, nil + } + case compression.Zstd: + if !images.IsLayerType(mediaType) || compression.FromMediaType(mediaType) == compression.Zstd { + return false, nil + } + case compression.EStargz: + esgz, err := isEStargz(ctx, cs, desc.Digest) + if err != nil { + return false, err + } + if !images.IsLayerType(mediaType) || esgz { + return false, nil + } + default: + return false, fmt.Errorf("unknown compression type during conversion: %q", compressionType) + } + return true, nil +} + +// getConverter returns converter function according to the specified compression type. +// If no conversion is needed, this returns nil without error. +func getConverter(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, comp compression.Config) (converter.ConvertFunc, error) { + if needs, err := needsConversion(ctx, cs, desc, comp.Type); err != nil { + return nil, errors.Wrapf(err, "failed to determine conversion needs") + } else if !needs { + // No conversion. No need to return an error here. + return nil, nil + } + + c := conversion{target: comp} + + from := compression.FromMediaType(desc.MediaType) + switch from { + case compression.Uncompressed: + case compression.Gzip, compression.Zstd: + c.decompress = func(ctx context.Context, desc ocispecs.Descriptor) (r io.ReadCloser, err error) { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + esgz, err := isEStargz(ctx, cs, desc.Digest) + if err != nil { + return nil, err + } else if esgz { + r, err = decompressEStargz(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + } else { + r, err = cdcompression.DecompressStream(io.NewSectionReader(ra, 0, ra.Size())) + if err != nil { + return nil, err + } + } + return &readCloser{r, ra.Close}, nil + } + default: + return nil, errors.Errorf("unsupported source compression type %q from mediatype %q", from, desc.MediaType) + } + + switch comp.Type { + case compression.Uncompressed: + case compression.Gzip: + c.compress = gzipWriter(comp) + case compression.Zstd: + c.compress = zstdWriter(comp) + case compression.EStargz: + compressorFunc, finalize := compressEStargz(comp) + c.compress = func(w io.Writer) (io.WriteCloser, error) { + return compressorFunc(w, ocispecs.MediaTypeImageLayerGzip) + } + c.finalize = finalize + default: + return nil, errors.Errorf("unknown target compression type during conversion: %q", comp.Type) + } + + return (&c).convert, nil +} + +type conversion struct { + target compression.Config + decompress func(context.Context, ocispecs.Descriptor) (io.ReadCloser, error) + compress func(w io.Writer) (io.WriteCloser, error) + finalize func(context.Context, content.Store) (map[string]string, error) +} + +var bufioPool = sync.Pool{ + New: func() interface{} { + return nil + }, +} + +func (c *conversion) convert(ctx context.Context, cs content.Store, desc ocispecs.Descriptor) (*ocispecs.Descriptor, error) { + bklog.G(ctx).WithField("blob", desc).WithField("target", 
c.target).Debugf("converting blob to the target compression") + // prepare the source and destination + labelz := make(map[string]string) + ref := fmt.Sprintf("convert-from-%s-to-%s-%s", desc.Digest, c.target.Type.String(), identity.NewID()) + w, err := cs.Writer(ctx, content.WithRef(ref)) + if err != nil { + return nil, err + } + defer w.Close() + if err := w.Truncate(0); err != nil { // Old written data possibly remains + return nil, err + } + + var bufW *bufio.Writer + if pooledW := bufioPool.Get(); pooledW != nil { + bufW = pooledW.(*bufio.Writer) + bufW.Reset(w) + } else { + bufW = bufio.NewWriterSize(w, 128*1024) + } + defer bufioPool.Put(bufW) + var zw io.WriteCloser = &nopWriteCloser{bufW} + if c.compress != nil { + zw, err = c.compress(zw) + if err != nil { + return nil, err + } + } + zw = &onceWriteCloser{WriteCloser: zw} + defer zw.Close() + + // convert this layer + diffID := digest.Canonical.Digester() + var rdr io.Reader + if c.decompress == nil { + ra, err := cs.ReaderAt(ctx, desc) + if err != nil { + return nil, err + } + defer ra.Close() + rdr = io.NewSectionReader(ra, 0, ra.Size()) + } else { + rc, err := c.decompress(ctx, desc) + if err != nil { + return nil, err + } + defer rc.Close() + rdr = rc + } + if _, err := io.Copy(zw, io.TeeReader(rdr, diffID.Hash())); err != nil { + return nil, err + } + if err := zw.Close(); err != nil { // Flush the writer + return nil, err + } + if err := bufW.Flush(); err != nil { // Flush the buffer + return nil, errors.Wrap(err, "failed to flush diff during conversion") + } + labelz[labels.LabelUncompressed] = diffID.Digest().String() // update diffID label + if err = w.Commit(ctx, 0, "", content.WithLabels(labelz)); err != nil && !errdefs.IsAlreadyExists(err) { + return nil, err + } + if err := w.Close(); err != nil { + return nil, err + } + info, err := cs.Info(ctx, w.Digest()) + if err != nil { + return nil, err + } + + newDesc := desc + newDesc.MediaType = c.target.Type.DefaultMediaType() + newDesc.Digest = info.Digest + newDesc.Size = info.Size + newDesc.Annotations = map[string]string{labels.LabelUncompressed: diffID.Digest().String()} + if c.finalize != nil { + a, err := c.finalize(ctx, cs) + if err != nil { + return nil, errors.Wrapf(err, "failed finalize compression") + } + for k, v := range a { + newDesc.Annotations[k] = v + } + } + return &newDesc, nil +} + +type readCloser struct { + io.ReadCloser + closeFunc func() error +} + +func (rc *readCloser) Close() error { + err1 := rc.ReadCloser.Close() + err2 := rc.closeFunc() + if err1 != nil { + return errors.Wrapf(err1, "failed to close: %v", err2) + } + return err2 +} + +type nopWriteCloser struct { + io.Writer +} + +func (w *nopWriteCloser) Close() error { + return nil +} + +type onceWriteCloser struct { + io.WriteCloser + closeOnce sync.Once +} + +func (w *onceWriteCloser) Close() (err error) { + w.closeOnce.Do(func() { + err = w.WriteCloser.Close() + }) + return +} diff --git a/vendor/github.com/moby/buildkit/cache/estargz.go b/vendor/github.com/moby/buildkit/cache/estargz.go new file mode 100644 index 0000000000..f67d14925d --- /dev/null +++ b/vendor/github.com/moby/buildkit/cache/estargz.go @@ -0,0 +1,250 @@ +package cache + +import ( + "archive/tar" + "compress/gzip" + "context" + "fmt" + "io" + "strconv" + "sync" + + cdcompression "github.com/containerd/containerd/archive/compression" + "github.com/containerd/containerd/content" + "github.com/containerd/stargz-snapshotter/estargz" + "github.com/moby/buildkit/util/compression" + digest "github.com/opencontainers/go-digest" 
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/pkg/errors" +) + +var eStargzAnnotations = []string{estargz.TOCJSONDigestAnnotation, estargz.StoreUncompressedSizeAnnotation} + +// compressEStargz writes the passed blobs stream as an eStargz-compressed blob. +// finalize function finalizes the written blob metadata and returns all eStargz annotations. +func compressEStargz(comp compression.Config) (compressorFunc compressor, finalize func(context.Context, content.Store) (map[string]string, error)) { + var cInfo *compressionInfo + var writeErr error + var mu sync.Mutex + return func(dest io.Writer, requiredMediaType string) (io.WriteCloser, error) { + if compression.FromMediaType(requiredMediaType) != compression.Gzip { + return nil, fmt.Errorf("unsupported media type for estargz compressor %q", requiredMediaType) + } + done := make(chan struct{}) + pr, pw := io.Pipe() + go func() (retErr error) { + defer close(done) + defer func() { + if retErr != nil { + mu.Lock() + writeErr = retErr + mu.Unlock() + } + }() + + blobInfoW, bInfoCh := calculateBlobInfo() + defer blobInfoW.Close() + level := gzip.DefaultCompression + if comp.Level != nil { + level = *comp.Level + } + w := estargz.NewWriterLevel(io.MultiWriter(dest, blobInfoW), level) + + // Using lossless API here to make sure that decompressEStargz provides the exact + // same tar as the original. + // + // Note that we don't support eStragz compression for tar that contains a file named + // `stargz.index.json` because we cannot create eStargz in loseless way for such blob + // (we must overwrite stargz.index.json file). + if err := w.AppendTarLossLess(pr); err != nil { + pr.CloseWithError(err) + return err + } + tocDgst, err := w.Close() + if err != nil { + pr.CloseWithError(err) + return err + } + if err := blobInfoW.Close(); err != nil { + pr.CloseWithError(err) + return err + } + bInfo := <-bInfoCh + mu.Lock() + cInfo = &compressionInfo{bInfo, tocDgst} + mu.Unlock() + pr.Close() + return nil + }() + return &writeCloser{pw, func() error { + <-done // wait until the write completes + return nil + }}, nil + }, func(ctx context.Context, cs content.Store) (map[string]string, error) { + mu.Lock() + cInfo, writeErr := cInfo, writeErr + mu.Unlock() + if cInfo == nil { + if writeErr != nil { + return nil, errors.Wrapf(writeErr, "cannot finalize due to write error") + } + return nil, errors.Errorf("cannot finalize (reason unknown)") + } + + // Fill necessary labels + info, err := cs.Info(ctx, cInfo.compressedDigest) + if err != nil { + return nil, errors.Wrap(err, "failed to get info from content store") + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[containerdUncompressed] = cInfo.uncompressedDigest.String() + if _, err := cs.Update(ctx, info, "labels."+containerdUncompressed); err != nil { + return nil, err + } + + // Fill annotations + a := make(map[string]string) + a[estargz.TOCJSONDigestAnnotation] = cInfo.tocDigest.String() + a[estargz.StoreUncompressedSizeAnnotation] = fmt.Sprintf("%d", cInfo.uncompressedSize) + a[containerdUncompressed] = cInfo.uncompressedDigest.String() + return a, nil + } +} + +const estargzLabel = "buildkit.io/compression/estargz" + +// isEStargz returns true when the specified digest of content exists in +// the content store and it's eStargz. 
+func isEStargz(ctx context.Context, cs content.Store, dgst digest.Digest) (bool, error) { + info, err := cs.Info(ctx, dgst) + if err != nil { + return false, nil + } + if isEsgzStr, ok := info.Labels[estargzLabel]; ok { + if isEsgz, err := strconv.ParseBool(isEsgzStr); err == nil { + return isEsgz, nil + } + } + + res := func() bool { + r, err := cs.ReaderAt(ctx, ocispecs.Descriptor{Digest: dgst}) + if err != nil { + return false + } + defer r.Close() + sr := io.NewSectionReader(r, 0, r.Size()) + + // Does this have the footer? + tocOffset, _, err := estargz.OpenFooter(sr) + if err != nil { + return false + } + + // Is TOC the final entry? + decompressor := new(estargz.GzipDecompressor) + rr, err := decompressor.Reader(io.NewSectionReader(sr, tocOffset, sr.Size()-tocOffset)) + if err != nil { + return false + } + tr := tar.NewReader(rr) + h, err := tr.Next() + if err != nil { + return false + } + if h.Name != estargz.TOCTarName { + return false + } + if _, err = tr.Next(); err != io.EOF { // must be EOF + return false + } + + return true + }() + + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[estargzLabel] = strconv.FormatBool(res) // cache the result + if _, err := cs.Update(ctx, info, "labels."+estargzLabel); err != nil { + return false, err + } + + return res, nil +} + +func decompressEStargz(r *io.SectionReader) (io.ReadCloser, error) { + return estargz.Unpack(r, new(estargz.GzipDecompressor)) +} + +type writeCloser struct { + io.WriteCloser + closeFunc func() error +} + +func (wc *writeCloser) Close() error { + err1 := wc.WriteCloser.Close() + err2 := wc.closeFunc() + if err1 != nil { + return errors.Wrapf(err1, "failed to close: %v", err2) + } + return err2 +} + +type counter struct { + n int64 + mu sync.Mutex +} + +func (c *counter) Write(p []byte) (n int, err error) { + c.mu.Lock() + c.n += int64(len(p)) + c.mu.Unlock() + return len(p), nil +} + +func (c *counter) size() (n int64) { + c.mu.Lock() + n = c.n + c.mu.Unlock() + return +} + +type compressionInfo struct { + blobInfo + tocDigest digest.Digest +} + +type blobInfo struct { + compressedDigest digest.Digest + uncompressedDigest digest.Digest + uncompressedSize int64 +} + +func calculateBlobInfo() (io.WriteCloser, chan blobInfo) { + res := make(chan blobInfo) + pr, pw := io.Pipe() + go func() { + defer pr.Close() + c := new(counter) + dgstr := digest.Canonical.Digester() + diffID := digest.Canonical.Digester() + decompressR, err := cdcompression.DecompressStream(io.TeeReader(pr, dgstr.Hash())) + if err != nil { + pr.CloseWithError(err) + return + } + defer decompressR.Close() + if _, err := io.Copy(io.MultiWriter(c, diffID.Hash()), decompressR); err != nil { + pr.CloseWithError(err) + return + } + if err := decompressR.Close(); err != nil { + pr.CloseWithError(err) + return + } + res <- blobInfo{dgstr.Digest(), diffID.Digest(), c.size()} + }() + return pw, res +} diff --git a/vendor/github.com/moby/buildkit/cache/manager.go b/vendor/github.com/moby/buildkit/cache/manager.go index 854a8783ab..8f91d3c7a0 100644 --- a/vendor/github.com/moby/buildkit/cache/manager.go +++ b/vendor/github.com/moby/buildkit/cache/manager.go @@ -2,7 +2,9 @@ package cache import ( "context" + "fmt" "sort" + "strings" "sync" "time" @@ -18,10 +20,12 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/progress" digest 
"github.com/opencontainers/go-digest" imagespecidentity "github.com/opencontainers/image-spec/identity" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" @@ -35,23 +39,27 @@ var ( type ManagerOpt struct { Snapshotter snapshot.Snapshotter - MetadataStore *metadata.Store ContentStore content.Store LeaseManager leases.Manager PruneRefChecker ExternalRefCheckerFunc GarbageCollect func(ctx context.Context) (gc.Stats, error) Applier diff.Applier Differ diff.Comparer + MetadataStore *metadata.Store + MountPoolRoot string } type Accessor interface { - GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ImmutableRef, error) - Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) + MetadataStore + + GetByBlob(ctx context.Context, desc ocispecs.Descriptor, parent ImmutableRef, opts ...RefOption) (ImmutableRef, error) + Get(ctx context.Context, id string, pg progress.Controller, opts ...RefOption) (ImmutableRef, error) New(ctx context.Context, parent ImmutableRef, s session.Group, opts ...RefOption) (MutableRef, error) GetMutable(ctx context.Context, id string, opts ...RefOption) (MutableRef, error) // Rebase? IdentityMapping() *idtools.IdentityMapping - Metadata(string) *metadata.StorageItem + Merge(ctx context.Context, parents []ImmutableRef, pg progress.Controller, opts ...RefOption) (ImmutableRef, error) + Diff(ctx context.Context, lower, upper ImmutableRef, pg progress.Controller, opts ...RefOption) (ImmutableRef, error) } type Controller interface { @@ -72,10 +80,18 @@ type ExternalRefChecker interface { } type cacheManager struct { - records map[string]*cacheRecord - mu sync.Mutex - ManagerOpt - md *metadata.Store + records map[string]*cacheRecord + mu sync.Mutex + Snapshotter snapshot.MergeSnapshotter + ContentStore content.Store + LeaseManager leases.Manager + PruneRefChecker ExternalRefCheckerFunc + GarbageCollect func(ctx context.Context) (gc.Stats, error) + Applier diff.Applier + Differ diff.Comparer + MetadataStore *metadata.Store + + mountPool sharableMountPool muPrune sync.Mutex // make sure parallel prune is not allowed so there will not be inconsistent results unlazyG flightcontrol.Group @@ -83,21 +99,33 @@ type cacheManager struct { func NewManager(opt ManagerOpt) (Manager, error) { cm := &cacheManager{ - ManagerOpt: opt, - md: opt.MetadataStore, - records: make(map[string]*cacheRecord), + Snapshotter: snapshot.NewMergeSnapshotter(context.TODO(), opt.Snapshotter, opt.LeaseManager), + ContentStore: opt.ContentStore, + LeaseManager: opt.LeaseManager, + PruneRefChecker: opt.PruneRefChecker, + GarbageCollect: opt.GarbageCollect, + Applier: opt.Applier, + Differ: opt.Differ, + MetadataStore: opt.MetadataStore, + records: make(map[string]*cacheRecord), } if err := cm.init(context.TODO()); err != nil { return nil, err } + p, err := newSharableMountPool(opt.MountPoolRoot) + if err != nil { + return nil, err + } + cm.mountPool = p + // cm.scheduleGC(5 * time.Minute) return cm, nil } -func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, parent ImmutableRef, opts ...RefOption) (ir ImmutableRef, rerr error) { +func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispecs.Descriptor, parent ImmutableRef, opts ...RefOption) (ir ImmutableRef, rerr error) { diffID, err := diffIDFromDescriptor(desc) if err != nil { return nil, err @@ -108,31 
+136,31 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, descHandlers := descHandlersOf(opts...) if desc.Digest != "" && (descHandlers == nil || descHandlers[desc.Digest] == nil) { if _, err := cm.ContentStore.Info(ctx, desc.Digest); errors.Is(err, errdefs.ErrNotFound) { - return nil, NeedsRemoteProvidersError([]digest.Digest{desc.Digest}) + return nil, NeedsRemoteProviderError([]digest.Digest{desc.Digest}) } else if err != nil { return nil, err } } var p *immutableRef - var parentID string if parent != nil { - pInfo := parent.Info() - if pInfo.ChainID == "" || pInfo.BlobChainID == "" { - return nil, errors.Errorf("failed to get ref by blob on non-addressable parent") - } - chainID = imagespecidentity.ChainID([]digest.Digest{pInfo.ChainID, chainID}) - blobChainID = imagespecidentity.ChainID([]digest.Digest{pInfo.BlobChainID, blobChainID}) - - p2, err := cm.Get(ctx, parent.ID(), NoUpdateLastUsed, descHandlers) + p2, err := cm.Get(ctx, parent.ID(), nil, NoUpdateLastUsed, descHandlers) if err != nil { return nil, err } - if err := p2.Finalize(ctx, true); err != nil { + p = p2.(*immutableRef) + + if err := p.Finalize(ctx); err != nil { + p.Release(context.TODO()) return nil, err } - parentID = p2.ID() - p = p2.(*immutableRef) + + if p.getChainID() == "" || p.getBlobChainID() == "" { + p.Release(context.TODO()) + return nil, errors.Errorf("failed to get ref by blob on non-addressable parent") + } + chainID = imagespecidentity.ChainID([]digest.Digest{p.getChainID(), chainID}) + blobChainID = imagespecidentity.ChainID([]digest.Digest{p.getBlobChainID(), blobChainID}) } releaseParent := false @@ -145,15 +173,22 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, cm.mu.Lock() defer cm.mu.Unlock() - sis, err := cm.MetadataStore.Search("blobchainid:" + blobChainID.String()) + sis, err := cm.searchBlobchain(ctx, blobChainID) if err != nil { return nil, err } for _, si := range sis { - ref, err := cm.get(ctx, si.ID(), opts...) - if err != nil && !IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", sis[0].ID()) + ref, err := cm.get(ctx, si.ID(), nil, opts...) + if err != nil { + if errors.As(err, &NeedsRemoteProviderError{}) { + // This shouldn't happen and indicates that blobchain IDs are being set incorrectly, + // but if it does happen it's not fatal as we can just not try to re-use by blobchainID. + // Log the error but continue. + bklog.G(ctx).Errorf("missing providers for ref with equivalent blobchain ID %s", blobChainID) + } else if !IsNotFound(err) { + return nil, errors.Wrapf(err, "failed to get record %s by blobchainid", sis[0].ID()) + } } if ref == nil { continue @@ -161,22 +196,23 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, if p != nil { releaseParent = true } - if err := setImageRefMetadata(ref, opts...); err != nil { + if err := setImageRefMetadata(ref.cacheMetadata, opts...); err != nil { return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", ref.ID()) } return ref, nil } - sis, err = cm.MetadataStore.Search("chainid:" + chainID.String()) + sis, err = cm.searchChain(ctx, chainID) if err != nil { return nil, err } - var link ImmutableRef + var link *immutableRef for _, si := range sis { - ref, err := cm.get(ctx, si.ID(), opts...) - if err != nil && !IsNotFound(err) { - return nil, errors.Wrapf(err, "failed to get record %s by chainid", sis[0].ID()) + ref, err := cm.get(ctx, si.ID(), nil, opts...) 
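For reference, the chain IDs composed above follow the OCI image-spec recursion that imagespecidentity.ChainID implements, which is why extending a chain only ever needs the parent's chain ID plus the new layer digest. A minimal standalone illustration:

package main

import (
	"fmt"

	digest "github.com/opencontainers/go-digest"
)

// chainIDs applies the OCI image-spec definition:
//   ChainID(L1)     = DiffID(L1)
//   ChainID(L1..Ln) = Digest(ChainID(L1..Ln-1) + " " + DiffID(Ln))
func chainIDs(diffIDs []digest.Digest) []digest.Digest {
	out := make([]digest.Digest, len(diffIDs))
	for i, d := range diffIDs {
		if i == 0 {
			out[i] = d
			continue
		}
		out[i] = digest.FromString(out[i-1].String() + " " + d.String())
	}
	return out
}

func main() {
	l1 := digest.FromString("layer one diff")
	l2 := digest.FromString("layer two diff")
	fmt.Println(chainIDs([]digest.Digest{l1, l2}))
}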
+ // if the error was NotFound or NeedsRemoteProvider, we can't re-use the snapshot from the blob so just skip it + if err != nil && !IsNotFound(err) && !errors.As(err, &NeedsRemoteProviderError{}) { + return nil, errors.Wrapf(err, "failed to get record %s by chainid", si.ID()) } if ref != nil { link = ref @@ -188,12 +224,12 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, snapshotID := chainID.String() blobOnly := true if link != nil { - snapshotID = getSnapshotID(link.Metadata()) - blobOnly = getBlobOnly(link.Metadata()) + snapshotID = link.getSnapshotID() + blobOnly = link.getBlobOnly() go link.Release(context.TODO()) } - l, err := cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l, err := cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { l.ID = id l.Labels = map[string]string{ "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), @@ -206,7 +242,7 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, defer func() { if rerr != nil { - if err := cm.ManagerOpt.LeaseManager.Delete(context.TODO(), leases.Lease{ + if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { logrus.Errorf("failed to remove lease: %+v", err) @@ -214,15 +250,15 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, } }() - if err := cm.ManagerOpt.LeaseManager.AddResource(ctx, l, leases.Resource{ + if err := cm.LeaseManager.AddResource(ctx, l, leases.Resource{ ID: snapshotID, - Type: "snapshots/" + cm.ManagerOpt.Snapshotter.Name(), - }); err != nil { + Type: "snapshots/" + cm.Snapshotter.Name(), + }); err != nil && !errdefs.IsAlreadyExists(err) { return nil, errors.Wrapf(err, "failed to add snapshot %s to lease", id) } if desc.Digest != "" { - if err := cm.ManagerOpt.LeaseManager.AddResource(ctx, leases.Lease{ID: id}, leases.Resource{ + if err := cm.LeaseManager.AddResource(ctx, leases.Lease{ID: id}, leases.Resource{ ID: desc.Digest.String(), Type: "content", }); err != nil { @@ -230,47 +266,48 @@ func (cm *cacheManager) GetByBlob(ctx context.Context, desc ocispec.Descriptor, } } - md, _ := cm.md.Get(id) + md, _ := cm.getMetadata(id) rec := &cacheRecord{ - mu: &sync.Mutex{}, - cm: cm, - refs: make(map[ref]struct{}), - parent: p, - md: md, + mu: &sync.Mutex{}, + cm: cm, + refs: make(map[ref]struct{}), + parentRefs: parentRefs{layerParent: p}, + cacheMetadata: md, } - if err := initializeMetadata(rec, parentID, opts...); err != nil { + if err := initializeMetadata(rec.cacheMetadata, rec.parentRefs, opts...); err != nil { return nil, err } - if err := setImageRefMetadata(rec, opts...); err != nil { + if err := setImageRefMetadata(rec.cacheMetadata, opts...); err != nil { return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) } - queueDiffID(rec.md, diffID.String()) - queueBlob(rec.md, desc.Digest.String()) - queueChainID(rec.md, chainID.String()) - queueBlobChainID(rec.md, blobChainID.String()) - queueSnapshotID(rec.md, snapshotID) - queueBlobOnly(rec.md, blobOnly) - queueMediaType(rec.md, desc.MediaType) - queueBlobSize(rec.md, desc.Size) - queueCommitted(rec.md) + rec.queueDiffID(diffID) + rec.queueBlob(desc.Digest) + rec.queueChainID(chainID) + rec.queueBlobChainID(blobChainID) + rec.queueSnapshotID(snapshotID) + rec.queueBlobOnly(blobOnly) + rec.queueMediaType(desc.MediaType) + rec.queueBlobSize(desc.Size) + rec.appendURLs(desc.URLs) + rec.queueCommitted(true) - if err := rec.md.Commit(); err != nil { + if err := 
rec.commitMetadata(); err != nil {
+		return nil, err
+	}
+
 	cm.records[id] = rec
 
-	return rec.ref(true, descHandlers), nil
+	return rec.ref(true, descHandlers, nil), nil
 }
 
 // init loads all snapshots from metadata state and tries to load the records
 // from the snapshotter. If a snapshot can't be found, the metadata is deleted as well.
 func (cm *cacheManager) init(ctx context.Context) error {
-	items, err := cm.md.All()
+	items, err := cm.MetadataStore.All()
 	if err != nil {
 		return err
 	}
@@ -278,7 +315,7 @@
 	for _, si := range items {
 		if _, err := cm.getRecord(ctx, si.ID()); err != nil {
 			logrus.Debugf("could not load snapshot %s: %+v", si.ID(), err)
-			cm.md.Clear(si.ID())
+			cm.MetadataStore.Clear(si.ID())
 			cm.LeaseManager.Delete(ctx, leases.Lease{ID: si.ID()})
 		}
 	}
@@ -294,28 +331,18 @@ func (cm *cacheManager) IdentityMapping() *idtools.IdentityMapping {
 // method should be called after Close.
 func (cm *cacheManager) Close() error {
 	// TODO: allocate internal context and cancel it here
-	return cm.md.Close()
+	return cm.MetadataStore.Close()
 }
 
 // Get returns an immutable snapshot reference for ID
-func (cm *cacheManager) Get(ctx context.Context, id string, opts ...RefOption) (ImmutableRef, error) {
+func (cm *cacheManager) Get(ctx context.Context, id string, pg progress.Controller, opts ...RefOption) (ImmutableRef, error) {
 	cm.mu.Lock()
 	defer cm.mu.Unlock()
-	return cm.get(ctx, id, opts...)
-}
-
-func (cm *cacheManager) Metadata(id string) *metadata.StorageItem {
-	cm.mu.Lock()
-	defer cm.mu.Unlock()
-	r, ok := cm.records[id]
-	if !ok {
-		return nil
-	}
-	return r.Metadata()
+	return cm.get(ctx, id, pg, opts...)
 }
 
 // get requires manager lock to be taken
-func (cm *cacheManager) get(ctx context.Context, id string, opts ...RefOption) (*immutableRef, error) {
+func (cm *cacheManager) get(ctx context.Context, id string, pg progress.Controller, opts ...RefOption) (*immutableRef, error) {
 	rec, err := cm.getRecord(ctx, id, opts...)
 	if err != nil {
 		return nil, err
 	}
@@ -337,31 +364,29 @@ (
 		return nil, errors.Wrapf(ErrLocked, "%s is locked", id)
 	}
 	if rec.equalImmutable != nil {
-		return rec.equalImmutable.ref(triggerUpdate, descHandlers), nil
+		return rec.equalImmutable.ref(triggerUpdate, descHandlers, pg), nil
 	}
 	return rec.mref(triggerUpdate, descHandlers).commit(ctx)
 }
 
-	return rec.ref(triggerUpdate, descHandlers), nil
+	return rec.ref(triggerUpdate, descHandlers, pg), nil
 }
 
 // getRecord returns record for id. Requires manager lock.
 func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOption) (cr *cacheRecord, retErr error) {
 	checkLazyProviders := func(rec *cacheRecord) error {
-		missing := NeedsRemoteProvidersError(nil)
+		missing := NeedsRemoteProviderError(nil)
 		dhs := descHandlersOf(opts...)
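The lazy-provider check continuing just below switches from a linear walk up the single-parent chain to walkUniqueAncestors, since records can now have several parents. A standalone sketch of the accumulation it performs (record fields invented for illustration):

package main

import "fmt"

// record stands in for cacheRecord: a blob digest, whether its content
// is lazy (not yet pulled), and any number of parents after this change.
type record struct {
	id      string
	blob    string
	lazy    bool
	parents []*record
}

// missingProviders visits every unique ancestor once and collects blobs
// that are lazy but have no descriptor handler supplied by the caller.
func missingProviders(root *record, handlers map[string]bool) []string {
	var missing []string
	seen := map[string]bool{}
	var walk func(*record)
	walk = func(r *record) {
		if r == nil || seen[r.id] {
			return
		}
		seen[r.id] = true
		if r.lazy && !handlers[r.blob] {
			missing = append(missing, r.blob)
		}
		for _, p := range r.parents {
			walk(p)
		}
	}
	walk(root)
	return missing
}

func main() {
	base := &record{id: "base", blob: "sha256:aaa", lazy: true}
	top := &record{id: "top", blob: "sha256:bbb", lazy: true, parents: []*record{base}}
	fmt.Println(missingProviders(top, map[string]bool{"sha256:bbb": true}))
	// Output: [sha256:aaa]
}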
- for { - blob := digest.Digest(getBlob(rec.md)) - if isLazy, err := rec.isLazy(ctx); err != nil { + if err := rec.walkUniqueAncestors(func(cr *cacheRecord) error { + blob := cr.getBlob() + if isLazy, err := cr.isLazy(ctx); err != nil { return err } else if isLazy && dhs[blob] == nil { missing = append(missing, blob) } - - if rec.parent == nil { - break - } - rec = rec.parent.cacheRecord + return nil + }); err != nil { + return err } if len(missing) > 0 { return missing @@ -379,76 +404,87 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt return rec, nil } - md, ok := cm.md.Get(id) + md, ok := cm.getMetadata(id) if !ok { - return nil, errors.Wrapf(errNotFound, "%s not found", id) + return nil, errors.Wrap(errNotFound, id) } - if mutableID := getEqualMutable(md); mutableID != "" { + + parents, err := cm.parentsOf(ctx, md, opts...) + if err != nil { + return nil, errors.Wrapf(err, "failed to get parents") + } + defer func() { + if retErr != nil { + parents.release(context.TODO()) + } + }() + + if mutableID := md.getEqualMutable(); mutableID != "" { mutable, err := cm.getRecord(ctx, mutableID) - if err != nil { - // check loading mutable deleted record from disk - if IsNotFound(err) { - cm.md.Clear(id) + if err == nil { + rec := &cacheRecord{ + mu: &sync.Mutex{}, + cm: cm, + refs: make(map[ref]struct{}), + parentRefs: parents, + cacheMetadata: md, + equalMutable: &mutableRef{cacheRecord: mutable}, } + mutable.equalImmutable = &immutableRef{cacheRecord: rec} + cm.records[id] = rec + return rec, nil + } else if IsNotFound(err) { + // The equal mutable for this ref is not found, check to see if our snapshot exists + if _, statErr := cm.Snapshotter.Stat(ctx, md.getSnapshotID()); statErr != nil { + // this ref's snapshot also doesn't exist, just remove this record + cm.MetadataStore.Clear(id) + return nil, errors.Wrap(errNotFound, id) + } + // Our snapshot exists, so there may have been a crash while finalizing this ref. + // Clear the equal mutable field and continue using this ref. + md.clearEqualMutable() + md.commitMetadata() + } else { return nil, err } - - // parent refs are possibly lazy so keep it hold the description handlers. - var dhs DescHandlers - if mutable.parent != nil { - dhs = mutable.parent.descHandlers - } - rec := &cacheRecord{ - mu: &sync.Mutex{}, - cm: cm, - refs: make(map[ref]struct{}), - parent: mutable.parentRef(false, dhs), - md: md, - equalMutable: &mutableRef{cacheRecord: mutable}, - } - mutable.equalImmutable = &immutableRef{cacheRecord: rec} - cm.records[id] = rec - return rec, nil - } - - var parent *immutableRef - if parentID := getParent(md); parentID != "" { - var err error - parent, err = cm.get(ctx, parentID, append(opts, NoUpdateLastUsed)...) 
- if err != nil { - return nil, err - } - defer func() { - if retErr != nil { - parent.mu.Lock() - parent.release(context.TODO()) - parent.mu.Unlock() - } - }() } rec := &cacheRecord{ - mu: &sync.Mutex{}, - mutable: !getCommitted(md), - cm: cm, - refs: make(map[ref]struct{}), - parent: parent, - md: md, + mu: &sync.Mutex{}, + mutable: !md.getCommitted(), + cm: cm, + refs: make(map[ref]struct{}), + parentRefs: parents, + cacheMetadata: md, } // the record was deleted but we crashed before data on disk was removed - if getDeleted(md) { + if md.getDeleted() { if err := rec.remove(ctx, true); err != nil { return nil, err } return nil, errors.Wrapf(errNotFound, "failed to get deleted record %s", id) } - if err := initializeMetadata(rec, getParent(md), opts...); err != nil { + if rec.mutable { + // If the record is mutable, then the snapshot must exist + if _, err := cm.Snapshotter.Stat(ctx, rec.ID()); err != nil { + if !errdefs.IsNotFound(err) { + return nil, errors.Wrap(err, "failed to check mutable ref snapshot") + } + // the snapshot doesn't exist, clear this record + if err := rec.remove(ctx, true); err != nil { + return nil, errors.Wrap(err, "failed to remove mutable rec with missing snapshot") + } + return nil, errors.Wrap(errNotFound, rec.ID()) + } + } + + if err := initializeMetadata(rec.cacheMetadata, rec.parentRefs, opts...); err != nil { return nil, err } - if err := setImageRefMetadata(rec, opts...); err != nil { + if err := setImageRefMetadata(rec.cacheMetadata, opts...); err != nil { return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) } @@ -459,30 +495,67 @@ func (cm *cacheManager) getRecord(ctx context.Context, id string, opts ...RefOpt return rec, nil } +func (cm *cacheManager) parentsOf(ctx context.Context, md *cacheMetadata, opts ...RefOption) (ps parentRefs, rerr error) { + if parentID := md.getParent(); parentID != "" { + p, err := cm.get(ctx, parentID, nil, append(opts, NoUpdateLastUsed)) + if err != nil { + return ps, err + } + ps.layerParent = p + return ps, nil + } + for _, parentID := range md.getMergeParents() { + p, err := cm.get(ctx, parentID, nil, append(opts, NoUpdateLastUsed)) + if err != nil { + return ps, err + } + ps.mergeParents = append(ps.mergeParents, p) + } + if lowerParentID := md.getLowerDiffParent(); lowerParentID != "" { + p, err := cm.get(ctx, lowerParentID, nil, append(opts, NoUpdateLastUsed)) + if err != nil { + return ps, err + } + if ps.diffParents == nil { + ps.diffParents = &diffParents{} + } + ps.diffParents.lower = p + } + if upperParentID := md.getUpperDiffParent(); upperParentID != "" { + p, err := cm.get(ctx, upperParentID, nil, append(opts, NoUpdateLastUsed)) + if err != nil { + return ps, err + } + if ps.diffParents == nil { + ps.diffParents = &diffParents{} + } + ps.diffParents.upper = p + } + return ps, nil +} + func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Group, opts ...RefOption) (mr MutableRef, err error) { id := identity.NewID() var parent *immutableRef - var parentID string var parentSnapshotID string if s != nil { if _, ok := s.(*immutableRef); ok { parent = s.Clone().(*immutableRef) } else { - p, err := cm.Get(ctx, s.ID(), append(opts, NoUpdateLastUsed)...) + p, err := cm.Get(ctx, s.ID(), nil, append(opts, NoUpdateLastUsed)...) 
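parentsOf above reconstructs one of three mutually exclusive parent shapes from metadata. A standalone model of that tagged union and the kind() dispatch these hunks rely on (field types abridged):

package main

import "fmt"

type ref struct{ id string }

type diffParents struct{ lower, upper *ref }

// parentRefs models the three parent shapes a record can have after this
// change; at most one of the three fields is populated.
type parentRefs struct {
	layerParent  *ref
	mergeParents []*ref
	diffParents  *diffParents
}

func (p parentRefs) kind() string {
	switch {
	case p.layerParent != nil:
		return "layer"
	case len(p.mergeParents) > 0:
		return "merge"
	case p.diffParents != nil:
		return "diff"
	}
	return "base" // no parents at all
}

func main() {
	fmt.Println(parentRefs{layerParent: &ref{"p"}}.kind())                      // layer
	fmt.Println(parentRefs{mergeParents: []*ref{{"a"}, {"b"}}}.kind())          // merge
	fmt.Println(parentRefs{diffParents: &diffParents{lower: &ref{"l"}}}.kind()) // diff
}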
if err != nil { return nil, err } parent = p.(*immutableRef) } - if err := parent.Finalize(ctx, true); err != nil { + if err := parent.Finalize(ctx); err != nil { return nil, err } if err := parent.Extract(ctx, sess); err != nil { return nil, err } - parentSnapshotID = getSnapshotID(parent.md) - parentID = parent.ID() + parentSnapshotID = parent.getSnapshotID() } defer func() { @@ -491,7 +564,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr } }() - l, err := cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l, err := cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { l.ID = id l.Labels = map[string]string{ "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), @@ -504,7 +577,7 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr defer func() { if err != nil { - if err := cm.ManagerOpt.LeaseManager.Delete(context.TODO(), leases.Lease{ + if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ ID: l.ID, }); err != nil { logrus.Errorf("failed to remove lease: %+v", err) @@ -512,48 +585,50 @@ func (cm *cacheManager) New(ctx context.Context, s ImmutableRef, sess session.Gr } }() - if err := cm.ManagerOpt.LeaseManager.AddResource(ctx, l, leases.Resource{ - ID: id, - Type: "snapshots/" + cm.ManagerOpt.Snapshotter.Name(), - }); err != nil { - return nil, errors.Wrapf(err, "failed to add snapshot %s to lease", id) + snapshotID := id + if err := cm.LeaseManager.AddResource(ctx, l, leases.Resource{ + ID: snapshotID, + Type: "snapshots/" + cm.Snapshotter.Name(), + }); err != nil && !errdefs.IsAlreadyExists(err) { + return nil, errors.Wrapf(err, "failed to add snapshot %s to lease", snapshotID) } if cm.Snapshotter.Name() == "stargz" && parent != nil { if rerr := parent.withRemoteSnapshotLabelsStargzMode(ctx, sess, func() { - err = cm.Snapshotter.Prepare(ctx, id, parentSnapshotID) + err = cm.Snapshotter.Prepare(ctx, snapshotID, parentSnapshotID) }); rerr != nil { return nil, rerr } } else { - err = cm.Snapshotter.Prepare(ctx, id, parentSnapshotID) + err = cm.Snapshotter.Prepare(ctx, snapshotID, parentSnapshotID) } if err != nil { - return nil, errors.Wrapf(err, "failed to prepare %s", id) - } - - md, _ := cm.md.Get(id) - - rec := &cacheRecord{ - mu: &sync.Mutex{}, - mutable: true, - cm: cm, - refs: make(map[ref]struct{}), - parent: parent, - md: md, - } - - if err := initializeMetadata(rec, parentID, opts...); err != nil { - return nil, err - } - - if err := setImageRefMetadata(rec, opts...); err != nil { - return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) + return nil, errors.Wrapf(err, "failed to prepare %v as %s", parentSnapshotID, snapshotID) } cm.mu.Lock() defer cm.mu.Unlock() + md, _ := cm.getMetadata(id) + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: true, + cm: cm, + refs: make(map[ref]struct{}), + parentRefs: parentRefs{layerParent: parent}, + cacheMetadata: md, + } + + opts = append(opts, withSnapshotID(snapshotID)) + if err := initializeMetadata(rec.cacheMetadata, rec.parentRefs, opts...); err != nil { + return nil, err + } + + if err := setImageRefMetadata(rec.cacheMetadata, opts...); err != nil { + return nil, errors.Wrapf(err, "failed to append image ref metadata to ref %s", rec.ID()) + } + cm.records[id] = rec // TODO: save to db // parent refs are possibly lazy so keep it hold the description handlers. 
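New above, like Merge and Diff below, repeats the same lease lifecycle: create a lease with a flat-GC label, attach resources to it, and delete the lease again if any later step fails. The shape of that pattern, as a hypothetical standalone helper (not in the diff) over the containerd leases API:

package cacheutil // hypothetical package, for illustration only

import (
	"context"
	"time"

	"github.com/containerd/containerd/leases"
	"github.com/pkg/errors"
)

// withFlatLease creates a lease with the flat-GC label used above, runs
// fn, and deletes the lease if fn fails, so partially built snapshots
// and content become garbage-collectible again.
func withFlatLease(ctx context.Context, lm leases.Manager, id string, fn func(leases.Lease) error) (rerr error) {
	l, err := lm.Create(ctx, func(l *leases.Lease) error {
		l.ID = id
		l.Labels = map[string]string{
			"containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano),
		}
		return nil
	})
	if err != nil {
		return errors.Wrap(err, "failed to create lease")
	}
	defer func() {
		if rerr != nil {
			// Best-effort cleanup, mirroring the deferred deletes above.
			_ = lm.Delete(context.TODO(), leases.Lease{ID: l.ID})
		}
	}()
	return fn(l)
}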
@@ -597,6 +672,312 @@ func (cm *cacheManager) GetMutable(ctx context.Context, id string, opts ...RefOp return rec.mref(true, descHandlersOf(opts...)), nil } +func (cm *cacheManager) Merge(ctx context.Context, inputParents []ImmutableRef, pg progress.Controller, opts ...RefOption) (ir ImmutableRef, rerr error) { + // TODO:(sipsma) optimize merge further by + // * Removing repeated occurrences of input layers (only leaving the uppermost) + // * Reusing existing merges that are equivalent to this one + // * Reusing existing merges that can be used as a base for this one + // * Calculating diffs only once (across both merges and during computeBlobChain). Save diff metadata so it can be reapplied. + // These optimizations may make sense here in cache, in the snapshotter or both. + // Be sure that any optimizations handle existing pre-optimization refs correctly. + + parents := parentRefs{mergeParents: make([]*immutableRef, 0, len(inputParents))} + dhs := make(map[digest.Digest]*DescHandler) + defer func() { + if rerr != nil { + parents.release(context.TODO()) + } + }() + for _, inputParent := range inputParents { + if inputParent == nil { + continue + } + var parent *immutableRef + if p, ok := inputParent.(*immutableRef); ok { + parent = p + } else { + // inputParent implements ImmutableRef but isn't our internal struct, get an instance of the internal struct + // by calling Get on its ID. + p, err := cm.Get(ctx, inputParent.ID(), nil, append(opts, NoUpdateLastUsed)...) + if err != nil { + return nil, err + } + parent = p.(*immutableRef) + defer parent.Release(context.TODO()) + } + // On success, cloned parents will be not be released and will be owned by the returned ref + switch parent.kind() { + case Merge: + // if parent is itself a merge, flatten it out by just setting our parents directly to its parents + for _, grandparent := range parent.mergeParents { + parents.mergeParents = append(parents.mergeParents, grandparent.clone()) + } + default: + parents.mergeParents = append(parents.mergeParents, parent.clone()) + } + for dgst, handler := range parent.descHandlers { + dhs[dgst] = handler + } + } + + // On success, createMergeRef takes ownership of parents + mergeRef, err := cm.createMergeRef(ctx, parents, dhs, pg, opts...) 
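Merge above flattens nested merges into a single level of parents, and createMergeRef in the following lines short-circuits the zero- and one-input cases. A standalone model of that normalization:

package main

import "fmt"

type mref struct {
	id      string
	parents []*mref // non-empty only for merge refs
}

// normalizeMerge models the behavior spread across Merge/createMergeRef:
// nil inputs are dropped, an input that is itself a merge contributes its
// parents directly (one level of flattening), a merge of nothing is nil,
// and a merge of one thing is that thing.
func normalizeMerge(inputs []*mref) *mref {
	var parents []*mref
	for _, in := range inputs {
		switch {
		case in == nil:
		case len(in.parents) > 0:
			parents = append(parents, in.parents...)
		default:
			parents = append(parents, in)
		}
	}
	switch len(parents) {
	case 0:
		return nil
	case 1:
		return parents[0]
	default:
		return &mref{id: "merge", parents: parents}
	}
}

func main() {
	a, b, c := &mref{id: "a"}, &mref{id: "b"}, &mref{id: "c"}
	ab := &mref{id: "ab", parents: []*mref{a, b}}
	m := normalizeMerge([]*mref{ab, nil, c})
	fmt.Println(m.id, len(m.parents)) // merge 3
}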
+ if err != nil { + return nil, err + } + return mergeRef, nil +} + +func (cm *cacheManager) createMergeRef(ctx context.Context, parents parentRefs, dhs DescHandlers, pg progress.Controller, opts ...RefOption) (ir *immutableRef, rerr error) { + if len(parents.mergeParents) == 0 { + // merge of nothing is nothing + return nil, nil + } + if len(parents.mergeParents) == 1 { + // merge of 1 thing is that thing + parents.mergeParents[0].progress = pg + return parents.mergeParents[0], nil + } + + for _, parent := range parents.mergeParents { + if err := parent.Finalize(ctx); err != nil { + return nil, errors.Wrapf(err, "failed to finalize parent during merge") + } + } + + cm.mu.Lock() + defer cm.mu.Unlock() + + // Build the new ref + id := identity.NewID() + md, _ := cm.getMetadata(id) + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: false, + cm: cm, + cacheMetadata: md, + parentRefs: parents, + refs: make(map[ref]struct{}), + } + + if err := initializeMetadata(rec.cacheMetadata, rec.parentRefs, opts...); err != nil { + return nil, err + } + + snapshotID := id + l, err := cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = id + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "failed to create lease") + } + defer func() { + if rerr != nil { + if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ + ID: l.ID, + }); err != nil { + bklog.G(ctx).Errorf("failed to remove lease: %+v", err) + } + } + }() + + if err := cm.LeaseManager.AddResource(ctx, leases.Lease{ID: id}, leases.Resource{ + ID: snapshotID, + Type: "snapshots/" + cm.Snapshotter.Name(), + }); err != nil { + return nil, err + } + + rec.queueSnapshotID(snapshotID) + + if err := rec.commitMetadata(); err != nil { + return nil, err + } + + cm.records[id] = rec + + return rec.ref(true, dhs, pg), nil +} + +func (cm *cacheManager) Diff(ctx context.Context, lower, upper ImmutableRef, pg progress.Controller, opts ...RefOption) (ir ImmutableRef, rerr error) { + if lower == nil { + return nil, errors.New("lower ref for diff cannot be nil") + } + + var dps diffParents + parents := parentRefs{diffParents: &dps} + dhs := make(map[digest.Digest]*DescHandler) + defer func() { + if rerr != nil { + parents.release(context.TODO()) + } + }() + for i, inputParent := range []ImmutableRef{lower, upper} { + if inputParent == nil { + continue + } + var parent *immutableRef + if p, ok := inputParent.(*immutableRef); ok { + parent = p + } else { + // inputParent implements ImmutableRef but isn't our internal struct, get an instance of the internal struct + // by calling Get on its ID. + p, err := cm.Get(ctx, inputParent.ID(), nil, append(opts, NoUpdateLastUsed)...) + if err != nil { + return nil, err + } + parent = p.(*immutableRef) + defer parent.Release(context.TODO()) + } + // On success, cloned parents will not be released and will be owned by the returned ref + if i == 0 { + dps.lower = parent.clone() + } else { + dps.upper = parent.clone() + } + for dgst, handler := range parent.descHandlers { + dhs[dgst] = handler + } + } + + // Check to see if lower is an ancestor of upper. If so, define the diff as a merge + // of the layers separating the two. This can result in a different diff than just + // running the differ directly on lower and upper, but this is chosen as a default + // behavior in order to maximize layer re-use in the default case. 
We may add an + // option for controlling this behavior in the future if it's needed. + if dps.upper != nil { + lowerLayers := dps.lower.layerChain() + upperLayers := dps.upper.layerChain() + var lowerIsAncestor bool + // when upper is only 1 layer different than lower, we can skip this as we + // won't need a merge in order to get optimal behavior. + if len(upperLayers) > len(lowerLayers)+1 { + lowerIsAncestor = true + for i, lowerLayer := range lowerLayers { + if lowerLayer.ID() != upperLayers[i].ID() { + lowerIsAncestor = false + break + } + } + } + if lowerIsAncestor { + mergeParents := parentRefs{mergeParents: make([]*immutableRef, len(upperLayers)-len(lowerLayers))} + defer func() { + if rerr != nil { + mergeParents.release(context.TODO()) + } + }() + for i := len(lowerLayers); i < len(upperLayers); i++ { + subUpper := upperLayers[i] + subLower := subUpper.layerParent + // On success, cloned refs will not be released and will be owned by the returned ref + if subLower == nil { + mergeParents.mergeParents[i-len(lowerLayers)] = subUpper.clone() + } else { + subParents := parentRefs{diffParents: &diffParents{lower: subLower.clone(), upper: subUpper.clone()}} + diffRef, err := cm.createDiffRef(ctx, subParents, subUpper.descHandlers, pg, + WithDescription(fmt.Sprintf("diff %q -> %q", subLower.ID(), subUpper.ID()))) + if err != nil { + subParents.release(context.TODO()) + return nil, err + } + mergeParents.mergeParents[i-len(lowerLayers)] = diffRef + } + } + // On success, createMergeRef takes ownership of mergeParents + mergeRef, err := cm.createMergeRef(ctx, mergeParents, dhs, pg) + if err != nil { + return nil, err + } + parents.release(context.TODO()) + return mergeRef, nil + } + } + + // On success, createDiffRef takes ownership of parents + diffRef, err := cm.createDiffRef(ctx, parents, dhs, pg, opts...) 
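The ancestor fast path above rewrites diff(lower, upper) as a merge of the per-layer diffs separating the two chains. Its prefix check, reduced to a standalone form:

package main

import "fmt"

// separatingLayers reports whether lower's layer chain is a strict
// prefix of upper's with at least two extra layers (the case the code
// above rewrites into a merge), and if so returns the separating suffix.
func separatingLayers(lower, upper []string) ([]string, bool) {
	if len(upper) <= len(lower)+1 {
		return nil, false // 0 or 1 layer apart: a plain diff is already optimal
	}
	for i, id := range lower {
		if upper[i] != id {
			return nil, false // diverged: lower is not an ancestor
		}
	}
	return upper[len(lower):], true
}

func main() {
	lower := []string{"l1", "l2"}
	upper := []string{"l1", "l2", "l3", "l4"}
	fmt.Println(separatingLayers(lower, upper)) // [l3 l4] true
}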
+ if err != nil { + return nil, err + } + return diffRef, nil +} + +func (cm *cacheManager) createDiffRef(ctx context.Context, parents parentRefs, dhs DescHandlers, pg progress.Controller, opts ...RefOption) (ir *immutableRef, rerr error) { + dps := parents.diffParents + if err := dps.lower.Finalize(ctx); err != nil { + return nil, errors.Wrapf(err, "failed to finalize lower parent during diff") + } + if dps.upper != nil { + if err := dps.upper.Finalize(ctx); err != nil { + return nil, errors.Wrapf(err, "failed to finalize upper parent during diff") + } + } + + id := identity.NewID() + + snapshotID := id + + l, err := cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = id + l.Labels = map[string]string{ + "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), + } + return nil + }) + if err != nil { + return nil, errors.Wrap(err, "failed to create lease") + } + defer func() { + if rerr != nil { + if err := cm.LeaseManager.Delete(context.TODO(), leases.Lease{ + ID: l.ID, + }); err != nil { + bklog.G(ctx).Errorf("failed to remove lease: %+v", err) + } + } + }() + + if err := cm.LeaseManager.AddResource(ctx, leases.Lease{ID: id}, leases.Resource{ + ID: snapshotID, + Type: "snapshots/" + cm.Snapshotter.Name(), + }); err != nil { + return nil, err + } + + cm.mu.Lock() + defer cm.mu.Unlock() + + // Build the new ref + md, _ := cm.getMetadata(id) + + rec := &cacheRecord{ + mu: &sync.Mutex{}, + mutable: false, + cm: cm, + cacheMetadata: md, + parentRefs: parents, + refs: make(map[ref]struct{}), + } + + if err := initializeMetadata(rec.cacheMetadata, rec.parentRefs, opts...); err != nil { + return nil, err + } + + rec.queueSnapshotID(snapshotID) + if err := rec.commitMetadata(); err != nil { + return nil, err + } + + cm.records[id] = rec + + return rec.ref(true, dhs, pg), nil +} + func (cm *cacheManager) Prune(ctx context.Context, ch chan client.UsageInfo, opts ...client.PruneInfo) error { cm.muPrune.Lock() @@ -689,14 +1070,14 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt } if len(cr.refs) == 0 { - recordType := GetRecordType(cr) + recordType := cr.GetRecordType() if recordType == "" { recordType = client.UsageRecordTypeRegular } shared := false if opt.checkShared != nil { - shared = opt.checkShared.Exists(cr.ID(), cr.parentChain()) + shared = opt.checkShared.Exists(cr.ID(), cr.layerDigestChain()) } if !opt.all { @@ -713,7 +1094,7 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt Shared: shared, } - usageCount, lastUsedAt := getLastUsed(cr.md) + usageCount, lastUsedAt := cr.getLastUsed() c.LastUsedAt = lastUsedAt c.UsageCount = usageCount @@ -734,7 +1115,12 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt cr.dead = true // mark metadata as deleted in case we crash before cleanup finished - if err := setDeleted(cr.md); err != nil { + if err := cr.queueDeleted(); err != nil { + cr.mu.Unlock() + cm.mu.Unlock() + return err + } + if err := cr.commitMetadata(); err != nil { cr.mu.Unlock() cm.mu.Unlock() return err @@ -755,7 +1141,10 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt // only remove single record at a time if i == 0 { cr.dead = true - err = setDeleted(cr.md) + err = cr.queueDeleted() + if err == nil { + err = cr.commitMetadata() + } } cr.mu.Unlock() } @@ -773,14 +1162,14 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt // calculate sizes here so that lock does not need to be held for slow 
process for _, cr := range toDelete { - size := getSize(cr.md) + size := cr.getSize() if size == sizeUnknown && cr.equalImmutable != nil { - size = getSize(cr.equalImmutable.md) // benefit from DiskUsage calc + size = cr.equalImmutable.getSize() // benefit from DiskUsage calc } if size == sizeUnknown { // calling size will warm cache for next call - if _, err := cr.Size(ctx); err != nil { + if _, err := cr.size(ctx); err != nil { return err } } @@ -791,24 +1180,38 @@ func (cm *cacheManager) prune(ctx context.Context, ch chan client.UsageInfo, opt for _, cr := range toDelete { cr.mu.Lock() - usageCount, lastUsedAt := getLastUsed(cr.md) + usageCount, lastUsedAt := cr.getLastUsed() c := client.UsageInfo{ ID: cr.ID(), Mutable: cr.mutable, InUse: len(cr.refs) > 0, - Size: getSize(cr.md), - CreatedAt: GetCreatedAt(cr.md), - Description: GetDescription(cr.md), + Size: cr.getSize(), + CreatedAt: cr.GetCreatedAt(), + Description: cr.GetDescription(), LastUsedAt: lastUsedAt, UsageCount: usageCount, } - if cr.parent != nil { - c.Parent = cr.parent.ID() + switch cr.kind() { + case Layer: + c.Parents = []string{cr.layerParent.ID()} + case Merge: + c.Parents = make([]string, len(cr.mergeParents)) + for i, p := range cr.mergeParents { + c.Parents[i] = p.ID() + } + case Diff: + c.Parents = make([]string, 0, 2) + if cr.diffParents.lower != nil { + c.Parents = append(c.Parents, cr.diffParents.lower.ID()) + } + if cr.diffParents.upper != nil { + c.Parents = append(c.Parents, cr.diffParents.upper.ID()) + } } if c.Size == sizeUnknown && cr.equalImmutable != nil { - c.Size = getSize(cr.equalImmutable.md) // benefit from DiskUsage calc + c.Size = cr.equalImmutable.getSize() // benefit from DiskUsage calc } opt.totalSize -= c.Size @@ -849,12 +1252,15 @@ func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error { return errors.WithStack(err) } - var markAllParentsShared func(string) - markAllParentsShared = func(id string) { - if v, ok := m[id]; ok { - v.shared = true - if v.parent != "" { - markAllParentsShared(v.parent) + var markAllParentsShared func(...string) + markAllParentsShared = func(ids ...string) { + for _, id := range ids { + if id == "" { + continue + } + if v, ok := m[id]; ok { + v.shared = true + markAllParentsShared(v.parents...) 
} } } @@ -872,7 +1278,7 @@ func (cm *cacheManager) markShared(m map[string]*cacheUsageInfo) error { type cacheUsageInfo struct { refs int - parent string + parents []string size int64 mutable bool createdAt time.Time @@ -904,24 +1310,38 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) continue } - usageCount, lastUsedAt := getLastUsed(cr.md) + usageCount, lastUsedAt := cr.getLastUsed() c := &cacheUsageInfo{ refs: len(cr.refs), mutable: cr.mutable, - size: getSize(cr.md), - createdAt: GetCreatedAt(cr.md), + size: cr.getSize(), + createdAt: cr.GetCreatedAt(), usageCount: usageCount, lastUsedAt: lastUsedAt, - description: GetDescription(cr.md), + description: cr.GetDescription(), doubleRef: cr.equalImmutable != nil, - recordType: GetRecordType(cr), - parentChain: cr.parentChain(), + recordType: cr.GetRecordType(), + parentChain: cr.layerDigestChain(), } if c.recordType == "" { c.recordType = client.UsageRecordTypeRegular } - if cr.parent != nil { - c.parent = cr.parent.ID() + + switch cr.kind() { + case Layer: + c.parents = []string{cr.layerParent.ID()} + case Merge: + c.parents = make([]string, len(cr.mergeParents)) + for i, p := range cr.mergeParents { + c.parents[i] = p.ID() + } + case Diff: + if cr.diffParents.lower != nil { + c.parents = append(c.parents, cr.diffParents.lower.ID()) + } + if cr.diffParents.upper != nil { + c.parents = append(c.parents, cr.diffParents.upper.ID()) + } } if cr.mutable && c.refs > 0 { c.size = 0 // size can not be determined because it is changing @@ -938,12 +1358,14 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) } for id := range rescan { v := m[id] - if v.refs == 0 && v.parent != "" { - m[v.parent].refs-- - if v.doubleRef { - m[v.parent].refs-- + if v.refs == 0 { + for _, p := range v.parents { + m[p].refs-- + if v.doubleRef { + m[p].refs-- + } + rescan[p] = struct{}{} } - rescan[v.parent] = struct{}{} } delete(rescan, id) } @@ -960,7 +1382,7 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) Mutable: cr.mutable, InUse: cr.refs > 0, Size: cr.size, - Parent: cr.parent, + Parents: cr.parents, CreatedAt: cr.createdAt, Description: cr.description, LastUsedAt: cr.lastUsedAt, @@ -979,12 +1401,14 @@ func (cm *cacheManager) DiskUsage(ctx context.Context, opt client.DiskUsageInfo) if d.Size == sizeUnknown { func(d *client.UsageInfo) { eg.Go(func() error { - ref, err := cm.Get(ctx, d.ID, NoUpdateLastUsed) + cm.mu.Lock() + ref, err := cm.get(ctx, d.ID, nil, NoUpdateLastUsed) + cm.mu.Unlock() if err != nil { d.Size = 0 return nil } - s, err := ref.Size(ctx) + s, err := ref.size(ctx) if err != nil { return err } @@ -1015,41 +1439,33 @@ const ( cachePolicyRetain ) -type withMetadata interface { - Metadata() *metadata.StorageItem -} - type noUpdateLastUsed struct{} var NoUpdateLastUsed noUpdateLastUsed -func HasCachePolicyRetain(m withMetadata) bool { - return getCachePolicy(m.Metadata()) == cachePolicyRetain +func CachePolicyRetain(m *cacheMetadata) error { + return m.SetCachePolicyRetain() } -func CachePolicyRetain(m withMetadata) error { - return queueCachePolicy(m.Metadata(), cachePolicyRetain) -} - -func CachePolicyDefault(m withMetadata) error { - return queueCachePolicy(m.Metadata(), cachePolicyDefault) +func CachePolicyDefault(m *cacheMetadata) error { + return m.SetCachePolicyDefault() } func WithDescription(descr string) RefOption { - return func(m withMetadata) error { - return queueDescription(m.Metadata(), descr) + return func(m *cacheMetadata) error { + 
return m.queueDescription(descr) } } func WithRecordType(t client.UsageRecordType) RefOption { - return func(m withMetadata) error { - return queueRecordType(m.Metadata(), t) + return func(m *cacheMetadata) error { + return m.queueRecordType(t) } } func WithCreationTime(tm time.Time) RefOption { - return func(m withMetadata) error { - return queueCreatedAt(m.Metadata(), tm) + return func(m *cacheMetadata) error { + return m.queueCreatedAt(tm) } } @@ -1057,17 +1473,16 @@ func WithCreationTime(tm time.Time) RefOption { // initializeMetadata while still being a RefOption, so wrapping it in a // different type ensures initializeMetadata won't catch it too and duplicate // setting the metadata. -type imageRefOption func(m withMetadata) error +type imageRefOption func(m *cacheMetadata) error // WithImageRef appends the given imageRef to the cache ref's metadata func WithImageRef(imageRef string) RefOption { - return imageRefOption(func(m withMetadata) error { - return appendImageRef(m.Metadata(), imageRef) + return imageRefOption(func(m *cacheMetadata) error { + return m.appendImageRef(imageRef) }) } -func setImageRefMetadata(m withMetadata, opts ...RefOption) error { - md := m.Metadata() +func setImageRefMetadata(m *cacheMetadata, opts ...RefOption) error { for _, opt := range opts { if fn, ok := opt.(imageRefOption); ok { if err := fn(m); err != nil { @@ -1075,32 +1490,59 @@ func setImageRefMetadata(m withMetadata, opts ...RefOption) error { } } } - return md.Commit() + return m.commitMetadata() } -func initializeMetadata(m withMetadata, parent string, opts ...RefOption) error { - md := m.Metadata() - if tm := GetCreatedAt(md); !tm.IsZero() { +func withSnapshotID(id string) RefOption { + return imageRefOption(func(m *cacheMetadata) error { + return m.queueSnapshotID(id) + }) +} + +func initializeMetadata(m *cacheMetadata, parents parentRefs, opts ...RefOption) error { + if tm := m.GetCreatedAt(); !tm.IsZero() { return nil } - if err := queueParent(md, parent); err != nil { - return err + switch { + case parents.layerParent != nil: + if err := m.queueParent(parents.layerParent.ID()); err != nil { + return err + } + case len(parents.mergeParents) > 0: + var ids []string + for _, p := range parents.mergeParents { + ids = append(ids, p.ID()) + } + if err := m.queueMergeParents(ids); err != nil { + return err + } + case parents.diffParents != nil: + if parents.diffParents.lower != nil { + if err := m.queueLowerDiffParent(parents.diffParents.lower.ID()); err != nil { + return err + } + } + if parents.diffParents.upper != nil { + if err := m.queueUpperDiffParent(parents.diffParents.upper.ID()); err != nil { + return err + } + } } - if err := queueCreatedAt(md, time.Now()); err != nil { + if err := m.queueCreatedAt(time.Now()); err != nil { return err } for _, opt := range opts { - if fn, ok := opt.(func(withMetadata) error); ok { + if fn, ok := opt.(func(*cacheMetadata) error); ok { if err := fn(m); err != nil { return err } } } - return md.Commit() + return m.commitMetadata() } func adaptUsageInfo(info *client.UsageInfo) filters.Adaptor { @@ -1112,8 +1554,8 @@ func adaptUsageInfo(info *client.UsageInfo) filters.Adaptor { switch fieldpath[0] { case "id": return info.ID, info.ID != "" - case "parent": - return info.Parent, info.Parent != "" + case "parents": + return strings.Join(info.Parents, ";"), len(info.Parents) > 0 case "description": return info.Description, info.Description != "" case "inuse": @@ -1196,7 +1638,7 @@ func sortDeleteRecords(toDelete []*deleteRecord) { }) } -func 
diffIDFromDescriptor(desc ocispec.Descriptor) (digest.Digest, error) { +func diffIDFromDescriptor(desc ocispecs.Descriptor) (digest.Digest, error) { diffIDStr, ok := desc.Annotations["containerd.io/uncompressed"] if !ok { return "", errors.Errorf("missing uncompressed annotation for %s", desc.Digest) diff --git a/vendor/github.com/moby/buildkit/cache/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata.go index a5c7df7f4c..121110bd13 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata.go @@ -1,10 +1,13 @@ package cache import ( + "context" "time" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" + "github.com/moby/buildkit/util/bklog" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" bolt "go.etcd.io/bbolt" ) @@ -21,6 +24,9 @@ const keyLayerType = "cache.layerType" const keyRecordType = "cache.recordType" const keyCommitted = "snapshot.committed" const keyParent = "cache.parent" +const keyMergeParents = "cache.mergeParents" +const keyLowerDiffParent = "cache.lowerDiffParent" +const keyUpperDiffParent = "cache.upperDiffParent" const keyDiffID = "cache.diffID" const keyChainID = "cache.chainID" const keyBlobChainID = "cache.blobChainID" @@ -29,446 +35,359 @@ const keySnapshot = "cache.snapshot" const keyBlobOnly = "cache.blobonly" const keyMediaType = "cache.mediatype" const keyImageRefs = "cache.imageRefs" - -// BlobSize is the packed blob size as specified in the oci descriptor -const keyBlobSize = "cache.blobsize" - const keyDeleted = "cache.deleted" +const keyBlobSize = "cache.blobsize" // the packed blob size as specified in the oci descriptor +const keyURLs = "cache.layer.urls" -func queueDiffID(si *metadata.StorageItem, str string) error { - if str == "" { - return nil - } - v, err := metadata.NewValue(str) +// Indexes +const blobchainIndex = "blobchainid:" +const chainIndex = "chainid:" + +type MetadataStore interface { + Search(context.Context, string) ([]RefMetadata, error) +} + +type RefMetadata interface { + ID() string + + GetDescription() string + SetDescription(string) error + + GetCreatedAt() time.Time + SetCreatedAt(time.Time) error + + HasCachePolicyDefault() bool + SetCachePolicyDefault() error + HasCachePolicyRetain() bool + SetCachePolicyRetain() error + + GetLayerType() string + SetLayerType(string) error + + GetRecordType() client.UsageRecordType + SetRecordType(client.UsageRecordType) error + + GetEqualMutable() (RefMetadata, bool) + + // generic getters/setters for external packages + GetString(string) string + SetString(key, val, index string) error + + GetExternal(string) ([]byte, error) + SetExternal(string, []byte) error + + ClearValueAndIndex(string, string) error +} + +func (cm *cacheManager) Search(ctx context.Context, idx string) ([]RefMetadata, error) { + cm.mu.Lock() + defer cm.mu.Unlock() + return cm.search(ctx, idx) +} + +// callers must hold cm.mu lock +func (cm *cacheManager) search(ctx context.Context, idx string) ([]RefMetadata, error) { + sis, err := cm.MetadataStore.Search(idx) if err != nil { - return errors.Wrap(err, "failed to create diffID value") + return nil, err } - si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, keyDiffID, v) - }) - return nil -} - -func getMediaType(si *metadata.StorageItem) string { - v := si.Get(keyMediaType) - if v == nil { - return si.ID() - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func queueMediaType(si *metadata.StorageItem, str 
string) error { - if str == "" { - return nil - } - v, err := metadata.NewValue(str) - if err != nil { - return errors.Wrap(err, "failed to create mediaType value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyMediaType, v) - }) - return nil -} - -func getSnapshotID(si *metadata.StorageItem) string { - v := si.Get(keySnapshot) - if v == nil { - return si.ID() - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func queueSnapshotID(si *metadata.StorageItem, str string) error { - if str == "" { - return nil - } - v, err := metadata.NewValue(str) - if err != nil { - return errors.Wrap(err, "failed to create chainID value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keySnapshot, v) - }) - return nil -} - -func getDiffID(si *metadata.StorageItem) string { - v := si.Get(keyDiffID) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func queueChainID(si *metadata.StorageItem, str string) error { - if str == "" { - return nil - } - v, err := metadata.NewValue(str) - if err != nil { - return errors.Wrap(err, "failed to create chainID value") - } - v.Index = "chainid:" + str - si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, keyChainID, v) - }) - return nil -} - -func getBlobChainID(si *metadata.StorageItem) string { - v := si.Get(keyBlobChainID) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func queueBlobChainID(si *metadata.StorageItem, str string) error { - if str == "" { - return nil - } - v, err := metadata.NewValue(str) - if err != nil { - return errors.Wrap(err, "failed to create chainID value") - } - v.Index = "blobchainid:" + str - si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, keyBlobChainID, v) - }) - return nil -} - -func getChainID(si *metadata.StorageItem) string { - v := si.Get(keyChainID) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func queueBlob(si *metadata.StorageItem, str string) error { - if str == "" { - return nil - } - v, err := metadata.NewValue(str) - if err != nil { - return errors.Wrap(err, "failed to create blob value") - } - si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, keyBlob, v) - }) - return nil -} - -func getBlob(si *metadata.StorageItem) string { - v := si.Get(keyBlob) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str -} - -func queueBlobOnly(si *metadata.StorageItem, b bool) error { - v, err := metadata.NewValue(b) - if err != nil { - return errors.Wrap(err, "failed to create blobonly value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyBlobOnly, v) - }) - return nil -} - -func getBlobOnly(si *metadata.StorageItem) bool { - v := si.Get(keyBlobOnly) - if v == nil { - return false - } - var blobOnly bool - if err := v.Unmarshal(&blobOnly); err != nil { - return false - } - return blobOnly -} - -func setDeleted(si *metadata.StorageItem) error { - v, err := metadata.NewValue(true) - if err != nil { - return errors.Wrap(err, "failed to create deleted value") - } - si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, keyDeleted, v) - }) - return nil -} - -func getDeleted(si *metadata.StorageItem) bool { - v := si.Get(keyDeleted) - if v == nil { - return false - } - var deleted bool 
- if err := v.Unmarshal(&deleted); err != nil { - return false - } - return deleted -} - -func queueCommitted(si *metadata.StorageItem) error { - v, err := metadata.NewValue(true) - if err != nil { - return errors.Wrap(err, "failed to create committed value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyCommitted, v) - }) - return nil -} - -func getCommitted(si *metadata.StorageItem) bool { - v := si.Get(keyCommitted) - if v == nil { - return false - } - var committed bool - if err := v.Unmarshal(&committed); err != nil { - return false - } - return committed -} - -func queueParent(si *metadata.StorageItem, parent string) error { - if parent == "" { - return nil - } - v, err := metadata.NewValue(parent) - if err != nil { - return errors.Wrap(err, "failed to create parent value") - } - si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, keyParent, v) - }) - return nil -} - -func getParent(si *metadata.StorageItem) string { - v := si.Get(keyParent) - if v == nil { - return "" - } - var parent string - if err := v.Unmarshal(&parent); err != nil { - return "" - } - return parent -} - -func setSize(si *metadata.StorageItem, s int64) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrap(err, "failed to create size value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keySize, v) - }) - return nil -} - -func getSize(si *metadata.StorageItem) int64 { - v := si.Get(keySize) - if v == nil { - return sizeUnknown - } - var size int64 - if err := v.Unmarshal(&size); err != nil { - return sizeUnknown - } - return size -} - -func appendImageRef(si *metadata.StorageItem, s string) error { - return si.GetAndSetValue(keyImageRefs, func(v *metadata.Value) (*metadata.Value, error) { - var imageRefs []string - if v != nil { - if err := v.Unmarshal(&imageRefs); err != nil { - return nil, err - } + var mds []RefMetadata + for _, si := range sis { + // calling getMetadata ensures we return the same storage item object that's cached in memory + md, ok := cm.getMetadata(si.ID()) + if !ok { + bklog.G(ctx).Warnf("missing metadata for storage item %q during search for %q", si.ID(), idx) + continue } - for _, existing := range imageRefs { - if existing == s { - return nil, metadata.ErrSkipSetValue - } + if md.getDeleted() { + continue } - imageRefs = append(imageRefs, s) - v, err := metadata.NewValue(imageRefs) - if err != nil { - return nil, errors.Wrap(err, "failed to create imageRefs value") - } - return v, nil - }) + mds = append(mds, md) + } + return mds, nil } -func getImageRefs(si *metadata.StorageItem) []string { - v := si.Get(keyImageRefs) - if v == nil { +// callers must hold cm.mu lock +func (cm *cacheManager) getMetadata(id string) (*cacheMetadata, bool) { + if rec, ok := cm.records[id]; ok { + return rec.cacheMetadata, true + } + si, ok := cm.MetadataStore.Get(id) + md := &cacheMetadata{si} + return md, ok +} + +// callers must hold cm.mu lock +func (cm *cacheManager) searchBlobchain(ctx context.Context, id digest.Digest) ([]RefMetadata, error) { + return cm.search(ctx, blobchainIndex+id.String()) +} + +// callers must hold cm.mu lock +func (cm *cacheManager) searchChain(ctx context.Context, id digest.Digest) ([]RefMetadata, error) { + return cm.search(ctx, chainIndex+id.String()) +} + +type cacheMetadata struct { + si *metadata.StorageItem +} + +func (md *cacheMetadata) ID() string { + return md.si.ID() +} + +func (md *cacheMetadata) commitMetadata() error { + return md.si.Commit() +} + +func (md *cacheMetadata) 
GetDescription() string { + return md.GetString(keyDescription) +} + +func (md *cacheMetadata) SetDescription(descr string) error { + return md.setValue(keyDescription, descr, "") +} + +func (md *cacheMetadata) queueDescription(descr string) error { + return md.queueValue(keyDescription, descr, "") +} + +func (md *cacheMetadata) queueCommitted(b bool) error { + return md.queueValue(keyCommitted, b, "") +} + +func (md *cacheMetadata) getCommitted() bool { + return md.getBool(keyCommitted) +} + +func (md *cacheMetadata) GetLayerType() string { + return md.GetString(keyLayerType) +} + +func (md *cacheMetadata) SetLayerType(value string) error { + return md.setValue(keyLayerType, value, "") +} + +func (md *cacheMetadata) GetRecordType() client.UsageRecordType { + return client.UsageRecordType(md.GetString(keyRecordType)) +} + +func (md *cacheMetadata) SetRecordType(value client.UsageRecordType) error { + return md.setValue(keyRecordType, value, "") +} + +func (md *cacheMetadata) queueRecordType(value client.UsageRecordType) error { + return md.queueValue(keyRecordType, value, "") +} + +func (md *cacheMetadata) SetCreatedAt(tm time.Time) error { + return md.setTime(keyCreatedAt, tm, "") +} + +func (md *cacheMetadata) queueCreatedAt(tm time.Time) error { + return md.queueTime(keyCreatedAt, tm, "") +} + +func (md *cacheMetadata) GetCreatedAt() time.Time { + return md.getTime(keyCreatedAt) +} + +func (md *cacheMetadata) HasCachePolicyDefault() bool { + return md.getCachePolicy() == cachePolicyDefault +} + +func (md *cacheMetadata) SetCachePolicyDefault() error { + return md.setCachePolicy(cachePolicyDefault) +} + +func (md *cacheMetadata) HasCachePolicyRetain() bool { + return md.getCachePolicy() == cachePolicyRetain +} + +func (md *cacheMetadata) SetCachePolicyRetain() error { + return md.setCachePolicy(cachePolicyRetain) +} + +func (md *cacheMetadata) GetExternal(s string) ([]byte, error) { + return md.si.GetExternal(s) +} + +func (md *cacheMetadata) SetExternal(s string, dt []byte) error { + return md.si.SetExternal(s, dt) +} + +func (md *cacheMetadata) GetEqualMutable() (RefMetadata, bool) { + emSi, ok := md.si.Storage().Get(md.getEqualMutable()) + if !ok { + return nil, false + } + return &cacheMetadata{emSi}, true +} + +func (md *cacheMetadata) getEqualMutable() string { + return md.GetString(keyEqualMutable) +} + +func (md *cacheMetadata) setEqualMutable(s string) error { + return md.queueValue(keyEqualMutable, s, "") +} + +func (md *cacheMetadata) clearEqualMutable() error { + md.si.Queue(func(b *bolt.Bucket) error { + return md.si.SetValue(b, keyEqualMutable, nil) + }) + return nil +} + +func (md *cacheMetadata) queueDiffID(str digest.Digest) error { + return md.queueValue(keyDiffID, str, "") +} + +func (md *cacheMetadata) getMediaType() string { + return md.GetString(keyMediaType) +} + +func (md *cacheMetadata) queueMediaType(str string) error { + return md.queueValue(keyMediaType, str, "") +} + +func (md *cacheMetadata) getSnapshotID() string { + return md.GetString(keySnapshot) +} + +func (md *cacheMetadata) queueSnapshotID(str string) error { + return md.queueValue(keySnapshot, str, "") +} + +func (md *cacheMetadata) getDiffID() digest.Digest { + return digest.Digest(md.GetString(keyDiffID)) +} + +func (md *cacheMetadata) queueChainID(str digest.Digest) error { + return md.queueValue(keyChainID, str, chainIndex+str.String()) +} + +func (md *cacheMetadata) getBlobChainID() digest.Digest { + return digest.Digest(md.GetString(keyBlobChainID)) +} + +func (md *cacheMetadata) 
queueBlobChainID(str digest.Digest) error { + return md.queueValue(keyBlobChainID, str, blobchainIndex+str.String()) +} + +func (md *cacheMetadata) getChainID() digest.Digest { + return digest.Digest(md.GetString(keyChainID)) +} + +func (md *cacheMetadata) queueBlob(str digest.Digest) error { + return md.queueValue(keyBlob, str, "") +} + +func (md *cacheMetadata) appendURLs(urls []string) error { + if len(urls) == 0 { return nil } - var refs []string - if err := v.Unmarshal(&refs); err != nil { - return nil - } - return refs + return md.appendStringSlice(keyURLs, urls...) } -func queueBlobSize(si *metadata.StorageItem, s int64) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrap(err, "failed to create blobsize value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyBlobSize, v) - }) - return nil +func (md *cacheMetadata) getURLs() []string { + return md.GetStringSlice(keyURLs) } -func getBlobSize(si *metadata.StorageItem) int64 { - v := si.Get(keyBlobSize) - if v == nil { - return sizeUnknown - } - var size int64 - if err := v.Unmarshal(&size); err != nil { - return sizeUnknown - } - return size +func (md *cacheMetadata) getBlob() digest.Digest { + return digest.Digest(md.GetString(keyBlob)) } -func getEqualMutable(si *metadata.StorageItem) string { - v := si.Get(keyEqualMutable) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str +func (md *cacheMetadata) queueBlobOnly(b bool) error { + return md.queueValue(keyBlobOnly, b, "") } -func setEqualMutable(si *metadata.StorageItem, s string) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrapf(err, "failed to create %s meta value", keyEqualMutable) - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyEqualMutable, v) - }) - return nil +func (md *cacheMetadata) getBlobOnly() bool { + return md.getBool(keyBlobOnly) } -func clearEqualMutable(si *metadata.StorageItem) error { - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyEqualMutable, nil) - }) - return nil +func (md *cacheMetadata) queueDeleted() error { + return md.queueValue(keyDeleted, true, "") } -func queueCachePolicy(si *metadata.StorageItem, p cachePolicy) error { - v, err := metadata.NewValue(p) - if err != nil { - return errors.Wrap(err, "failed to create cachePolicy value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyCachePolicy, v) - }) - return nil +func (md *cacheMetadata) getDeleted() bool { + return md.getBool(keyDeleted) } -func getCachePolicy(si *metadata.StorageItem) cachePolicy { - v := si.Get(keyCachePolicy) - if v == nil { - return cachePolicyDefault - } - var p cachePolicy - if err := v.Unmarshal(&p); err != nil { - return cachePolicyDefault - } - return p +func (md *cacheMetadata) queueParent(parent string) error { + return md.queueValue(keyParent, parent, "") } -func queueDescription(si *metadata.StorageItem, descr string) error { - v, err := metadata.NewValue(descr) - if err != nil { - return errors.Wrap(err, "failed to create description value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyDescription, v) - }) - return nil +func (md *cacheMetadata) getParent() string { + return md.GetString(keyParent) } -func GetDescription(si *metadata.StorageItem) string { - v := si.Get(keyDescription) - if v == nil { - return "" - } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" - } - return str +func (md *cacheMetadata) 
queueMergeParents(parents []string) error { + return md.queueValue(keyMergeParents, parents, "") } -func queueCreatedAt(si *metadata.StorageItem, tm time.Time) error { - v, err := metadata.NewValue(tm.UnixNano()) - if err != nil { - return errors.Wrap(err, "failed to create createdAt value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyCreatedAt, v) - }) - return nil +func (md *cacheMetadata) getMergeParents() []string { + return md.getStringSlice(keyMergeParents) } -func GetCreatedAt(si *metadata.StorageItem) time.Time { - v := si.Get(keyCreatedAt) - if v == nil { - return time.Time{} - } - var tm int64 - if err := v.Unmarshal(&tm); err != nil { - return time.Time{} - } - return time.Unix(tm/1e9, tm%1e9) +func (md *cacheMetadata) queueLowerDiffParent(parent string) error { + return md.queueValue(keyLowerDiffParent, parent, "") } -func getLastUsed(si *metadata.StorageItem) (int, *time.Time) { - v := si.Get(keyUsageCount) +func (md *cacheMetadata) getLowerDiffParent() string { + return md.GetString(keyLowerDiffParent) +} + +func (md *cacheMetadata) queueUpperDiffParent(parent string) error { + return md.queueValue(keyUpperDiffParent, parent, "") +} + +func (md *cacheMetadata) getUpperDiffParent() string { + return md.GetString(keyUpperDiffParent) +} + +func (md *cacheMetadata) queueSize(s int64) error { + return md.queueValue(keySize, s, "") +} + +func (md *cacheMetadata) getSize() int64 { + if size, ok := md.getInt64(keySize); ok { + return size + } + return sizeUnknown +} + +func (md *cacheMetadata) appendImageRef(s string) error { + return md.appendStringSlice(keyImageRefs, s) +} + +func (md *cacheMetadata) getImageRefs() []string { + return md.getStringSlice(keyImageRefs) +} + +func (md *cacheMetadata) queueBlobSize(s int64) error { + return md.queueValue(keyBlobSize, s, "") +} + +func (md *cacheMetadata) getBlobSize() int64 { + if size, ok := md.getInt64(keyBlobSize); ok { + return size + } + return sizeUnknown +} + +func (md *cacheMetadata) setCachePolicy(p cachePolicy) error { + return md.setValue(keyCachePolicy, p, "") +} + +func (md *cacheMetadata) getCachePolicy() cachePolicy { + if i, ok := md.getInt64(keyCachePolicy); ok { + return cachePolicy(i) + } + return cachePolicyDefault +} + +func (md *cacheMetadata) getLastUsed() (int, *time.Time) { + v := md.si.Get(keyUsageCount) if v == nil { return 0, nil } @@ -476,7 +395,7 @@ func getLastUsed(si *metadata.StorageItem) (int, *time.Time) { if err := v.Unmarshal(&usageCount); err != nil { return 0, nil } - v = si.Get(keyLastUsedAt) + v = md.si.Get(keyLastUsedAt) if v == nil { return usageCount, nil } @@ -488,8 +407,8 @@ func getLastUsed(si *metadata.StorageItem) (int, *time.Time) { return usageCount, &tm } -func updateLastUsed(si *metadata.StorageItem) error { - count, _ := getLastUsed(si) +func (md *cacheMetadata) updateLastUsed() error { + count, _ := md.getLastUsed() count++ v, err := metadata.NewValue(count) @@ -500,27 +419,57 @@ func updateLastUsed(si *metadata.StorageItem) error { if err != nil { return errors.Wrap(err, "failed to create lastUsedAt value") } - return si.Update(func(b *bolt.Bucket) error { - if err := si.SetValue(b, keyUsageCount, v); err != nil { + return md.si.Update(func(b *bolt.Bucket) error { + if err := md.si.SetValue(b, keyUsageCount, v); err != nil { return err } - return si.SetValue(b, keyLastUsedAt, v2) + return md.si.SetValue(b, keyLastUsedAt, v2) }) } -func SetLayerType(m withMetadata, value string) error { +func (md *cacheMetadata) queueValue(key string, value interface{}, 
index string) error { v, err := metadata.NewValue(value) if err != nil { - return errors.Wrap(err, "failed to create layertype value") + return errors.Wrap(err, "failed to create value") } - m.Metadata().Queue(func(b *bolt.Bucket) error { - return m.Metadata().SetValue(b, keyLayerType, v) + v.Index = index + md.si.Queue(func(b *bolt.Bucket) error { + return md.si.SetValue(b, key, v) }) - return m.Metadata().Commit() + return nil } -func GetLayerType(m withMetadata) string { - v := m.Metadata().Get(keyLayerType) +func (md *cacheMetadata) SetString(key, value string, index string) error { + return md.setValue(key, value, index) +} + +func (md *cacheMetadata) setValue(key string, value interface{}, index string) error { + v, err := metadata.NewValue(value) + if err != nil { + return errors.Wrap(err, "failed to create value") + } + v.Index = index + return md.si.Update(func(b *bolt.Bucket) error { + return md.si.SetValue(b, key, v) + }) +} + +func (md *cacheMetadata) ClearValueAndIndex(key string, index string) error { + currentVal := md.GetString(key) + return md.si.Update(func(b *bolt.Bucket) error { + if err := md.si.SetValue(b, key, nil); err != nil { + return err + } + if currentVal != "" { + // force clearing index, see #1836 https://github.com/moby/buildkit/pull/1836 + return md.si.ClearIndex(b.Tx(), index+currentVal) + } + return nil + }) +} + +func (md *cacheMetadata) GetString(key string) string { + v := md.si.Get(key) if v == nil { return "" } @@ -531,32 +480,105 @@ func GetLayerType(m withMetadata) string { return str } -func GetRecordType(m withMetadata) client.UsageRecordType { - v := m.Metadata().Get(keyRecordType) +func (md *cacheMetadata) GetStringSlice(key string) []string { + v := md.si.Get(key) if v == nil { - return "" + return nil } - var str string - if err := v.Unmarshal(&str); err != nil { - return "" + var val []string + if err := v.Unmarshal(&val); err != nil { + return nil } - return client.UsageRecordType(str) + return val } -func SetRecordType(m withMetadata, value client.UsageRecordType) error { - if err := queueRecordType(m.Metadata(), value); err != nil { - return err - } - return m.Metadata().Commit() +func (md *cacheMetadata) setTime(key string, value time.Time, index string) error { + return md.setValue(key, value.UnixNano(), index) } -func queueRecordType(si *metadata.StorageItem, value client.UsageRecordType) error { - v, err := metadata.NewValue(value) - if err != nil { - return errors.Wrap(err, "failed to create recordtype value") +func (md *cacheMetadata) queueTime(key string, value time.Time, index string) error { + return md.queueValue(key, value.UnixNano(), index) +} + +func (md *cacheMetadata) getTime(key string) time.Time { + v := md.si.Get(key) + if v == nil { + return time.Time{} } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyRecordType, v) + var tm int64 + if err := v.Unmarshal(&tm); err != nil { + return time.Time{} + } + return time.Unix(tm/1e9, tm%1e9) +} + +func (md *cacheMetadata) getBool(key string) bool { + v := md.si.Get(key) + if v == nil { + return false + } + var b bool + if err := v.Unmarshal(&b); err != nil { + return false + } + return b +} + +func (md *cacheMetadata) getInt64(key string) (int64, bool) { + v := md.si.Get(key) + if v == nil { + return 0, false + } + var i int64 + if err := v.Unmarshal(&i); err != nil { + return 0, false + } + return i, true +} + +func (md *cacheMetadata) appendStringSlice(key string, values ...string) error { + return md.si.GetAndSetValue(key, func(v *metadata.Value) 
(*metadata.Value, error) { + var slice []string + if v != nil { + if err := v.Unmarshal(&slice); err != nil { + return nil, err + } + } + + idx := make(map[string]struct{}, len(values)) + for _, v := range values { + idx[v] = struct{}{} + } + + for _, existing := range slice { + if _, ok := idx[existing]; ok { + delete(idx, existing) + } + } + + if len(idx) == 0 { + return nil, metadata.ErrSkipSetValue + } + + for value := range idx { + slice = append(slice, value) + } + v, err := metadata.NewValue(slice) + if err != nil { + return nil, err + } + return v, nil }) - return nil +} + +func (md *cacheMetadata) getStringSlice(key string) []string { + v := md.si.Get(key) + if v == nil { + return nil + } + var s []string + if err := v.Unmarshal(&s); err != nil { + return nil + } + return s } diff --git a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go index ff76a0be5d..ae957c3e72 100644 --- a/vendor/github.com/moby/buildkit/cache/metadata/metadata.go +++ b/vendor/github.com/moby/buildkit/cache/metadata/metadata.go @@ -235,7 +235,7 @@ func newStorageItem(id string, b *bolt.Bucket, s *Store) (*StorageItem, error) { return si, nil } -func (s *StorageItem) Storage() *Store { // TODO: used in local source. how to remove this? +func (s *StorageItem) Storage() *Store { return s.storage } @@ -345,15 +345,25 @@ func (s *StorageItem) SetValue(b *bolt.Bucket, key string, v *Value) error { return s.setValue(b, key, v) } +func (s *StorageItem) ClearIndex(tx *bolt.Tx, index string) error { + s.vmu.Lock() + defer s.vmu.Unlock() + return s.clearIndex(tx, index) +} + +func (s *StorageItem) clearIndex(tx *bolt.Tx, index string) error { + b, err := tx.CreateBucketIfNotExists([]byte(indexBucket)) + if err != nil { + return errors.WithStack(err) + } + return b.Delete([]byte(indexKey(index, s.ID()))) +} + func (s *StorageItem) setValue(b *bolt.Bucket, key string, v *Value) error { if v == nil { if old, ok := s.values[key]; ok { if old.Index != "" { - b, err := b.Tx().CreateBucketIfNotExists([]byte(indexBucket)) - if err != nil { - return errors.WithStack(err) - } - b.Delete([]byte(indexKey(old.Index, s.ID()))) // ignore error + s.clearIndex(b.Tx(), old.Index) // ignore error } } if err := b.Put([]byte(key), nil); err != nil { diff --git a/vendor/github.com/moby/buildkit/cache/migrate_v2.go b/vendor/github.com/moby/buildkit/cache/migrate_v2.go index d94ef1eef0..803b0173a3 100644 --- a/vendor/github.com/moby/buildkit/cache/migrate_v2.go +++ b/vendor/github.com/moby/buildkit/cache/migrate_v2.go @@ -13,22 +13,23 @@ import ( "github.com/containerd/containerd/snapshots" "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/snapshot" + "github.com/moby/buildkit/util/bklog" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) func migrateChainID(si *metadata.StorageItem, all map[string]*metadata.StorageItem) (digest.Digest, digest.Digest, error) { - diffID := digest.Digest(getDiffID(si)) + md := &cacheMetadata{si} + diffID := md.getDiffID() if diffID == "" { return "", "", nil } - blobID := digest.Digest(getBlob(si)) + blobID := md.getBlob() if blobID == "" { return "", "", nil } - chainID := digest.Digest(getChainID(si)) - blobChainID := digest.Digest(getBlobChainID(si)) + chainID := md.getChainID() + blobChainID := md.getBlobChainID() if chainID != "" && blobChainID != "" { return chainID, blobChainID, nil @@ -37,7 +38,7 @@ func migrateChainID(si *metadata.StorageItem, all 
map[string]*metadata.StorageIt chainID = diffID blobChainID = digest.FromBytes([]byte(blobID + " " + diffID)) - parent := getParent(si) + parent := md.getParent() if parent != "" { pChainID, pBlobChainID, err := migrateChainID(all[parent], all) if err != nil { @@ -47,10 +48,10 @@ func migrateChainID(si *metadata.StorageItem, all map[string]*metadata.StorageIt blobChainID = digest.FromBytes([]byte(pBlobChainID + " " + blobChainID)) } - queueChainID(si, chainID.String()) - queueBlobChainID(si, blobChainID.String()) + md.queueChainID(chainID) + md.queueBlobChainID(blobChainID) - return chainID, blobChainID, si.Commit() + return chainID, blobChainID, md.commitMetadata() } func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapshot.Snapshotter, lm leases.Manager) error { @@ -105,31 +106,34 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho // add committed, parent, snapshot for id, item := range byID { - em := getEqualMutable(item) + md := &cacheMetadata{item} + em := md.getEqualMutable() if em == "" { info, err := s.Stat(ctx, id) if err != nil { return err } if info.Kind == snapshots.KindCommitted { - queueCommitted(item) + md.queueCommitted(true) } if info.Parent != "" { - queueParent(item, info.Parent) + md.queueParent(info.Parent) } } else { - queueCommitted(item) + md.queueCommitted(true) } - queueSnapshotID(item, id) - item.Commit() + md.queueSnapshotID(id) + md.commitMetadata() } for _, item := range byID { - em := getEqualMutable(item) + md := &cacheMetadata{item} + em := md.getEqualMutable() if em != "" { - if getParent(item) == "" { - queueParent(item, getParent(byID[em])) - item.Commit() + if md.getParent() == "" { + emMd := &cacheMetadata{byID[em]} + md.queueParent(emMd.getParent()) + md.commitMetadata() } } } @@ -151,13 +155,13 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho if _, err := cs.Info(ctx, digest.Digest(blob.Blobsum)); err != nil { continue } - queueDiffID(item, blob.DiffID) - queueBlob(item, blob.Blobsum) - queueMediaType(item, images.MediaTypeDockerSchema2LayerGzip) - if err := item.Commit(); err != nil { + md := &cacheMetadata{item} + md.queueDiffID(digest.Digest(blob.DiffID)) + md.queueBlob(digest.Digest(blob.Blobsum)) + md.queueMediaType(images.MediaTypeDockerSchema2LayerGzip) + if err := md.commitMetadata(); err != nil { return err } - } // calculate new chainid/blobsumid @@ -171,6 +175,7 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho // add new leases for _, item := range byID { + md := &cacheMetadata{item} l, err := lm.Create(ctx, func(l *leases.Lease) error { l.ID = item.ID() l.Labels = map[string]string{ @@ -187,15 +192,15 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho } if err := lm.AddResource(ctx, l, leases.Resource{ - ID: getSnapshotID(item), + ID: md.getSnapshotID(), Type: "snapshots/" + s.Name(), }); err != nil { return errors.Wrapf(err, "failed to add snapshot %s to lease", item.ID()) } - if blobID := getBlob(item); blobID != "" { + if blobID := md.getBlob(); blobID != "" { if err := lm.AddResource(ctx, l, leases.Resource{ - ID: blobID, + ID: string(blobID), Type: "content", }); err != nil { return errors.Wrapf(err, "failed to add blob %s to lease", item.ID()) @@ -205,17 +210,18 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho // remove old root labels for _, item := range byID { - em := getEqualMutable(item) + md := &cacheMetadata{item} + em := 
md.getEqualMutable() if em == "" { if _, err := s.Update(ctx, snapshots.Info{ - Name: getSnapshotID(item), + Name: md.getSnapshotID(), }, "labels.containerd.io/gc.root"); err != nil { if !errors.Is(err, errdefs.ErrNotFound) { return err } } - if blob := getBlob(item); blob != "" { + if blob := md.getBlob(); blob != "" { if _, err := cs.Update(ctx, content.Info{ Digest: digest.Digest(blob), }, "labels.containerd.io/gc.root"); err != nil { @@ -252,8 +258,9 @@ func MigrateV2(ctx context.Context, from, to string, cs content.Store, s snapsho } for _, item := range byID { - logrus.Infof("migrated %s parent:%q snapshot:%v committed:%v blob:%v diffid:%v chainID:%v blobChainID:%v", - item.ID(), getParent(item), getSnapshotID(item), getCommitted(item), getBlob(item), getDiffID(item), getChainID(item), getBlobChainID(item)) + md := &cacheMetadata{item} + bklog.G(ctx).Infof("migrated %s parent:%q snapshot:%v blob:%v diffid:%v chainID:%v blobChainID:%v", + item.ID(), md.getParent(), md.getSnapshotID(), md.getBlob(), md.getDiffID(), md.getChainID(), md.getBlobChainID()) } return nil diff --git a/vendor/github.com/moby/buildkit/cache/opts.go b/vendor/github.com/moby/buildkit/cache/opts.go index 911def3e1f..92df9989d9 100644 --- a/vendor/github.com/moby/buildkit/cache/opts.go +++ b/vendor/github.com/moby/buildkit/cache/opts.go @@ -13,6 +13,8 @@ type DescHandler struct { Provider func(session.Group) content.Provider Progress progress.Controller SnapshotLabels map[string]string + Annotations map[string]string + Ref string // string representation of desc origin, can be used as a sync key } type DescHandlers map[digest.Digest]*DescHandler @@ -28,8 +30,10 @@ func descHandlersOf(opts ...RefOption) DescHandlers { type DescHandlerKey digest.Digest -type NeedsRemoteProvidersError []digest.Digest +type NeedsRemoteProviderError []digest.Digest //nolint:errname -func (m NeedsRemoteProvidersError) Error() string { +func (m NeedsRemoteProviderError) Error() string { return fmt.Sprintf("missing descriptor handlers for lazy blobs %+v", []digest.Digest(m)) } + +type ProgressKey struct{} diff --git a/vendor/github.com/moby/buildkit/cache/refs.go b/vendor/github.com/moby/buildkit/cache/refs.go index d7905d7ed1..c937dd1bfa 100644 --- a/vendor/github.com/moby/buildkit/cache/refs.go +++ b/vendor/github.com/moby/buildkit/cache/refs.go @@ -3,26 +3,35 @@ package cache import ( "context" "fmt" + "io/ioutil" + "os" + "path/filepath" "strings" "sync" "time" + "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" + "github.com/containerd/containerd/images" "github.com/containerd/containerd/leases" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/snapshots" "github.com/docker/docker/pkg/idtools" - "github.com/moby/buildkit/cache/metadata" + "github.com/hashicorp/go-multierror" + "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/flightcontrol" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/progress" "github.com/moby/buildkit/util/winlayers" + "github.com/moby/sys/mountinfo" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" 
"golang.org/x/sync/errgroup" @@ -31,32 +40,22 @@ import ( // Ref is a reference to cacheable objects. type Ref interface { Mountable - ID() string + RefMetadata Release(context.Context) error - Size(ctx context.Context) (int64, error) - Metadata() *metadata.StorageItem IdentityMapping() *idtools.IdentityMapping + DescHandler(digest.Digest) *DescHandler } type ImmutableRef interface { Ref - Parent() ImmutableRef - Finalize(ctx context.Context, commit bool) error // Make sure reference is flushed to driver Clone() ImmutableRef + // Finalize commits the snapshot to the driver if it's not already. + // This means the snapshot can no longer be mounted as mutable. + Finalize(context.Context) error - Info() RefInfo Extract(ctx context.Context, s session.Group) error // +progress - GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) (*solver.Remote, error) -} - -type RefInfo struct { - SnapshotID string - ChainID digest.Digest - BlobChainID digest.Digest - DiffID digest.Digest - Blob digest.Digest - MediaType string - Extracted bool + GetRemotes(ctx context.Context, createIfNeeded bool, cfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error) + LayerChain() RefList } type MutableRef interface { @@ -69,7 +68,7 @@ type Mountable interface { } type ref interface { - updateLastUsed() bool + shouldUpdateLastUsed() bool } type cacheRecord struct { @@ -78,14 +77,13 @@ type cacheRecord struct { mutable bool refs map[ref]struct{} - parent *immutableRef - md *metadata.StorageItem + parentRefs + *cacheMetadata // dead means record is marked as deleted dead bool - view string - viewMount snapshot.Mountable + mountCache snapshot.Mountable sizeG flightcontrol.Group @@ -93,15 +91,16 @@ type cacheRecord struct { equalMutable *mutableRef equalImmutable *immutableRef - parentChainCache []digest.Digest + layerDigestChainCache []digest.Digest } // hold ref lock before calling -func (cr *cacheRecord) ref(triggerLastUsed bool, descHandlers DescHandlers) *immutableRef { +func (cr *cacheRecord) ref(triggerLastUsed bool, descHandlers DescHandlers, pg progress.Controller) *immutableRef { ref := &immutableRef{ cacheRecord: cr, triggerLastUsed: triggerLastUsed, descHandlers: descHandlers, + progress: pg, } cr.refs[ref] = struct{}{} return ref @@ -118,24 +117,107 @@ func (cr *cacheRecord) mref(triggerLastUsed bool, descHandlers DescHandlers) *mu return ref } -func (cr *cacheRecord) parentChain() []digest.Digest { - if cr.parentChainCache != nil { - return cr.parentChainCache - } - blob := getBlob(cr.md) - if blob == "" { - return nil +// parentRefs is a disjoint union type that holds either a single layerParent for this record, a list +// of parents if this is a merged record or all nil fields if this record has no parents. At most one +// field should be non-nil at a time. 
+type parentRefs struct { + layerParent *immutableRef + mergeParents []*immutableRef + diffParents *diffParents +} + +type diffParents struct { + lower *immutableRef + upper *immutableRef +} + +// caller must hold cacheManager.mu +func (p parentRefs) release(ctx context.Context) (rerr error) { + switch { + case p.layerParent != nil: + p.layerParent.mu.Lock() + defer p.layerParent.mu.Unlock() + rerr = p.layerParent.release(ctx) + case len(p.mergeParents) > 0: + for i, parent := range p.mergeParents { + if parent == nil { + continue + } + parent.mu.Lock() + if err := parent.release(ctx); err != nil { + rerr = multierror.Append(rerr, err).ErrorOrNil() + } else { + p.mergeParents[i] = nil + } + parent.mu.Unlock() + } + case p.diffParents != nil: + if p.diffParents.lower != nil { + p.diffParents.lower.mu.Lock() + defer p.diffParents.lower.mu.Unlock() + if err := p.diffParents.lower.release(ctx); err != nil { + rerr = multierror.Append(rerr, err).ErrorOrNil() + } else { + p.diffParents.lower = nil + } + } + if p.diffParents.upper != nil { + p.diffParents.upper.mu.Lock() + defer p.diffParents.upper.mu.Unlock() + if err := p.diffParents.upper.release(ctx); err != nil { + rerr = multierror.Append(rerr, err).ErrorOrNil() + } else { + p.diffParents.upper = nil + } + } } - var parent []digest.Digest - if cr.parent != nil { - parent = cr.parent.parentChain() + return rerr +} + +func (p parentRefs) clone() parentRefs { + switch { + case p.layerParent != nil: + p.layerParent = p.layerParent.clone() + case len(p.mergeParents) > 0: + newParents := make([]*immutableRef, len(p.mergeParents)) + for i, p := range p.mergeParents { + newParents[i] = p.clone() + } + p.mergeParents = newParents + case p.diffParents != nil: + newDiffParents := &diffParents{} + if p.diffParents.lower != nil { + newDiffParents.lower = p.diffParents.lower.clone() + } + if p.diffParents.upper != nil { + newDiffParents.upper = p.diffParents.upper.clone() + } + p.diffParents = newDiffParents } - pcc := make([]digest.Digest, len(parent)+1) - copy(pcc, parent) - pcc[len(parent)] = digest.Digest(blob) - cr.parentChainCache = pcc - return pcc + return p +} + +type refKind int + +const ( + BaseLayer refKind = iota + Layer + Merge + Diff +) + +func (cr *cacheRecord) kind() refKind { + if len(cr.mergeParents) > 0 { + return Merge + } + if cr.diffParents != nil { + return Diff + } + if cr.layerParent != nil { + return Layer + } + return BaseLayer } // hold ref lock before calling @@ -143,16 +225,65 @@ func (cr *cacheRecord) isDead() bool { return cr.dead || (cr.equalImmutable != nil && cr.equalImmutable.dead) || (cr.equalMutable != nil && cr.equalMutable.dead) } +var errSkipWalk = errors.New("skip") + +// walkAncestors calls the provided func on cr and each of its ancestors, counting layer, +// diff, and merge parents. It starts at cr and does a depth-first walk to parents. It will visit +// a record and its parents multiple times if encountered more than once. It will only skip +// visiting parents of a record if errSkipWalk is returned. If any other error is returned, +// the walk will stop and return the error to the caller. 
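+//
+// Illustrative trace (invented records, not part of this change): for a merge
+// record M with mergeParents [A, B], where B is itself a layer on top of base
+// A, the walk visits
+//
+//	M.walkAncestors(f)   // f(M), f(B), f(A), f(A)
+//
+// because parents are pushed onto a stack and popped depth-first, so A is
+// reached once through B and once through the merge list. walkUniqueAncestors
+// below memoizes visited records to collapse such duplicates.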
+func (cr *cacheRecord) walkAncestors(f func(*cacheRecord) error) error { + curs := []*cacheRecord{cr} + for len(curs) > 0 { + cur := curs[len(curs)-1] + curs = curs[:len(curs)-1] + if err := f(cur); err != nil { + if errors.Is(err, errSkipWalk) { + continue + } + return err + } + switch cur.kind() { + case Layer: + curs = append(curs, cur.layerParent.cacheRecord) + case Merge: + for _, p := range cur.mergeParents { + curs = append(curs, p.cacheRecord) + } + case Diff: + if cur.diffParents.lower != nil { + curs = append(curs, cur.diffParents.lower.cacheRecord) + } + if cur.diffParents.upper != nil { + curs = append(curs, cur.diffParents.upper.cacheRecord) + } + } + } + return nil +} + +// walkUniqueAncestors calls walkAncestors but skips a record if it's already been visited. +func (cr *cacheRecord) walkUniqueAncestors(f func(*cacheRecord) error) error { + memo := make(map[*cacheRecord]struct{}) + return cr.walkAncestors(func(cr *cacheRecord) error { + if _, ok := memo[cr]; ok { + return errSkipWalk + } + memo[cr] = struct{}{} + return f(cr) + }) +} + func (cr *cacheRecord) isLazy(ctx context.Context) (bool, error) { - if !getBlobOnly(cr.md) { + if !cr.getBlobOnly() { return false, nil } - dgst := getBlob(cr.md) + dgst := cr.getBlob() // special case for moby where there is no compressed blob (empty digest) if dgst == "" { return false, nil } - _, err := cr.cm.ContentStore.Info(ctx, digest.Digest(dgst)) + _, err := cr.cm.ContentStore.Info(ctx, dgst) if errors.Is(err, errdefs.ErrNotFound) { return true, nil } else if err != nil { @@ -160,7 +291,7 @@ func (cr *cacheRecord) isLazy(ctx context.Context) (bool, error) { } // If the snapshot is a remote snapshot, this layer is lazy. - if info, err := cr.cm.Snapshotter.Stat(ctx, getSnapshotID(cr.md)); err == nil { + if info, err := cr.cm.Snapshotter.Stat(ctx, cr.getSnapshotID()); err == nil { if _, ok := info.Labels["containerd.io/snapshot/remote"]; ok { return true, nil } @@ -173,24 +304,36 @@ func (cr *cacheRecord) IdentityMapping() *idtools.IdentityMapping { return cr.cm.IdentityMapping() } -func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { +func (cr *cacheRecord) viewLeaseID() string { + return cr.ID() + "-view" +} + +func (cr *cacheRecord) compressionVariantsLeaseID() string { + return cr.ID() + "-variants" +} + +func (cr *cacheRecord) viewSnapshotID() string { + return cr.getSnapshotID() + "-view" +} + +func (cr *cacheRecord) size(ctx context.Context) (int64, error) { // this expects that usage() is implemented lazily s, err := cr.sizeG.Do(ctx, cr.ID(), func(ctx context.Context) (interface{}, error) { cr.mu.Lock() - s := getSize(cr.md) + s := cr.getSize() if s != sizeUnknown { cr.mu.Unlock() return s, nil } - driverID := getSnapshotID(cr.md) + driverID := cr.getSnapshotID() if cr.equalMutable != nil { - driverID = getSnapshotID(cr.equalMutable.md) + driverID = cr.equalMutable.getSnapshotID() } cr.mu.Unlock() var usage snapshots.Usage - if !getBlobOnly(cr.md) { + if !cr.getBlobOnly() { var err error - usage, err = cr.cm.ManagerOpt.Snapshotter.Usage(ctx, driverID) + usage, err = cr.cm.Snapshotter.Usage(ctx, driverID) if err != nil { cr.mu.Lock() isDead := cr.isDead() @@ -203,15 +346,26 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { } } } - if dgst := getBlob(cr.md); dgst != "" { + if dgst := cr.getBlob(); dgst != "" { + added := make(map[digest.Digest]struct{}) info, err := cr.cm.ContentStore.Info(ctx, digest.Digest(dgst)) if err == nil { usage.Size += info.Size + added[digest.Digest(dgst)] = struct{}{} } 
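+ // also fold in any compression-variant blobs linked to this record (see linkBlob below), so the reported size covers every stored copy of the layer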
+ walkBlobVariantsOnly(ctx, cr.cm.ContentStore, digest.Digest(dgst), func(desc ocispecs.Descriptor) bool { + if _, ok := added[desc.Digest]; !ok { + if info, err := cr.cm.ContentStore.Info(ctx, desc.Digest); err == nil { + usage.Size += info.Size + added[desc.Digest] = struct{}{} + } + } + return true + }, nil) } cr.mu.Lock() - setSize(cr.md, usage.Size) - if err := cr.md.Commit(); err != nil { + cr.queueSize(usage.Size) + if err := cr.commitMetadata(); err != nil { cr.mu.Unlock() return s, err } @@ -224,94 +378,207 @@ func (cr *cacheRecord) Size(ctx context.Context) (int64, error) { return s.(int64), nil } -func (cr *cacheRecord) parentRef(hidden bool, descHandlers DescHandlers) *immutableRef { - p := cr.parent - if p == nil { - return nil +// caller must hold cr.mu +func (cr *cacheRecord) mount(ctx context.Context, s session.Group) (_ snapshot.Mountable, rerr error) { + if cr.mountCache != nil { + return cr.mountCache, nil } - p.mu.Lock() - defer p.mu.Unlock() - return p.ref(hidden, descHandlers) -} -// must be called holding cacheRecord mu -func (cr *cacheRecord) mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + var mountSnapshotID string if cr.mutable { - m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.md)) - if err != nil { - return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) - } - if readonly { - m = setReadonly(m) - } - return m, nil - } - - if cr.equalMutable != nil && readonly { - m, err := cr.cm.Snapshotter.Mounts(ctx, getSnapshotID(cr.equalMutable.md)) - if err != nil { - return nil, errors.Wrapf(err, "failed to mount %s", cr.equalMutable.ID()) - } - return setReadonly(m), nil - } - - if err := cr.finalize(ctx, true); err != nil { - return nil, err - } - if cr.viewMount == nil { // TODO: handle this better - view := identity.NewID() - l, err := cr.cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { - l.ID = view + mountSnapshotID = cr.getSnapshotID() + } else if cr.equalMutable != nil { + mountSnapshotID = cr.equalMutable.getSnapshotID() + } else { + mountSnapshotID = cr.viewSnapshotID() + if _, err := cr.cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { + l.ID = cr.viewLeaseID() l.Labels = map[string]string{ "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), } return nil - }, leaseutil.MakeTemporary) - if err != nil { + }, leaseutil.MakeTemporary); err != nil && !errdefs.IsAlreadyExists(err) { return nil, err } - ctx = leases.WithLease(ctx, l.ID) - m, err := cr.cm.Snapshotter.View(ctx, view, getSnapshotID(cr.md)) - if err != nil { - cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: l.ID}) - return nil, errors.Wrapf(err, "failed to mount %s", cr.ID()) + defer func() { + if rerr != nil { + cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.viewLeaseID()}) + } + }() + if err := cr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: cr.viewLeaseID()}, leases.Resource{ + ID: mountSnapshotID, + Type: "snapshots/" + cr.cm.Snapshotter.Name(), + }); err != nil && !errdefs.IsAlreadyExists(err) { + return nil, err } - cr.view = view - cr.viewMount = m + // Return the mount direct from View rather than setting it using the Mounts call below. + // The two are equivalent for containerd snapshotters but the moby snapshotter requires + // the use of the mountable returned by View in this case. 
+ mnts, err := cr.cm.Snapshotter.View(ctx, mountSnapshotID, cr.getSnapshotID()) + if err != nil && !errdefs.IsAlreadyExists(err) { + return nil, err + } + cr.mountCache = mnts } - return cr.viewMount, nil + + if cr.mountCache != nil { + return cr.mountCache, nil + } + + mnts, err := cr.cm.Snapshotter.Mounts(ctx, mountSnapshotID) + if err != nil { + return nil, err + } + cr.mountCache = mnts + return cr.mountCache, nil } // call when holding the manager lock func (cr *cacheRecord) remove(ctx context.Context, removeSnapshot bool) error { delete(cr.cm.records, cr.ID()) - if cr.parent != nil { - cr.parent.mu.Lock() - err := cr.parent.release(ctx) - cr.parent.mu.Unlock() - if err != nil { - return err - } - } if removeSnapshot { - if err := cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}); err != nil { - return errors.Wrapf(err, "failed to remove %s", cr.ID()) + if err := cr.cm.LeaseManager.Delete(ctx, leases.Lease{ + ID: cr.ID(), + }); err != nil && !errdefs.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete lease for %s", cr.ID()) + } + if err := cr.cm.LeaseManager.Delete(ctx, leases.Lease{ + ID: cr.compressionVariantsLeaseID(), + }); err != nil && !errdefs.IsNotFound(err) { + return errors.Wrapf(err, "failed to delete compression variant lease for %s", cr.ID()) } } - if err := cr.cm.md.Clear(cr.ID()); err != nil { - return err + if err := cr.cm.MetadataStore.Clear(cr.ID()); err != nil { + return errors.Wrapf(err, "failed to delete metadata of %s", cr.ID()) + } + if err := cr.parentRefs.release(ctx); err != nil { + return errors.Wrapf(err, "failed to release parents of %s", cr.ID()) } return nil } -func (cr *cacheRecord) ID() string { - return cr.md.ID() -} - type immutableRef struct { *cacheRecord triggerLastUsed bool descHandlers DescHandlers + // TODO:(sipsma) de-dupe progress with the same field inside descHandlers? + progress progress.Controller +} + +// Order is from parent->child, sr will be at end of slice. Refs should not +// be released as they are used internally in the underlying cacheRecords. +func (sr *immutableRef) layerChain() []*immutableRef { + var count int + sr.layerWalk(func(*immutableRef) { + count++ + }) + layers := make([]*immutableRef, count) + var index int + sr.layerWalk(func(sr *immutableRef) { + layers[index] = sr + index++ + }) + return layers +} + +// returns the set of cache record IDs for each layer in sr's layer chain +func (sr *immutableRef) layerSet() map[string]struct{} { + var count int + sr.layerWalk(func(*immutableRef) { + count++ + }) + set := make(map[string]struct{}, count) + sr.layerWalk(func(sr *immutableRef) { + set[sr.ID()] = struct{}{} + }) + return set +} + +// layerWalk visits each ref representing an actual layer in the chain for +// sr (including sr). The layers are visited from lowest->highest as ordered +// in the remote for the ref. 
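+//
+// Hedged example (invented refs): for a diff ref D between lower L and upper
+// U, where U is a single layer directly on top of L (U.layerParent == L), the
+// diff is exactly U's top blob, so
+//
+//	D.layerWalk(f)   // visits U in place of D
+//
+// while for unrelated L and U the walk visits D itself, which stands in for
+// the single blob the diff will later be computed into.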
+func (sr *immutableRef) layerWalk(f func(*immutableRef)) { + switch sr.kind() { + case Merge: + for _, parent := range sr.mergeParents { + parent.layerWalk(f) + } + case Diff: + lower := sr.diffParents.lower + upper := sr.diffParents.upper + // If upper is only one blob different from lower, then re-use that blob + switch { + case upper != nil && lower == nil && upper.kind() == BaseLayer: + // upper is a single layer being diffed with scratch + f(upper) + case upper != nil && lower != nil && upper.kind() == Layer && upper.layerParent.ID() == lower.ID(): + // upper is a single layer on top of lower + f(upper) + default: + // otherwise, the diff will be computed and turned into its own single blob + f(sr) + } + case Layer: + sr.layerParent.layerWalk(f) + fallthrough + case BaseLayer: + f(sr) + } +} + +// hold cacheRecord.mu lock before calling +func (cr *cacheRecord) layerDigestChain() []digest.Digest { + if cr.layerDigestChainCache != nil { + return cr.layerDigestChainCache + } + switch cr.kind() { + case Diff: + if cr.getBlob() == "" { + // this diff just reuses the upper blob + cr.layerDigestChainCache = cr.diffParents.upper.layerDigestChain() + } else { + cr.layerDigestChainCache = append(cr.layerDigestChainCache, cr.getBlob()) + } + case Merge: + for _, parent := range cr.mergeParents { + cr.layerDigestChainCache = append(cr.layerDigestChainCache, parent.layerDigestChain()...) + } + case Layer: + cr.layerDigestChainCache = append(cr.layerDigestChainCache, cr.layerParent.layerDigestChain()...) + fallthrough + case BaseLayer: + cr.layerDigestChainCache = append(cr.layerDigestChainCache, cr.getBlob()) + } + return cr.layerDigestChainCache +} + +type RefList []ImmutableRef + +func (l RefList) Release(ctx context.Context) (rerr error) { + for i, r := range l { + if r == nil { + continue + } + if err := r.Release(ctx); err != nil { + rerr = multierror.Append(rerr, err).ErrorOrNil() + } else { + l[i] = nil + } + } + return rerr +} + +func (sr *immutableRef) LayerChain() RefList { + chain := sr.layerChain() + l := RefList(make([]ImmutableRef, len(chain))) + for i, p := range chain { + l[i] = p.Clone() + } + return l +} + +func (sr *immutableRef) DescHandler(dgst digest.Digest) *DescHandler { + return sr.descHandlers[dgst] } type mutableRef struct { @@ -320,50 +587,109 @@ type mutableRef struct { descHandlers DescHandlers } -func (sr *immutableRef) Clone() ImmutableRef { +func (sr *mutableRef) DescHandler(dgst digest.Digest) *DescHandler { + return sr.descHandlers[dgst] +} + +func (sr *immutableRef) clone() *immutableRef { sr.mu.Lock() - ref := sr.ref(false, sr.descHandlers) + ref := sr.ref(false, sr.descHandlers, sr.progress) sr.mu.Unlock() return ref } -func (sr *immutableRef) Parent() ImmutableRef { - if p := sr.parentRef(true, sr.descHandlers); p != nil { // avoid returning typed nil pointer - return p - } - return nil +func (sr *immutableRef) Clone() ImmutableRef { + return sr.clone() } -func (sr *immutableRef) Info() RefInfo { - return RefInfo{ - ChainID: digest.Digest(getChainID(sr.md)), - DiffID: digest.Digest(getDiffID(sr.md)), - Blob: digest.Digest(getBlob(sr.md)), - MediaType: getMediaType(sr.md), - BlobChainID: digest.Digest(getBlobChainID(sr.md)), - SnapshotID: getSnapshotID(sr.md), - Extracted: !getBlobOnly(sr.md), +// layerToDistributable changes the passed in media type to the "distributable" version of the media type.
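+//
+// Hedged sketch of the intended round trip (OCI media types, values per the
+// two switches below): converting for storage strips the non-distributable
+// marker, and converting back restores it when URLs are present:
+//
+//	stored := layerToDistributable(ocispecs.MediaTypeImageLayerNonDistributableGzip)
+//	// stored == ocispecs.MediaTypeImageLayerGzip
+//	restored := layerToNonDistributable(stored)
+//	// restored == ocispecs.MediaTypeImageLayerNonDistributableGzip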
+func layerToDistributable(mt string) string {
+	if !images.IsNonDistributable(mt) {
+		// Layer is already a distributable media type (or this is not even a layer).
+		// No conversion needed
+		return mt
+	}
+
+	switch mt {
+	case ocispecs.MediaTypeImageLayerNonDistributable:
+		return ocispecs.MediaTypeImageLayer
+	case ocispecs.MediaTypeImageLayerNonDistributableGzip:
+		return ocispecs.MediaTypeImageLayerGzip
+	case ocispecs.MediaTypeImageLayerNonDistributableZstd:
+		return ocispecs.MediaTypeImageLayerZstd
+	case images.MediaTypeDockerSchema2LayerForeign:
+		return images.MediaTypeDockerSchema2Layer
+	case images.MediaTypeDockerSchema2LayerForeignGzip:
+		return images.MediaTypeDockerSchema2LayerGzip
+	default:
+		return mt
+	}
}

-func (sr *immutableRef) ociDesc() (ocispec.Descriptor, error) {
-	desc := ocispec.Descriptor{
-		Digest:      digest.Digest(getBlob(sr.md)),
-		Size:        getBlobSize(sr.md),
-		MediaType:   getMediaType(sr.md),
+func layerToNonDistributable(mt string) string {
+	switch mt {
+	case ocispecs.MediaTypeImageLayer:
+		return ocispecs.MediaTypeImageLayerNonDistributable
+	case ocispecs.MediaTypeImageLayerGzip:
+		return ocispecs.MediaTypeImageLayerNonDistributableGzip
+	case ocispecs.MediaTypeImageLayerZstd:
+		return ocispecs.MediaTypeImageLayerNonDistributableZstd
+	case images.MediaTypeDockerSchema2Layer:
+		return images.MediaTypeDockerSchema2LayerForeign
+	case images.MediaTypeDockerSchema2LayerGzip:
+		return images.MediaTypeDockerSchema2LayerForeignGzip
+	default:
+		return mt
+	}
+}
+
+func (sr *immutableRef) ociDesc(ctx context.Context, dhs DescHandlers, preferNonDist bool) (ocispecs.Descriptor, error) {
+	dgst := sr.getBlob()
+	if dgst == "" {
+		return ocispecs.Descriptor{}, errors.Errorf("no blob set for cache record %s", sr.ID())
+	}
+
+	desc := ocispecs.Descriptor{
+		Digest:      sr.getBlob(),
+		Size:        sr.getBlobSize(),
		Annotations: make(map[string]string),
+		MediaType:   sr.getMediaType(),
	}

-	diffID := getDiffID(sr.md)
+	if preferNonDist {
+		if urls := sr.getURLs(); len(urls) > 0 {
+			// Make sure the media type is the non-distributable version.
+			// We don't want to rely on the stored media type here because it could have been stored as distributable originally.
+			desc.MediaType = layerToNonDistributable(desc.MediaType)
+			desc.URLs = urls
+		}
+	}
+	if len(desc.URLs) == 0 {
+		// If there are no URLs, there is no reason to have this be non-distributable
+		desc.MediaType = layerToDistributable(desc.MediaType)
+	}
+
+	if blobDesc, err := getBlobDesc(ctx, sr.cm.ContentStore, desc.Digest); err == nil {
+		if blobDesc.Annotations != nil {
+			desc.Annotations = blobDesc.Annotations
+		}
+	} else if dh, ok := dhs[desc.Digest]; ok {
+		// No blob metadata is stored in the content store. Try to get annotations from desc handlers.
+		for k, v := range filterAnnotationsForSave(dh.Annotations) {
+			desc.Annotations[k] = v
+		}
+	}
+
+	diffID := sr.getDiffID()
	if diffID != "" {
-		desc.Annotations["containerd.io/uncompressed"] = diffID
+		desc.Annotations["containerd.io/uncompressed"] = string(diffID)
	}

-	createdAt := GetCreatedAt(sr.md)
+	createdAt := sr.GetCreatedAt()
	if !createdAt.IsZero() {
		createdAt, err := createdAt.MarshalText()
		if err != nil {
-			return ocispec.Descriptor{}, err
+			return ocispecs.Descriptor{}, err
		}
		desc.Annotations["buildkit/createdat"] = string(createdAt)
	}
@@ -371,20 +697,218 @@ func (sr *immutableRef) ociDesc() (ocispec.Descriptor, error) {
	return desc, nil
}

-// order is from parent->child, sr will be at end of slice
-func (sr *immutableRef) parentRefChain() []*immutableRef {
-	var count int
-	for ref := sr; ref != nil; ref = ref.parent {
-		count++
+const (
+	blobVariantGCLabel         = "containerd.io/gc.ref.content.blob-"
+	blobAnnotationsLabelPrefix = "buildkit.io/blob/annotation."
+	blobMediaTypeLabel         = "buildkit.io/blob/mediatype"
+)
+
+// linkBlob makes a link between this ref and the passed blob. The linked blob can be
+// acquired during walkBlob. This is useful to associate a compression variant blob with
+// this ref. This doesn't record the blob in the cache record (i.e. the passed blob can't
+// be acquired through getBlob). Use setBlob for that purpose.
+func (sr *immutableRef) linkBlob(ctx context.Context, desc ocispecs.Descriptor) error {
+	if _, err := sr.cm.LeaseManager.Create(ctx, func(l *leases.Lease) error {
+		l.ID = sr.compressionVariantsLeaseID()
+		// do not make it a flat lease, to allow linking blobs using the gc label
+		return nil
+	}); err != nil && !errdefs.IsAlreadyExists(err) {
+		return err
	}
-	refs := make([]*immutableRef, count)
-	for i, ref := count-1, sr; ref != nil; i, ref = i-1, ref.parent {
-		refs[i] = ref
+	if err := sr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: sr.compressionVariantsLeaseID()}, leases.Resource{
+		ID:   desc.Digest.String(),
+		Type: "content",
+	}); err != nil {
+		return err
	}
-	return refs
+	cs := sr.cm.ContentStore
+	blobDigest := sr.getBlob()
+	info, err := cs.Info(ctx, blobDigest)
+	if err != nil {
+		return err
+	}
+	vInfo, err := cs.Info(ctx, desc.Digest)
+	if err != nil {
+		return err
+	}
+	vInfo.Labels = map[string]string{
+		blobVariantGCLabel + blobDigest.String(): blobDigest.String(),
+	}
+	vInfo = addBlobDescToInfo(desc, vInfo)
+	if _, err := cs.Update(ctx, vInfo, fieldsFromLabels(vInfo.Labels)...); err != nil {
+		return err
+	}
+	// let the future call to size() recalculate the new size
+	sr.mu.Lock()
+	sr.queueSize(sizeUnknown)
+	if err := sr.commitMetadata(); err != nil {
+		sr.mu.Unlock()
+		return err
+	}
+	sr.mu.Unlock()
+	if desc.Digest == blobDigest {
+		return nil
+	}
+	info.Labels = map[string]string{
+		blobVariantGCLabel + desc.Digest.String(): desc.Digest.String(),
+	}
+	_, err = cs.Update(ctx, info, fieldsFromLabels(info.Labels)...)
+	return err
}
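Aside: the compression-variant linking above is driven purely by content-store labels, which walkBlobVariantsOnly below follows in both directions. A minimal, self-contained sketch of that label convention; the helper names here are illustrative and not part of the diff, only the `containerd.io/gc.ref.content.blob-` prefix comes from the code above:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/opencontainers/go-digest"
)

// Same prefix as the blobVariantGCLabel constant above: the peer blob's digest
// appears in both the key suffix and the value, so the link doubles as a GC
// reference that keeps the variant alive while the labeled blob lives.
const blobVariantGCLabel = "containerd.io/gc.ref.content.blob-"

// variantLabel builds the label a base blob's content-store record would carry
// to point at one compression variant (illustrative helper).
func variantLabel(variant digest.Digest) (key, value string) {
	return blobVariantGCLabel + variant.String(), variant.String()
}

// variantsFromLabels recovers linked digests from a record's labels, the same
// scan walkBlobVariantsOnly performs over info.Labels.
func variantsFromLabels(labels map[string]string) (out []digest.Digest) {
	for k, v := range labels {
		if !strings.HasPrefix(k, blobVariantGCLabel) {
			continue
		}
		if d, err := digest.Parse(v); err == nil {
			out = append(out, d)
		}
	}
	return out
}

func main() {
	zstdVariant := digest.FromString("layer.tar.zst")

	labels := map[string]string{}
	k, v := variantLabel(zstdVariant)
	labels[k] = v

	fmt.Println("variants:", variantsFromLabels(labels))
}
```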

-func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Group) (snapshot.Mountable, error) {
+func (sr *immutableRef) getBlobWithCompression(ctx context.Context, compressionType compression.Type) (ocispecs.Descriptor, error) {
+	if _, err := sr.cm.ContentStore.Info(ctx, sr.getBlob()); err != nil {
+		return ocispecs.Descriptor{}, err
+	}
+	desc, err := sr.ociDesc(ctx, nil, true)
+	if err != nil {
+		return ocispecs.Descriptor{}, err
+	}
+	return getBlobWithCompression(ctx, sr.cm.ContentStore, desc, compressionType)
+}
+
+func getBlobWithCompression(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, compressionType compression.Type) (ocispecs.Descriptor, error) {
+	if compressionType == compression.UnknownCompression {
+		return ocispecs.Descriptor{}, fmt.Errorf("cannot get unknown compression type")
+	}
+	var target *ocispecs.Descriptor
+	if err := walkBlob(ctx, cs, desc, func(desc ocispecs.Descriptor) bool {
+		if needs, err := needsConversion(ctx, cs, desc, compressionType); err == nil && !needs {
+			target = &desc
+			return false
+		}
+		return true
+	}); err != nil || target == nil {
+		return ocispecs.Descriptor{}, errdefs.ErrNotFound
+	}
+	return *target, nil
+}
+
+func walkBlob(ctx context.Context, cs content.Store, desc ocispecs.Descriptor, f func(ocispecs.Descriptor) bool) error {
+	if !f(desc) {
+		return nil
+	}
+	if _, err := walkBlobVariantsOnly(ctx, cs, desc.Digest, func(desc ocispecs.Descriptor) bool { return f(desc) }, nil); err != nil {
+		return err
+	}
+	return nil
+}
+
+func walkBlobVariantsOnly(ctx context.Context, cs content.Store, dgst digest.Digest, f func(ocispecs.Descriptor) bool, visited map[digest.Digest]struct{}) (bool, error) {
+	if visited == nil {
+		visited = make(map[digest.Digest]struct{})
+	}
+	visited[dgst] = struct{}{}
+	info, err := cs.Info(ctx, dgst)
+	if errors.Is(err, errdefs.ErrNotFound) {
+		return true, nil
+	} else if err != nil {
+		return false, err
+	}
+	var children []digest.Digest
+	for k, dgstS := range info.Labels {
+		if !strings.HasPrefix(k, blobVariantGCLabel) {
+			continue
+		}
+		cDgst, err := digest.Parse(dgstS)
+		if err != nil || cDgst == dgst {
+			continue
+		}
+		if cDesc, err := getBlobDesc(ctx, cs, cDgst); err == nil {
+			if !f(cDesc) {
+				return false, nil
+			}
+		}
+		children = append(children, cDgst)
+	}
+	for _, c := range children {
+		if _, isVisited := visited[c]; isVisited {
+			continue
+		}
+		if isContinue, err := walkBlobVariantsOnly(ctx, cs, c, f, visited); !isContinue || err != nil {
+			return isContinue, err
+		}
+	}
+	return true, nil
+}
+
+func getBlobDesc(ctx context.Context, cs content.Store, dgst digest.Digest) (ocispecs.Descriptor, error) {
+	info, err := cs.Info(ctx, dgst)
+	if err != nil {
+		return ocispecs.Descriptor{}, err
+	}
+	if info.Labels == nil {
+		return ocispecs.Descriptor{}, fmt.Errorf("no blob metadata is stored for %q", info.Digest)
+	}
+	mt, ok := info.Labels[blobMediaTypeLabel]
+	if !ok {
+		return ocispecs.Descriptor{}, fmt.Errorf("no media type is stored for %q", info.Digest)
+	}
+	desc := ocispecs.Descriptor{
+		Digest:    info.Digest,
+		Size:      info.Size,
+		MediaType: mt,
+	}
+	for k, v := range info.Labels {
+		if strings.HasPrefix(k, blobAnnotationsLabelPrefix) {
+			if desc.Annotations == nil {
+				desc.Annotations = make(map[string]string)
+			}
+			desc.Annotations[strings.TrimPrefix(k, blobAnnotationsLabelPrefix)] = v
+		}
+	}
+	if len(desc.URLs) == 0 {
+		// If there are no URLs, there is no reason to have this be non-distributable
+		desc.MediaType = 
layerToDistributable(desc.MediaType) + } + return desc, nil +} + +func addBlobDescToInfo(desc ocispecs.Descriptor, info content.Info) content.Info { + if _, ok := info.Labels[blobMediaTypeLabel]; ok { + return info // descriptor information already stored + } + if info.Labels == nil { + info.Labels = make(map[string]string) + } + info.Labels[blobMediaTypeLabel] = desc.MediaType + for k, v := range filterAnnotationsForSave(desc.Annotations) { + info.Labels[blobAnnotationsLabelPrefix+k] = v + } + return info +} + +func filterAnnotationsForSave(a map[string]string) (b map[string]string) { + if a == nil { + return nil + } + for _, k := range append(eStargzAnnotations, containerdUncompressed) { + v, ok := a[k] + if !ok { + continue + } + if b == nil { + b = make(map[string]string) + } + b[k] = v + } + return +} + +func fieldsFromLabels(labels map[string]string) (fields []string) { + for k := range labels { + fields = append(fields, "labels."+k) + } + return +} + +func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Group) (_ snapshot.Mountable, rerr error) { + if sr.equalMutable != nil && !readonly { + if err := sr.Finalize(ctx); err != nil { + return nil, err + } + } + if err := sr.Extract(ctx, s); err != nil { return nil, err } @@ -392,35 +916,36 @@ func (sr *immutableRef) Mount(ctx context.Context, readonly bool, s session.Grou sr.mu.Lock() defer sr.mu.Unlock() + if sr.mountCache != nil { + if readonly { + return setReadonly(sr.mountCache), nil + } + return sr.mountCache, nil + } + + var mnt snapshot.Mountable if sr.cm.Snapshotter.Name() == "stargz" { - var ( - m snapshot.Mountable - rerr error - ) if err := sr.withRemoteSnapshotLabelsStargzMode(ctx, s, func() { - m, rerr = sr.mount(ctx, readonly) + mnt, rerr = sr.mount(ctx, s) }); err != nil { return nil, err } - return m, rerr + } else { + mnt, rerr = sr.mount(ctx, s) + } + if rerr != nil { + return nil, rerr } - return sr.mount(ctx, readonly) + if readonly { + mnt = setReadonly(mnt) + } + return mnt, nil } func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr error) { - if !getBlobOnly(sr.md) { - return - } - - ctx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary) - if err != nil { - return err - } - defer done(ctx) - - if GetLayerType(sr) == "windows" { - ctx = winlayers.UseWindowsLayerMode(ctx) + if (sr.kind() == Layer || sr.kind() == BaseLayer) && !sr.getBlobOnly() { + return nil } if sr.cm.Snapshotter.Name() == "stargz" { @@ -428,21 +953,21 @@ func (sr *immutableRef) Extract(ctx context.Context, s session.Group) (rerr erro if rerr = sr.prepareRemoteSnapshotsStargzMode(ctx, s); rerr != nil { return } - rerr = sr.extract(ctx, sr.descHandlers, s) + rerr = sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true) }); err != nil { return err } return rerr } - return sr.extract(ctx, sr.descHandlers, s) + return sr.unlazy(ctx, sr.descHandlers, sr.progress, s, true) } func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, s session.Group, f func()) error { dhs := sr.descHandlers - for _, r := range sr.parentRefChain() { + for _, r := range sr.layerChain() { r := r - info, err := r.cm.Snapshotter.Stat(ctx, getSnapshotID(r.md)) + info, err := r.cm.Snapshotter.Stat(ctx, r.getSnapshotID()) if err != nil && !errdefs.IsNotFound(err) { return err } else if errdefs.IsNotFound(err) { @@ -450,11 +975,7 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, } else if _, ok := info.Labels["containerd.io/snapshot/remote"]; !ok { 
continue // This isn't a remote snapshot; skip } - desc, err := r.ociDesc() - if err != nil { - return err - } - dh := dhs[desc.Digest] + dh := dhs[digest.Digest(r.getBlob())] if dh == nil { continue // no info passed; skip } @@ -486,18 +1007,14 @@ func (sr *immutableRef) withRemoteSnapshotLabelsStargzMode(ctx context.Context, func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s session.Group) error { _, err := sr.sizeG.Do(ctx, sr.ID()+"-prepare-remote-snapshot", func(ctx context.Context) (_ interface{}, rerr error) { dhs := sr.descHandlers - for _, r := range sr.parentRefChain() { + for _, r := range sr.layerChain() { r := r - snapshotID := getSnapshotID(r.md) + snapshotID := r.getSnapshotID() if _, err := r.cm.Snapshotter.Stat(ctx, snapshotID); err == nil { continue } - desc, err := r.ociDesc() - if err != nil { - return nil, err - } - dh := dhs[desc.Digest] + dh := dhs[digest.Digest(r.getBlob())] if dh == nil { // We cannot prepare remote snapshots without descHandler. return nil, nil @@ -515,17 +1032,17 @@ func (sr *immutableRef) prepareRemoteSnapshotsStargzMode(ctx context.Context, s // Prepare remote snapshots var ( - key = fmt.Sprintf("tmp-%s %s", identity.NewID(), r.Info().ChainID) + key = fmt.Sprintf("tmp-%s %s", identity.NewID(), r.getChainID()) opts = []snapshots.Opt{ snapshots.WithLabels(defaultLabels), snapshots.WithLabels(tmpLabels), } ) parentID := "" - if r.parent != nil { - parentID = getSnapshotID(r.parent.md) + if r.layerParent != nil { + parentID = r.layerParent.getSnapshotID() } - if err = r.cm.Snapshotter.Prepare(ctx, key, parentID, opts...); err != nil { + if err := r.cm.Snapshotter.Prepare(ctx, key, parentID, opts...); err != nil { if errdefs.IsAlreadyExists(err) { // Check if the targeting snapshot ID has been prepared as // a remote snapshot in the snapshotter. 
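Aside: both the stargz prepare path above and the extraction path in the next hunk name their working snapshots as a random ID joined with the layer's chain ID, so concurrent attempts never collide while the chain ID stays recognizable in the key. A tiny illustration, assuming buildkit's identity package; the chain ID value is made up:

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/identity"
	"github.com/opencontainers/go-digest"
)

func main() {
	chainID := digest.FromString("example-layer-chain")

	// Temporary remote-snapshot key, as in prepareRemoteSnapshotsStargzMode above.
	tmpKey := fmt.Sprintf("tmp-%s %s", identity.NewID(), chainID)

	// Working key for extraction, as in the unlazy path of the next hunk.
	extractKey := fmt.Sprintf("extract-%s %s", identity.NewID(), chainID)

	fmt.Println(tmpKey)
	fmt.Println(extractKey)
}
```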
@@ -574,96 +1091,182 @@ func makeTmpLabelsStargzMode(labels map[string]string, s session.Group) (fields return } -func (sr *immutableRef) extract(ctx context.Context, dhs DescHandlers, s session.Group) error { - _, err := sr.sizeG.Do(ctx, sr.ID()+"-extract", func(ctx context.Context) (_ interface{}, rerr error) { - snapshotID := getSnapshotID(sr.md) - if _, err := sr.cm.Snapshotter.Stat(ctx, snapshotID); err == nil { +func (sr *immutableRef) unlazy(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) error { + _, err := sr.sizeG.Do(ctx, sr.ID()+"-unlazy", func(ctx context.Context) (_ interface{}, rerr error) { + if _, err := sr.cm.Snapshotter.Stat(ctx, sr.getSnapshotID()); err == nil { return nil, nil } - if sr.cm.Applier == nil { - return nil, errors.New("extract requires an applier") - } - - eg, egctx := errgroup.WithContext(ctx) - - parentID := "" - if sr.parent != nil { - eg.Go(func() error { - if err := sr.parent.extract(egctx, dhs, s); err != nil { - return err - } - parentID = getSnapshotID(sr.parent.md) - return nil - }) - } - - desc, err := sr.ociDesc() - if err != nil { - return nil, err - } - dh := dhs[desc.Digest] - - eg.Go(func() error { - // unlazies if needed, otherwise a no-op - return lazyRefProvider{ - ref: sr, - desc: desc, - dh: dh, - session: s, - }.Unlazy(egctx) - }) - - if err := eg.Wait(); err != nil { - return nil, err - } - - if dh != nil && dh.Progress != nil { - _, stopProgress := dh.Progress.Start(ctx) - defer stopProgress(rerr) - statusDone := dh.Progress.Status("extracting "+desc.Digest.String(), "extracting") - defer statusDone() - } - - key := fmt.Sprintf("extract-%s %s", identity.NewID(), sr.Info().ChainID) - - err = sr.cm.Snapshotter.Prepare(ctx, key, parentID) - if err != nil { - return nil, err - } - - mountable, err := sr.cm.Snapshotter.Mounts(ctx, key) - if err != nil { - return nil, err - } - mounts, unmount, err := mountable.Mount() - if err != nil { - return nil, err - } - _, err = sr.cm.Applier.Apply(ctx, desc, mounts) - if err != nil { - unmount() - return nil, err - } - - if err := unmount(); err != nil { - return nil, err - } - if err := sr.cm.Snapshotter.Commit(ctx, getSnapshotID(sr.md), key); err != nil { - if !errors.Is(err, errdefs.ErrAlreadyExists) { - return nil, err - } - } - queueBlobOnly(sr.md, false) - setSize(sr.md, sizeUnknown) - if err := sr.md.Commit(); err != nil { - return nil, err + switch sr.kind() { + case Merge, Diff: + return nil, sr.unlazyDiffMerge(ctx, dhs, pg, s, topLevel) + case Layer, BaseLayer: + return nil, sr.unlazyLayer(ctx, dhs, pg, s) } return nil, nil }) return err } +// should be called within sizeG.Do call for this ref's ID +func (sr *immutableRef) unlazyDiffMerge(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group, topLevel bool) (rerr error) { + eg, egctx := errgroup.WithContext(ctx) + var diffs []snapshot.Diff + sr.layerWalk(func(sr *immutableRef) { + var diff snapshot.Diff + switch sr.kind() { + case Diff: + if sr.diffParents.lower != nil { + diff.Lower = sr.diffParents.lower.getSnapshotID() + eg.Go(func() error { + return sr.diffParents.lower.unlazy(egctx, dhs, pg, s, false) + }) + } + if sr.diffParents.upper != nil { + diff.Upper = sr.diffParents.upper.getSnapshotID() + eg.Go(func() error { + return sr.diffParents.upper.unlazy(egctx, dhs, pg, s, false) + }) + } + case Layer: + diff.Lower = sr.layerParent.getSnapshotID() + fallthrough + case BaseLayer: + diff.Upper = sr.getSnapshotID() + eg.Go(func() error { + return sr.unlazy(egctx, 
dhs, pg, s, false) + }) + } + diffs = append(diffs, diff) + }) + if err := eg.Wait(); err != nil { + return err + } + + if pg != nil { + action := "merging" + if sr.kind() == Diff { + action = "diffing" + } + progressID := sr.GetDescription() + if topLevel { + progressID = action + } + if progressID == "" { + progressID = fmt.Sprintf("%s %s", action, sr.ID()) + } + _, stopProgress := pg.Start(ctx) + defer stopProgress(rerr) + statusDone := pg.Status(progressID, action) + defer statusDone() + } + + return sr.cm.Snapshotter.Merge(ctx, sr.getSnapshotID(), diffs) +} + +// should be called within sizeG.Do call for this ref's ID +func (sr *immutableRef) unlazyLayer(ctx context.Context, dhs DescHandlers, pg progress.Controller, s session.Group) (rerr error) { + if !sr.getBlobOnly() { + return nil + } + + if sr.cm.Applier == nil { + return errors.New("unlazy requires an applier") + } + + if _, ok := leases.FromContext(ctx); !ok { + leaseCtx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary) + if err != nil { + return err + } + defer done(leaseCtx) + ctx = leaseCtx + } + + if sr.GetLayerType() == "windows" { + ctx = winlayers.UseWindowsLayerMode(ctx) + } + + eg, egctx := errgroup.WithContext(ctx) + + parentID := "" + if sr.layerParent != nil { + eg.Go(func() error { + if err := sr.layerParent.unlazy(egctx, dhs, pg, s, false); err != nil { + return err + } + parentID = sr.layerParent.getSnapshotID() + return nil + }) + } + + desc, err := sr.ociDesc(ctx, dhs, true) + if err != nil { + return err + } + dh := dhs[desc.Digest] + + eg.Go(func() error { + // unlazies if needed, otherwise a no-op + return lazyRefProvider{ + ref: sr, + desc: desc, + dh: dh, + session: s, + }.Unlazy(egctx) + }) + + if err := eg.Wait(); err != nil { + return err + } + + if pg == nil && dh != nil { + pg = dh.Progress + } + if pg != nil { + _, stopProgress := pg.Start(ctx) + defer stopProgress(rerr) + statusDone := pg.Status("extracting "+desc.Digest.String(), "extracting") + defer statusDone() + } + + key := fmt.Sprintf("extract-%s %s", identity.NewID(), sr.getChainID()) + + err = sr.cm.Snapshotter.Prepare(ctx, key, parentID) + if err != nil { + return err + } + + mountable, err := sr.cm.Snapshotter.Mounts(ctx, key) + if err != nil { + return err + } + mounts, unmount, err := mountable.Mount() + if err != nil { + return err + } + _, err = sr.cm.Applier.Apply(ctx, desc, mounts) + if err != nil { + unmount() + return err + } + + if err := unmount(); err != nil { + return err + } + if err := sr.cm.Snapshotter.Commit(ctx, sr.getSnapshotID(), key); err != nil { + if !errors.Is(err, errdefs.ErrAlreadyExists) { + return err + } + } + sr.queueBlobOnly(false) + sr.queueSize(sizeUnknown) + if err := sr.commitMetadata(); err != nil { + return err + } + return nil +} + func (sr *immutableRef) Release(ctx context.Context) error { sr.cm.mu.Lock() defer sr.cm.mu.Unlock() @@ -674,7 +1277,7 @@ func (sr *immutableRef) Release(ctx context.Context) error { return sr.release(ctx) } -func (sr *immutableRef) updateLastUsed() bool { +func (sr *immutableRef) shouldUpdateLastUsed() bool { return sr.triggerLastUsed } @@ -683,7 +1286,7 @@ func (sr *immutableRef) updateLastUsedNow() bool { return false } for r := range sr.refs { - if r.updateLastUsed() { + if r.shouldUpdateLastUsed() { return false } } @@ -694,54 +1297,40 @@ func (sr *immutableRef) release(ctx context.Context) error { delete(sr.refs, sr) if sr.updateLastUsedNow() { - updateLastUsed(sr.md) + sr.updateLastUsed() if sr.equalMutable != nil { 
sr.equalMutable.triggerLastUsed = true } } if len(sr.refs) == 0 { - if sr.viewMount != nil { // TODO: release viewMount earlier if possible - if err := sr.cm.LeaseManager.Delete(ctx, leases.Lease{ID: sr.view}); err != nil { - return errors.Wrapf(err, "failed to remove view lease %s", sr.view) - } - sr.view = "" - sr.viewMount = nil - } - if sr.equalMutable != nil { sr.equalMutable.release(ctx) + } else { + if err := sr.cm.LeaseManager.Delete(ctx, leases.Lease{ID: sr.viewLeaseID()}); err != nil && !errdefs.IsNotFound(err) { + return err + } + sr.mountCache = nil } } return nil } -func (sr *immutableRef) Finalize(ctx context.Context, b bool) error { +func (sr *immutableRef) Finalize(ctx context.Context) error { sr.mu.Lock() defer sr.mu.Unlock() - - return sr.finalize(ctx, b) + return sr.finalize(ctx) } -func (cr *cacheRecord) Metadata() *metadata.StorageItem { - return cr.md -} - -func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error { +// caller must hold cacheRecord.mu +func (cr *cacheRecord) finalize(ctx context.Context) error { mutable := cr.equalMutable if mutable == nil { return nil } - if !commit { - if HasCachePolicyRetain(mutable) { - CachePolicyRetain(mutable) - return mutable.Metadata().Commit() - } - return nil - } - _, err := cr.cm.ManagerOpt.LeaseManager.Create(ctx, func(l *leases.Lease) error { + _, err := cr.cm.LeaseManager.Create(ctx, func(l *leases.Lease) error { l.ID = cr.ID() l.Labels = map[string]string{ "containerd.io/gc.flat": time.Now().UTC().Format(time.RFC3339Nano), @@ -754,19 +1343,20 @@ func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error { } } - if err := cr.cm.ManagerOpt.LeaseManager.AddResource(ctx, leases.Lease{ID: cr.ID()}, leases.Resource{ - ID: cr.ID(), - Type: "snapshots/" + cr.cm.ManagerOpt.Snapshotter.Name(), + if err := cr.cm.LeaseManager.AddResource(ctx, leases.Lease{ID: cr.ID()}, leases.Resource{ + ID: cr.getSnapshotID(), + Type: "snapshots/" + cr.cm.Snapshotter.Name(), }); err != nil { cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}) - return errors.Wrapf(err, "failed to add snapshot %s to lease", cr.ID()) + return errors.Wrapf(err, "failed to add snapshot %s to lease", cr.getSnapshotID()) } - err = cr.cm.Snapshotter.Commit(ctx, cr.ID(), mutable.ID()) - if err != nil { + if err := cr.cm.Snapshotter.Commit(ctx, cr.getSnapshotID(), mutable.getSnapshotID()); err != nil { cr.cm.LeaseManager.Delete(context.TODO(), leases.Lease{ID: cr.ID()}) - return errors.Wrapf(err, "failed to commit %s", mutable.ID()) + return errors.Wrapf(err, "failed to commit %s to %s during finalize", mutable.getSnapshotID(), cr.getSnapshotID()) } + cr.mountCache = nil + mutable.dead = true go func() { cr.cm.mu.Lock() @@ -777,80 +1367,92 @@ func (cr *cacheRecord) finalize(ctx context.Context, commit bool) error { }() cr.equalMutable = nil - clearEqualMutable(cr.md) - return cr.md.Commit() + cr.clearEqualMutable() + return cr.commitMetadata() } -func (sr *mutableRef) updateLastUsed() bool { +func (sr *mutableRef) shouldUpdateLastUsed() bool { return sr.triggerLastUsed } -func (sr *mutableRef) commit(ctx context.Context) (*immutableRef, error) { +func (sr *mutableRef) commit(ctx context.Context) (_ *immutableRef, rerr error) { if !sr.mutable || len(sr.refs) == 0 { return nil, errors.Wrapf(errInvalid, "invalid mutable ref %p", sr) } id := identity.NewID() - md, _ := sr.cm.md.Get(id) + md, _ := sr.cm.getMetadata(id) rec := &cacheRecord{ - mu: sr.mu, - cm: sr.cm, - parent: sr.parentRef(false, sr.descHandlers), - equalMutable: sr, - 
refs: make(map[ref]struct{}), - md: md, + mu: sr.mu, + cm: sr.cm, + parentRefs: sr.parentRefs.clone(), + equalMutable: sr, + refs: make(map[ref]struct{}), + cacheMetadata: md, } - if descr := GetDescription(sr.md); descr != "" { - if err := queueDescription(md, descr); err != nil { + if descr := sr.GetDescription(); descr != "" { + if err := md.queueDescription(descr); err != nil { return nil, err } } - parentID := "" - if rec.parent != nil { - parentID = rec.parent.ID() - } - if err := initializeMetadata(rec, parentID); err != nil { + if err := initializeMetadata(rec.cacheMetadata, rec.parentRefs); err != nil { return nil, err } sr.cm.records[id] = rec - if err := sr.md.Commit(); err != nil { + if err := sr.commitMetadata(); err != nil { return nil, err } - queueCommitted(md) - setSize(md, sizeUnknown) - setEqualMutable(md, sr.ID()) - if err := md.Commit(); err != nil { + md.queueCommitted(true) + md.queueSize(sizeUnknown) + md.queueSnapshotID(id) + md.setEqualMutable(sr.ID()) + if err := md.commitMetadata(); err != nil { return nil, err } - ref := rec.ref(true, sr.descHandlers) + ref := rec.ref(true, sr.descHandlers, nil) sr.equalImmutable = ref return ref, nil } -func (sr *mutableRef) Mount(ctx context.Context, readonly bool, s session.Group) (snapshot.Mountable, error) { +func (sr *mutableRef) Mount(ctx context.Context, readonly bool, s session.Group) (_ snapshot.Mountable, rerr error) { sr.mu.Lock() defer sr.mu.Unlock() - if sr.cm.Snapshotter.Name() == "stargz" && sr.parent != nil { - var ( - m snapshot.Mountable - rerr error - ) - if err := sr.parent.withRemoteSnapshotLabelsStargzMode(ctx, s, func() { - m, rerr = sr.mount(ctx, readonly) + if sr.mountCache != nil { + if readonly { + return setReadonly(sr.mountCache), nil + } + return sr.mountCache, nil + } + + var mnt snapshot.Mountable + if sr.cm.Snapshotter.Name() == "stargz" && sr.layerParent != nil { + if err := sr.layerParent.withRemoteSnapshotLabelsStargzMode(ctx, s, func() { + mnt, rerr = sr.mount(ctx, s) }); err != nil { return nil, err } - return m, rerr + } else { + mnt, rerr = sr.mount(ctx, s) + } + if rerr != nil { + return nil, rerr } - return sr.mount(ctx, readonly) + // Make the mounts sharable. We don't do this for immutableRef mounts because + // it requires the raw []mount.Mount for computing diff on overlayfs. 
+	mnt = sr.cm.mountPool.setSharable(mnt)
+	sr.mountCache = mnt
+	if readonly {
+		mnt = setReadonly(mnt)
+	}
+	return mnt, nil
}

func (sr *mutableRef) Commit(ctx context.Context) (ImmutableRef, error) {
@@ -875,11 +1477,11 @@ func (sr *mutableRef) Release(ctx context.Context) error {

func (sr *mutableRef) release(ctx context.Context) error {
	delete(sr.refs, sr)
-	if getCachePolicy(sr.md) != cachePolicyRetain {
+	if !sr.HasCachePolicyRetain() {
		if sr.equalImmutable != nil {
-			if getCachePolicy(sr.equalImmutable.md) == cachePolicyRetain {
-				if sr.updateLastUsed() {
-					updateLastUsed(sr.md)
+			if sr.equalImmutable.HasCachePolicyRetain() {
+				if sr.shouldUpdateLastUsed() {
+					sr.updateLastUsed()
					sr.triggerLastUsed = false
				}
				return nil
@@ -890,8 +1492,8 @@ func (sr *mutableRef) release(ctx context.Context) error {
		}
		return sr.remove(ctx, true)
	}
-	if sr.updateLastUsed() {
-		updateLastUsed(sr.md)
+	if sr.shouldUpdateLastUsed() {
+		sr.updateLastUsed()
		sr.triggerLastUsed = false
	}
	return nil
@@ -946,3 +1548,145 @@ func readonlyOverlay(opt []string) []string {
	}
	return out
}
+
+func newSharableMountPool(tmpdirRoot string) (sharableMountPool, error) {
+	if tmpdirRoot != "" {
+		if err := os.MkdirAll(tmpdirRoot, 0700); err != nil {
+			return sharableMountPool{}, fmt.Errorf("failed to prepare mount pool: %w", err)
+		}
+		// If tmpdirRoot is specified, remove existing mounts to avoid conflicts.
+		files, err := os.ReadDir(tmpdirRoot)
+		if err != nil {
+			return sharableMountPool{}, fmt.Errorf("failed to read mount pool: %w", err)
+		}
+		for _, file := range files {
+			if file.IsDir() {
+				dir := filepath.Join(tmpdirRoot, file.Name())
+				bklog.G(context.Background()).Debugf("cleaning up existing temporary mount %q", dir)
+				if err := mount.Unmount(dir, 0); err != nil {
+					if mounted, merr := mountinfo.Mounted(dir); merr != nil || mounted {
+						bklog.G(context.Background()).WithError(err).WithError(merr).
+							WithField("mounted", mounted).Warnf("failed to unmount existing temporary mount %q", dir)
+						continue
+					}
+				}
+				if err := os.Remove(dir); err != nil {
+					bklog.G(context.Background()).WithError(err).Warnf("failed to remove existing temporary mount %q", dir)
+				}
+			}
+		}
+	}
+	return sharableMountPool{tmpdirRoot}, nil
+}
+
+type sharableMountPool struct {
+	tmpdirRoot string
+}
+
+func (p sharableMountPool) setSharable(mounts snapshot.Mountable) snapshot.Mountable {
+	return &sharableMountable{Mountable: mounts, mountPoolRoot: p.tmpdirRoot}
+}
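Aside: the sharableMountable type that follows is essentially a reference-counted resource: the first Mount() materializes the shared mount point, later calls reuse it, and the last release tears it down. A stripped-down model of that counting scheme, with illustrative names and no buildkit types involved:

```go
package main

import (
	"fmt"
	"sync"
)

// refCounted models the count/curRelease bookkeeping used by sharableMountable:
// setup runs on the first Acquire, clean runs on the release that drops the
// count back to zero.
type refCounted struct {
	mu    sync.Mutex
	count int
	setup func() error
	clean func() error
}

func (r *refCounted) Acquire() (release func() error, err error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.count == 0 {
		if err := r.setup(); err != nil {
			return nil, err
		}
	}
	r.count++
	return func() error {
		r.mu.Lock()
		defer r.mu.Unlock()
		r.count--
		if r.count > 0 {
			return nil
		}
		return r.clean()
	}, nil
}

func main() {
	rc := &refCounted{
		setup: func() error { fmt.Println("mount"); return nil },
		clean: func() error { fmt.Println("unmount"); return nil },
	}
	rel1, _ := rc.Acquire()
	rel2, _ := rc.Acquire() // reuses the existing "mount"
	rel1()                  // still shared, nothing happens
	rel2()                  // last release runs "unmount"
}
```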
+
+// sharableMountable allows sharing underlying (possibly writable) mounts among callers.
+// This is useful to share writable overlayfs mounts.
+//
+// NOTE: The Mount() method doesn't return the underlying mount configuration (e.g. overlayfs mounts);
+// instead it always returns bind mounts of the temporary mount point. So if the caller
+// needs to inspect the underlying mount configuration (e.g. for an optimized differ for
+// overlayfs), this wrapper shouldn't be used.
+type sharableMountable struct {
+	snapshot.Mountable
+
+	count         int32
+	mu            sync.Mutex
+	mountPoolRoot string
+
+	curMounts     []mount.Mount
+	curMountPoint string
+	curRelease    func() error
+}
+
+func (sm *sharableMountable) Mount() (_ []mount.Mount, _ func() error, retErr error) {
+	sm.mu.Lock()
+	defer sm.mu.Unlock()
+
+	if sm.curMounts == nil {
+		mounts, release, err := sm.Mountable.Mount()
+		if err != nil {
+			return nil, nil, err
+		}
+		defer func() {
+			if retErr != nil {
+				release()
+			}
+		}()
+		var isOverlay bool
+		for _, m := range mounts {
+			if m.Type == "overlay" {
+				isOverlay = true
+				break
+			}
+		}
+		if !isOverlay {
+			// Don't need temporary mount wrapper for non-overlayfs mounts
+			return mounts, release, nil
+		}
+		dir, err := ioutil.TempDir(sm.mountPoolRoot, "buildkit")
+		if err != nil {
+			return nil, nil, err
+		}
+		defer func() {
+			if retErr != nil {
+				os.Remove(dir)
+			}
+		}()
+		if err := mount.All(mounts, dir); err != nil {
+			return nil, nil, err
+		}
+		defer func() {
+			if retErr != nil {
+				mount.Unmount(dir, 0)
+			}
+		}()
+		sm.curMounts = []mount.Mount{
+			{
+				Source: dir,
+				Type:   "bind",
+				Options: []string{
+					"rw",
+					"rbind",
+				},
+			},
+		}
+		sm.curMountPoint = dir
+		sm.curRelease = release
+	}
+
+	mounts := make([]mount.Mount, len(sm.curMounts))
+	copy(mounts, sm.curMounts)
+
+	sm.count++
+	return mounts, func() error {
+		sm.mu.Lock()
+		defer sm.mu.Unlock()
+
+		sm.count--
+		if sm.count < 0 {
+			if v := os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR"); v == "1" {
+				panic("release of released mount")
+			}
+		} else if sm.count > 0 {
+			return nil
+		}
+
+		// no mounts exist anymore; release the current mount.
+		sm.curMounts = nil
+		if err := mount.Unmount(sm.curMountPoint, 0); err != nil {
+			return err
+		}
+		if err := sm.curRelease(); err != nil {
+			return err
+		}
+		return os.Remove(sm.curMountPoint)
+	}, nil
+}
diff --git a/vendor/github.com/moby/buildkit/cache/remote.go b/vendor/github.com/moby/buildkit/cache/remote.go
index 299a571fd8..d0ac594b6a 100644
--- a/vendor/github.com/moby/buildkit/cache/remote.go
+++ b/vendor/github.com/moby/buildkit/cache/remote.go
@@ -9,14 +9,16 @@ import (
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/reference"
+	"github.com/moby/buildkit/cache/config"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/solver"
+	"github.com/moby/buildkit/util/bklog"
	"github.com/moby/buildkit/util/compression"
	"github.com/moby/buildkit/util/contentutil"
	"github.com/moby/buildkit/util/leaseutil"
	"github.com/moby/buildkit/util/progress/logs"
	"github.com/moby/buildkit/util/pull/pullprogress"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)
@@ -25,31 +27,134 @@ type Unlazier interface {
	Unlazy(ctx context.Context) error
}

-// GetRemote gets a *solver.Remote from content store for this ref (potentially pulling lazily).
-// Note: Use WorkerRef.GetRemote instead as moby integration requires custom GetRemote implementation.
-func (sr *immutableRef) GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, s session.Group) (*solver.Remote, error) {
+// GetRemotes gets []*solver.Remote from the content store for this ref (potentially pulling lazily).
+// compressionopt can be used to specify the compression type of blobs. If Force is true, the compression
+// type is applied to all blobs in the chain.
+// If Force is false, it is applied only to the newly created
+// layers. If all is true, all available chains that have the specified compression type for the topmost
+// blob are appended to the result.
+// Note: Use WorkerRef.GetRemotes instead as moby integration requires a custom GetRemotes implementation.
+func (sr *immutableRef) GetRemotes(ctx context.Context, createIfNeeded bool, refCfg config.RefConfig, all bool, s session.Group) ([]*solver.Remote, error) {
	ctx, done, err := leaseutil.WithLease(ctx, sr.cm.LeaseManager, leaseutil.MakeTemporary)
	if err != nil {
		return nil, err
	}
	defer done(ctx)

-	err = sr.computeBlobChain(ctx, createIfNeeded, compressionType, s)
+	// fast path if compression variants aren't required
+	// NOTE: compressionopt is applied only to *newly created layers* if Force != true.
+	remote, err := sr.getRemote(ctx, createIfNeeded, refCfg, s)
+	if err != nil {
+		return nil, err
+	}
+	if !all || refCfg.Compression.Force || len(remote.Descriptors) == 0 {
+		return []*solver.Remote{remote}, nil // early return if compression variants aren't required
+	}
+
+	// Search all available remotes that have the topmost blob with the specified
+	// compression, over all combinations of compressions
+	res := []*solver.Remote{remote}
+	topmost, parentChain := remote.Descriptors[len(remote.Descriptors)-1], remote.Descriptors[:len(remote.Descriptors)-1]
+	vDesc, err := getBlobWithCompression(ctx, sr.cm.ContentStore, topmost, refCfg.Compression.Type)
+	if err != nil {
+		return res, nil // compression variant doesn't exist; return the main blob only.
+	}
+
+	var variants []*solver.Remote
+	if len(parentChain) == 0 {
+		variants = append(variants, &solver.Remote{
+			Descriptors: []ocispecs.Descriptor{vDesc},
+			Provider:    sr.cm.ContentStore,
+		})
+	} else {
+		// get parents with all combinations of all available compressions.
+		parents, err := getAvailableBlobs(ctx, sr.cm.ContentStore, &solver.Remote{
+			Descriptors: parentChain,
+			Provider:    remote.Provider,
+		})
+		if err != nil {
+			return nil, err
+		}
+		variants = appendRemote(parents, vDesc, sr.cm.ContentStore)
+	}
+
+	// Return the main remote and all its compression variants.
+	// NOTE: Because compressionopt is applied only to *newly created layers* in the main remote (i.e. res[0]),
+	// it's possible that the main remote doesn't contain any blobs of the compressionopt.Type.
+	// The topmost blob of the variants (res[1:]) is guaranteed to be the compressionopt.Type.
+	res = append(res, variants...)
+	return res, nil
+}

+func appendRemote(parents []*solver.Remote, desc ocispecs.Descriptor, p content.Provider) (res []*solver.Remote) {
+	for _, pRemote := range parents {
+		provider := contentutil.NewMultiProvider(pRemote.Provider)
+		provider.Add(desc.Digest, p)
+		res = append(res, &solver.Remote{
+			Descriptors: append(pRemote.Descriptors, desc),
+			Provider:    provider,
+		})
+	}
+	return
+}
+
+func getAvailableBlobs(ctx context.Context, cs content.Store, chain *solver.Remote) ([]*solver.Remote, error) {
+	if len(chain.Descriptors) == 0 {
+		return nil, nil
+	}
+	target, parentChain := chain.Descriptors[len(chain.Descriptors)-1], chain.Descriptors[:len(chain.Descriptors)-1]
+	parents, err := getAvailableBlobs(ctx, cs, &solver.Remote{
+		Descriptors: parentChain,
+		Provider:    chain.Provider,
+	})
+	if err != nil {
+		return nil, err
+	}
+	var descs []ocispecs.Descriptor
+	if err := walkBlob(ctx, cs, target, func(desc ocispecs.Descriptor) bool {
+		descs = append(descs, desc)
+		return true
+	}); err != nil {
+		bklog.G(ctx).WithError(err).Warn("failed to walk variant blob") // this is not a critical error at this moment.
+	}
+	var res []*solver.Remote
+	for _, desc := range descs {
+		desc := desc
+		if len(parents) == 0 { // bottommost ref
+			res = append(res, &solver.Remote{
+				Descriptors: []ocispecs.Descriptor{desc},
+				Provider:    cs,
+			})
+			continue
+		}
+		res = append(res, appendRemote(parents, desc, cs)...)
+	}
+	if len(res) == 0 {
+		// no compression variant blobs are available for this blob; return the original blob.
+		if len(parents) == 0 { // bottommost ref
+			return []*solver.Remote{chain}, nil
+		}
+		return appendRemote(parents, target, chain.Provider), nil
+	}
+	return res, nil
+}
+
+func (sr *immutableRef) getRemote(ctx context.Context, createIfNeeded bool, refCfg config.RefConfig, s session.Group) (*solver.Remote, error) {
+	err := sr.computeBlobChain(ctx, createIfNeeded, refCfg.Compression, s)
	if err != nil {
		return nil, err
	}

-	mprovider := &lazyMultiProvider{mprovider: contentutil.NewMultiProvider(nil)}
+	chain := sr.layerChain()
+	mproviderBase := contentutil.NewMultiProvider(nil)
+	mprovider := &lazyMultiProvider{mprovider: mproviderBase}
	remote := &solver.Remote{
		Provider: mprovider,
	}
-
-	for _, ref := range sr.parentRefChain() {
-		desc, err := ref.ociDesc()
+	for _, ref := range chain {
+		desc, err := ref.ociDesc(ctx, sr.descHandlers, refCfg.PreferNonDistributable)
		if err != nil {
			return nil, err
		}
-
		// NOTE: The media type might be missing for some migrated ones
		// from before lease based storage. If so, we should detect
		// the media type from blob data.
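Aside: the hunk that follows folds image repositories into per-registry `containerd.io/distribution.source.<host>` annotations, keeping each value a comma-separated, de-duplicated list. A standalone sketch of that bookkeeping; the helper name and signature are illustrative, only the annotation-key convention comes from the code:

```go
package main

import (
	"fmt"
	"strings"
)

// addDistributionSource appends repo to the comma-separated repository list
// kept under the distribution-source annotation for host, skipping duplicates.
func addDistributionSource(annotations map[string]string, host, repo string) map[string]string {
	if annotations == nil {
		annotations = map[string]string{}
	}
	key := "containerd.io/distribution.source." + host
	for _, r := range strings.Split(annotations[key], ",") {
		if r == repo {
			return annotations // already recorded
		}
	}
	if annotations[key] == "" {
		annotations[key] = repo
	} else {
		annotations[key] += "," + repo
	}
	return annotations
}

func main() {
	a := addDistributionSource(nil, "docker.io", "library/alpine")
	a = addDistributionSource(a, "docker.io", "library/busybox")
	a = addDistributionSource(a, "docker.io", "library/alpine") // de-duplicated
	fmt.Println(a["containerd.io/distribution.source.docker.io"])
}
```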
@@ -65,10 +170,12 @@ func (sr *immutableRef) GetRemote(ctx context.Context, createIfNeeded bool, comp
		// update distribution source annotation for lazy-refs (non-lazy refs
		// will already have their dsl stored in the content store, which is
		// used by the push handlers)
-		if isLazy, err := ref.isLazy(ctx); err != nil {
+		var addAnnotations []string
+		isLazy, err := ref.isLazy(ctx)
+		if err != nil {
			return nil, err
		} else if isLazy {
-			imageRefs := getImageRefs(ref.md)
+			imageRefs := ref.getImageRefs()
			for _, imageRef := range imageRefs {
				refspec, err := reference.Parse(imageRef)
				if err != nil {
@@ -101,6 +208,36 @@ func (sr *immutableRef) GetRemote(ctx context.Context, createIfNeeded bool, comp
				existingRepos = append(existingRepos, repo)
			}
			desc.Annotations[dslKey] = strings.Join(existingRepos, ",")
+			addAnnotations = append(addAnnotations, dslKey)
+		}
+	}
+
+	if refCfg.Compression.Force {
+		if needs, err := needsConversion(ctx, sr.cm.ContentStore, desc, refCfg.Compression.Type); err != nil {
+			return nil, err
+		} else if needs {
+			// ensure the compression type.
+			// a compressed blob must be created and stored in the content store.
+			blobDesc, err := getBlobWithCompressionWithRetry(ctx, ref, refCfg.Compression, s)
+			if err != nil {
+				return nil, errors.Wrapf(err, "failed to get compression blob %q", refCfg.Compression.Type)
+			}
+			newDesc := desc
+			newDesc.MediaType = blobDesc.MediaType
+			newDesc.Digest = blobDesc.Digest
+			newDesc.Size = blobDesc.Size
+			newDesc.URLs = blobDesc.URLs
+			newDesc.Annotations = nil
+			for _, k := range addAnnotations {
+				if newDesc.Annotations == nil {
+					// allocate before writing; assigning into a nil map would panic
+					newDesc.Annotations = make(map[string]string)
+				}
+				newDesc.Annotations[k] = desc.Annotations[k]
+			}
+			for k, v := range blobDesc.Annotations {
+				if newDesc.Annotations == nil {
+					newDesc.Annotations = make(map[string]string)
+				}
+				newDesc.Annotations[k] = v
+			}
+			desc = newDesc
		}
	}
@@ -115,6 +252,16 @@ func (sr *immutableRef) GetRemote(ctx context.Context, createIfNeeded bool, comp
	return remote, nil
}

+func getBlobWithCompressionWithRetry(ctx context.Context, ref *immutableRef, comp compression.Config, s session.Group) (ocispecs.Descriptor, error) {
+	if blobDesc, err := ref.getBlobWithCompression(ctx, comp.Type); err == nil {
+		return blobDesc, nil
+	}
+	if err := ensureCompression(ctx, ref, comp, s); err != nil {
+		return ocispecs.Descriptor{}, errors.Wrapf(err, "failed to get and ensure compression type of %q", comp.Type)
+	}
+	return ref.getBlobWithCompression(ctx, comp.Type)
+}
+
type lazyMultiProvider struct {
	mprovider *contentutil.MultiProvider
	plist     []lazyRefProvider
@@ -125,7 +272,7 @@ func (mp *lazyMultiProvider) Add(p lazyRefProvider) {
	mp.plist = append(mp.plist, p)
}

-func (mp *lazyMultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+func (mp *lazyMultiProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) {
	return mp.mprovider.ReaderAt(ctx, desc)
}
@@ -142,12 +289,12 @@
type lazyRefProvider struct {
	ref     *immutableRef
-	desc    ocispec.Descriptor
+	desc    ocispecs.Descriptor
	dh      *DescHandler
	session session.Group
}

-func (p lazyRefProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+func (p lazyRefProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) {
	if desc.Digest != p.desc.Digest {
		return nil, errdefs.ErrNotFound
	}
@@ -164,6 +311,11 @@
	} else if !isLazy {
		return nil, nil
	}
+	defer func() {
+		if rerr == 
nil { + rerr = p.ref.linkBlob(ctx, p.desc) + } + }() if p.dh == nil { // shouldn't happen, if you have a lazy immutable ref it already should be validated @@ -183,23 +335,22 @@ func (p lazyRefProvider) Unlazy(ctx context.Context) error { err := contentutil.Copy(ctx, p.ref.cm.ContentStore, &pullprogress.ProviderWithProgress{ Provider: p.dh.Provider(p.session), Manager: p.ref.cm.ContentStore, - }, p.desc, logs.LoggerFromContext(ctx)) + }, p.desc, p.dh.Ref, logs.LoggerFromContext(ctx)) if err != nil { return nil, err } - if imageRefs := getImageRefs(p.ref.md); len(imageRefs) > 0 { + if imageRefs := p.ref.getImageRefs(); len(imageRefs) > 0 { // just use the first image ref, it's arbitrary imageRef := imageRefs[0] - if GetDescription(p.ref.md) == "" { - queueDescription(p.ref.md, "pulled from "+imageRef) - err := p.ref.md.Commit() - if err != nil { + if p.ref.GetDescription() == "" { + if err := p.ref.SetDescription("pulled from " + imageRef); err != nil { return nil, err } } } - return nil, err + + return nil, nil }) return err } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/export.go b/vendor/github.com/moby/buildkit/cache/remotecache/export.go index b39045c825..1c3a240cfc 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/export.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/export.go @@ -18,14 +18,14 @@ import ( "github.com/moby/buildkit/util/progress/logs" digest "github.com/opencontainers/go-digest" specs "github.com/opencontainers/image-spec/specs-go" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) type ResolveCacheExporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Exporter, error) func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) + pw, _, _ := progress.NewFromContext(ctx) now := time.Now() st := progress.Status{ Started: &now, @@ -45,6 +45,11 @@ type Exporter interface { // Finalize finalizes and return metadata that are returned to the client // e.g. ExporterResponseManifestDesc Finalize(ctx context.Context) (map[string]string, error) + Config() Config +} + +type Config struct { + Compression compression.Config } const ( @@ -58,16 +63,24 @@ type contentCacheExporter struct { chains *v1.CacheChains ingester content.Ingester oci bool + ref string + comp compression.Config } -func NewExporter(ingester content.Ingester, oci bool) Exporter { +func NewExporter(ingester content.Ingester, ref string, oci bool, compressionConfig compression.Config) Exporter { cc := v1.NewCacheChains() - return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci} + return &contentCacheExporter{CacheExporterTarget: cc, chains: cc, ingester: ingester, oci: oci, ref: ref, comp: compressionConfig} +} + +func (ce *contentCacheExporter) Config() Config { + return Config{ + Compression: ce.comp, + } } func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string, error) { res := make(map[string]string) - config, descs, err := ce.chains.Marshal() + config, descs, err := ce.chains.Marshal(ctx) if err != nil { return nil, err } @@ -79,14 +92,14 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string MediaType string `json:"mediaType,omitempty"` // Manifests references platform specific manifests. 
- Manifests []ocispec.Descriptor `json:"manifests"` + Manifests []ocispecs.Descriptor `json:"manifests"` } var mfst manifestList mfst.SchemaVersion = 2 mfst.MediaType = images.MediaTypeDockerSchema2ManifestList if ce.oci { - mfst.MediaType = ocispec.MediaTypeImageIndex + mfst.MediaType = ocispecs.MediaTypeImageIndex } for _, l := range config.Layers { @@ -95,7 +108,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string return nil, errors.Errorf("missing blob %s", l.Blob) } layerDone := oneOffProgress(ctx, fmt.Sprintf("writing layer %s", l.Blob)) - if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor, logs.LoggerFromContext(ctx)); err != nil { + if err := contentutil.Copy(ctx, ce.ingester, dgstPair.Provider, dgstPair.Descriptor, ce.ref, logs.LoggerFromContext(ctx)); err != nil { return nil, layerDone(errors.Wrap(err, "error writing layer blob")) } layerDone(nil) @@ -109,7 +122,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string return nil, err } dgst := digest.FromBytes(dt) - desc := ocispec.Descriptor{ + desc := ocispecs.Descriptor{ Digest: dgst, Size: int64(len(dt)), MediaType: v1.CacheConfigMediaTypeV0, @@ -128,7 +141,7 @@ func (ce *contentCacheExporter) Finalize(ctx context.Context) (map[string]string } dgst = digest.FromBytes(dt) - desc = ocispec.Descriptor{ + desc = ocispecs.Descriptor{ Digest: dgst, Size: int64(len(dt)), MediaType: mfst.MediaType, diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/import.go b/vendor/github.com/moby/buildkit/cache/remotecache/import.go index 1543297299..6278090187 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/import.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/import.go @@ -15,22 +15,22 @@ import ( "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) // ResolveCacheImporterFunc returns importer and descriptor. 
-type ResolveCacheImporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Importer, ocispec.Descriptor, error) +type ResolveCacheImporterFunc func(ctx context.Context, g session.Group, attrs map[string]string) (Importer, ocispecs.Descriptor, error) type Importer interface { - Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) + Resolve(ctx context.Context, desc ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) } type DistributionSourceLabelSetter interface { SetDistributionSourceLabel(context.Context, digest.Digest) error - SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor + SetDistributionSourceAnnotation(desc ocispecs.Descriptor) ocispecs.Descriptor } func NewImporter(provider content.Provider) Importer { @@ -41,20 +41,20 @@ type contentCacheImporter struct { provider content.Provider } -func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) { +func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispecs.Descriptor, id string, w worker.Worker) (solver.CacheManager, error) { dt, err := readBlob(ctx, ci.provider, desc) if err != nil { return nil, err } - var mfst ocispec.Index + var mfst ocispecs.Index if err := json.Unmarshal(dt, &mfst); err != nil { return nil, err } allLayers := v1.DescriptorProvider{} - var configDesc ocispec.Descriptor + var configDesc ocispecs.Descriptor for _, m := range mfst.Manifests { if m.MediaType == v1.CacheConfigMediaTypeV0 { @@ -94,10 +94,10 @@ func (ci *contentCacheImporter) Resolve(ctx context.Context, desc ocispec.Descri if err != nil { return nil, err } - return solver.NewCacheManager(id, keysStorage, resultStorage), nil + return solver.NewCacheManager(ctx, id, keysStorage, resultStorage), nil } -func readBlob(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]byte, error) { +func readBlob(ctx context.Context, provider content.Provider, desc ocispecs.Descriptor) ([]byte, error) { maxBlobSize := int64(1 << 20) if desc.Size > maxBlobSize { return nil, errors.Errorf("blob %s is too large (%d > %d)", desc.Digest, desc.Size, maxBlobSize) @@ -132,7 +132,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte for dgst, dt := range m { func(dgst digest.Digest, dt []byte) { eg.Go(func() error { - var m ocispec.Manifest + var m ocispecs.Manifest if err := json.Unmarshal(dt, &m); err != nil { return errors.WithStack(err) @@ -229,7 +229,7 @@ func (ci *contentCacheImporter) importInlineCache(ctx context.Context, dt []byte if err != nil { return nil, err } - cms = append(cms, solver.NewCacheManager(id, keysStorage, resultStorage)) + cms = append(cms, solver.NewCacheManager(ctx, id, keysStorage, resultStorage)) } return solver.NewCombinedCacheManager(cms, nil), nil @@ -242,10 +242,10 @@ func (ci *contentCacheImporter) allDistributionManifests(ctx context.Context, dt } switch mt { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest: + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest: m[digest.FromBytes(dt)] = dt - case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: - var index ocispec.Index + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + var index ocispecs.Index if err := json.Unmarshal(dt, &index); err != nil { return errors.WithStack(err) } diff --git 
a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go index 449c7c0fb5..cf11db4959 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/inline/inline.go @@ -8,7 +8,9 @@ import ( v1 "github.com/moby/buildkit/cache/remotecache/v1" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -28,6 +30,12 @@ type exporter struct { chains *v1.CacheChains } +func (ce *exporter) Config() remotecache.Config { + return remotecache.Config{ + Compression: compression.New(compression.Default), + } +} + func (ce *exporter) Finalize(ctx context.Context) (map[string]string, error) { return nil, nil } @@ -38,8 +46,8 @@ func (ce *exporter) reset() { ce.chains = cc } -func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) { - config, descs, err := ce.chains.Marshal() +func (ce *exporter) ExportForLayers(ctx context.Context, layers []digest.Digest) ([]byte, error) { + config, descs, err := ce.chains.Marshal(ctx) if err != nil { return nil, err } @@ -63,7 +71,7 @@ func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) { return nil, err } - cfg, _, err := cc.Marshal() + cfg, _, err := cc.Marshal(ctx) if err != nil { return nil, err } @@ -73,14 +81,55 @@ func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) { return nil, nil } - cache := map[int]int{} - // reorder layers based on the order in the image + blobIndexes := make(map[digest.Digest]int, len(layers)) + for i, blob := range layers { + blobIndexes[blob] = i + } + for i, r := range cfg.Records { for j, rr := range r.Results { - n := getSortedLayerIndex(rr.LayerIndex, cfg.Layers, cache) - rr.LayerIndex = n - r.Results[j] = rr + resultBlobs := layerToBlobs(rr.LayerIndex, cfg.Layers) + // match being true means the result is in the same order as the image + var match bool + if len(resultBlobs) <= len(layers) { + match = true + for k, resultBlob := range resultBlobs { + layerBlob := layers[k] + if resultBlob != layerBlob { + match = false + break + } + } + } + if match { + // The layers of the result are in the same order as the image, so we can + // specify it just using the CacheResult struct and specifying LayerIndex + // as the top-most layer of the result. + rr.LayerIndex = len(resultBlobs) - 1 + r.Results[j] = rr + } else { + // The layers of the result are not in the same order as the image, so we + // have to use ChainedResult to specify each layer of the result individually. + chainedResult := v1.ChainedResult{} + for _, resultBlob := range resultBlobs { + idx, ok := blobIndexes[resultBlob] + if !ok { + return nil, errors.Errorf("failed to find blob %s in layers", resultBlob) + } + chainedResult.LayerIndexes = append(chainedResult.LayerIndexes, idx) + } + r.Results[j] = v1.CacheResult{} + r.ChainedResults = append(r.ChainedResults, chainedResult) + } + // remove any CacheResults that had to be converted to the ChainedResult format. 
+ var filteredResults []v1.CacheResult + for _, rr := range r.Results { + if rr != (v1.CacheResult{}) { + filteredResults = append(filteredResults, rr) + } + } + r.Results = filteredResults cfg.Records[i] = r } } @@ -94,14 +143,16 @@ func (ce *exporter) ExportForLayers(layers []digest.Digest) ([]byte, error) { return dt, nil } -func getSortedLayerIndex(idx int, layers []v1.CacheLayer, cache map[int]int) int { - if idx == -1 { - return -1 +func layerToBlobs(idx int, layers []v1.CacheLayer) []digest.Digest { + var ds []digest.Digest + for idx != -1 { + layer := layers[idx] + ds = append(ds, layer.Blob) + idx = layer.ParentIndex } - l := layers[idx] - if i, ok := cache[idx]; ok { - return i + // reverse so they go lowest to highest + for i, j := 0, len(ds)-1; i < j; i, j = i+1, j-1 { + ds[i], ds[j] = ds[j], ds[i] } - cache[idx] = getSortedLayerIndex(l.ParentIndex, layers, cache) + 1 - return cache[idx] + return ds } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go index 2ee194afdf..18c73364c0 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/local/local.go @@ -9,8 +9,9 @@ import ( "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/session" sessioncontent "github.com/moby/buildkit/session/content" + "github.com/moby/buildkit/util/compression" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -20,6 +21,9 @@ const ( attrDest = "dest" attrOCIMediatypes = "oci-mediatypes" contentStoreIDPrefix = "local:" + attrLayerCompression = "compression" + attrForceCompression = "force-compression" + attrCompressionLevel = "compression-level" ) // ResolveCacheExporterFunc for "local" cache exporter. @@ -29,6 +33,10 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor if store == "" { return nil, errors.New("local cache exporter requires dest") } + compressionConfig, err := attrsToCompression(attrs) + if err != nil { + return nil, err + } ociMediatypes := true if v, ok := attrs[attrOCIMediatypes]; ok { b, err := strconv.ParseBool(v) @@ -42,32 +50,32 @@ func ResolveCacheExporterFunc(sm *session.Manager) remotecache.ResolveCacheExpor if err != nil { return nil, err } - return remotecache.NewExporter(cs, ociMediatypes), nil + return remotecache.NewExporter(cs, "", ociMediatypes, *compressionConfig), nil } } // ResolveCacheImporterFunc for "local" cache importer. 
func ResolveCacheImporterFunc(sm *session.Manager) remotecache.ResolveCacheImporterFunc { - return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) { + return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) { dgstStr := attrs[attrDigest] if dgstStr == "" { - return nil, specs.Descriptor{}, errors.New("local cache importer requires explicit digest") + return nil, ocispecs.Descriptor{}, errors.New("local cache importer requires explicit digest") } dgst := digest.Digest(dgstStr) store := attrs[attrSrc] if store == "" { - return nil, specs.Descriptor{}, errors.New("local cache importer requires src") + return nil, ocispecs.Descriptor{}, errors.New("local cache importer requires src") } csID := contentStoreIDPrefix + store cs, err := getContentStore(ctx, sm, g, csID) if err != nil { - return nil, specs.Descriptor{}, err + return nil, ocispecs.Descriptor{}, err } info, err := cs.Info(ctx, dgst) if err != nil { - return nil, specs.Descriptor{}, err + return nil, ocispecs.Descriptor{}, err } - desc := specs.Descriptor{ + desc := ocispecs.Descriptor{ // MediaType is typically MediaTypeDockerSchema2ManifestList, // but we leave it empty until we get correct support for local index.json Digest: dgst, @@ -92,3 +100,34 @@ func getContentStore(ctx context.Context, sm *session.Manager, g session.Group, } return sessioncontent.NewCallerStore(caller, storeID), nil } + +func attrsToCompression(attrs map[string]string) (*compression.Config, error) { + compressionType := compression.Default + if v, ok := attrs[attrLayerCompression]; ok { + if c := compression.Parse(v); c != compression.UnknownCompression { + compressionType = c + } + } + compressionConfig := compression.New(compressionType) + if v, ok := attrs[attrForceCompression]; ok { + var force bool + if v == "" { + force = true + } else { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) + } + force = b + } + compressionConfig = compressionConfig.SetForce(force) + } + if v, ok := attrs[attrCompressionLevel]; ok { + ii, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) + } + compressionConfig = compressionConfig.SetLevel(int(ii)) + } + return &compressionConfig, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go index 281d9fa4a3..cfe54e52aa 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/registry/registry.go @@ -6,14 +6,18 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/remotes/docker" + "github.com/containerd/containerd/snapshots" "github.com/docker/distribution/reference" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/contentutil" + "github.com/moby/buildkit/util/estargz" + "github.com/moby/buildkit/util/push" "github.com/moby/buildkit/util/resolver" + "github.com/moby/buildkit/util/resolver/limited" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs 
"github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -29,12 +33,19 @@ func canonicalizeRef(rawRef string) (string, error) { } const ( - attrRef = "ref" - attrOCIMediatypes = "oci-mediatypes" + attrRef = "ref" + attrOCIMediatypes = "oci-mediatypes" + attrLayerCompression = "compression" + attrForceCompression = "force-compression" + attrCompressionLevel = "compression-level" ) func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) remotecache.ResolveCacheExporterFunc { return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Exporter, error) { + compressionConfig, err := attrsToCompression(attrs) + if err != nil { + return nil, err + } ref, err := canonicalizeRef(attrs[attrRef]) if err != nil { return nil, err @@ -48,31 +59,31 @@ func ResolveCacheExporterFunc(sm *session.Manager, hosts docker.RegistryHosts) r ociMediatypes = b } remote := resolver.DefaultPool.GetResolver(hosts, ref, "push", sm, g) - pusher, err := remote.Pusher(ctx, ref) + pusher, err := push.Pusher(ctx, remote, ref) if err != nil { return nil, err } - return remotecache.NewExporter(contentutil.FromPusher(pusher), ociMediatypes), nil + return remotecache.NewExporter(contentutil.FromPusher(pusher), ref, ociMediatypes, *compressionConfig), nil } } func ResolveCacheImporterFunc(sm *session.Manager, cs content.Store, hosts docker.RegistryHosts) remotecache.ResolveCacheImporterFunc { - return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, specs.Descriptor, error) { + return func(ctx context.Context, g session.Group, attrs map[string]string) (remotecache.Importer, ocispecs.Descriptor, error) { ref, err := canonicalizeRef(attrs[attrRef]) if err != nil { - return nil, specs.Descriptor{}, err + return nil, ocispecs.Descriptor{}, err } remote := resolver.DefaultPool.GetResolver(hosts, ref, "pull", sm, g) xref, desc, err := remote.Resolve(ctx, ref) if err != nil { - return nil, specs.Descriptor{}, err + return nil, ocispecs.Descriptor{}, err } fetcher, err := remote.Fetcher(ctx, xref) if err != nil { - return nil, specs.Descriptor{}, err + return nil, ocispecs.Descriptor{}, err } src := &withDistributionSourceLabel{ - Provider: contentutil.FromFetcher(fetcher), + Provider: contentutil.FromFetcher(limited.Default.WrapFetcher(fetcher, ref)), ref: ref, source: cs, } @@ -93,14 +104,59 @@ func (dsl *withDistributionSourceLabel) SetDistributionSourceLabel(ctx context.C if err != nil { return err } - _, err = hf(ctx, ocispec.Descriptor{Digest: dgst}) + _, err = hf(ctx, ocispecs.Descriptor{Digest: dgst}) return err } -func (dsl *withDistributionSourceLabel) SetDistributionSourceAnnotation(desc ocispec.Descriptor) ocispec.Descriptor { +func (dsl *withDistributionSourceLabel) SetDistributionSourceAnnotation(desc ocispecs.Descriptor) ocispecs.Descriptor { if desc.Annotations == nil { desc.Annotations = map[string]string{} } desc.Annotations["containerd.io/distribution.source.ref"] = dsl.ref return desc } + +func (dsl *withDistributionSourceLabel) SnapshotLabels(descs []ocispecs.Descriptor, index int) map[string]string { + if len(descs) < index { + return nil + } + labels := snapshots.FilterInheritedLabels(descs[index].Annotations) + if labels == nil { + labels = make(map[string]string) + } + for k, v := range estargz.SnapshotLabels(dsl.ref, descs, index) { + labels[k] = v + } + return labels +} + +func attrsToCompression(attrs map[string]string) (*compression.Config, error) { + compressionType := compression.Default + if 
v, ok := attrs[attrLayerCompression]; ok { + if c := compression.Parse(v); c != compression.UnknownCompression { + compressionType = c + } + } + compressionConfig := compression.New(compressionType) + if v, ok := attrs[attrForceCompression]; ok { + var force bool + if v == "" { + force = true + } else { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value %s specified for %s", v, attrForceCompression) + } + force = b + } + compressionConfig = compressionConfig.SetForce(force) + } + if v, ok := attrs[attrCompressionLevel]; ok { + ii, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return nil, errors.Wrapf(err, "non-integer value %s specified for %s", v, attrCompressionLevel) + } + compressionConfig = compressionConfig.SetLevel(int(ii)) + } + return &compressionConfig, nil +} diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go index dee02f4b36..7ba7eb0f60 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/cachestorage.go @@ -7,6 +7,7 @@ import ( "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -214,35 +215,41 @@ func (cs *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solv } func (cs *cacheResultStorage) LoadWithParents(ctx context.Context, res solver.CacheResult) (map[string]solver.Result, error) { - v := cs.byResultID(res.ID) - if v == nil || v.result == nil { - return nil, errors.WithStack(solver.ErrNotFound) - } - m := map[string]solver.Result{} visited := make(map[*item]struct{}) - if err := v.walkAllResults(func(i *item) error { - if i.result == nil { - return nil - } - id, ok := cs.byItem[i] - if !ok { - return nil - } - if isSubRemote(*i.result, *v.result) { - ref, err := cs.w.FromRemote(ctx, i.result) - if err != nil { - return err + + ids, ok := cs.byResult[res.ID] + if !ok || len(ids) == 0 { + return nil, errors.WithStack(solver.ErrNotFound) + } + + for id := range ids { + v, ok := cs.byID[id] + if ok && v.result != nil { + if err := v.walkAllResults(func(i *item) error { + if i.result == nil { + return nil + } + id, ok := cs.byItem[i] + if !ok { + return nil + } + if isSubRemote(*i.result, *v.result) { + ref, err := cs.w.FromRemote(ctx, i.result) + if err != nil { + return err + } + m[id] = worker.NewWorkerRefResult(ref, cs.w) + } + return nil + }, visited); err != nil { + for _, v := range m { + v.Release(context.TODO()) + } + return nil, err } - m[id] = worker.NewWorkerRefResult(ref, cs.w) } - return nil - }, visited); err != nil { - for _, v := range m { - v.Release(context.TODO()) - } - return nil, err } return m, nil @@ -261,9 +268,25 @@ func (cs *cacheResultStorage) Load(ctx context.Context, res solver.CacheResult) return worker.NewWorkerRefResult(ref, cs.w), nil } -func (cs *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult, _ session.Group) (*solver.Remote, error) { +func (cs *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopts *compression.Config, _ session.Group) ([]*solver.Remote, error) { if r := cs.byResultID(res.ID); r != nil && r.result != nil { - return r.result, nil + if compressionopts == nil { + return []*solver.Remote{r.result}, nil 
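The compression check that follows is best effort: with `Force` set, every descriptor in the remote must already use the requested compression; without it, a single matching blob is enough. A distilled sketch of that predicate, assuming the `compression.Config` shape introduced in this diff (`Type`, `Force`):

```go
import (
	"github.com/moby/buildkit/util/compression"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// remoteMatches mirrors the filter in LoadRemotes: with Force, every blob
// must already carry the requested compression's media type; otherwise one
// matching blob suffices.
func remoteMatches(descs []ocispecs.Descriptor, cfg *compression.Config) bool {
	match := false
	for _, d := range descs {
		ok := cfg.Type.IsMediaType(d.MediaType)
		match = match || ok
		if cfg.Force && !ok {
			return false // a non-matching blob fails the whole remote under Force
		}
	}
	return match
}
```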
+ } + // Any of blobs in the remote must meet the specified compression option. + match := false + for _, desc := range r.result.Descriptors { + m := compressionopts.Type.IsMediaType(desc.MediaType) + match = match || m + if compressionopts.Force && !m { + match = false + break + } + } + if match { + return []*solver.Remote{r.result}, nil + } + return nil, nil // return nil as it's best effort. } return nil, errors.WithStack(solver.ErrNotFound) } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go index 166971de17..306e037f7f 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/chains.go @@ -1,6 +1,7 @@ package cacheimport import ( + "context" "strings" "sync" "time" @@ -8,7 +9,7 @@ import ( "github.com/containerd/containerd/content" "github.com/moby/buildkit/solver" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) func NewCacheChains() *CacheChains { @@ -75,7 +76,7 @@ func (c *CacheChains) normalize() error { return nil } -func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) { +func (c *CacheChains) Marshal(ctx context.Context) (*CacheConfig, DescriptorProvider, error) { if err := c.normalize(); err != nil { return nil, nil, err } @@ -87,7 +88,7 @@ func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) { } for _, it := range c.items { - if err := marshalItem(it, st); err != nil { + if err := marshalItem(ctx, it, st); err != nil { return nil, nil, err } } @@ -104,7 +105,7 @@ func (c *CacheChains) Marshal() (*CacheConfig, DescriptorProvider, error) { type DescriptorProvider map[digest.Digest]DescriptorProviderPair type DescriptorProviderPair struct { - Descriptor ocispec.Descriptor + Descriptor ocispecs.Descriptor Provider content.Provider } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go index 79adf014af..65a6e441f5 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/parse.go @@ -5,7 +5,7 @@ import ( "github.com/moby/buildkit/solver" "github.com/moby/buildkit/util/contentutil" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -65,6 +65,31 @@ func parseRecord(cc CacheConfig, idx int, provider DescriptorProvider, t solver. 
} } + for _, res := range rec.ChainedResults { + remote := &solver.Remote{} + mp := contentutil.NewMultiProvider(nil) + for _, diff := range res.LayerIndexes { + if diff < 0 || diff >= len(cc.Layers) { + return nil, errors.Errorf("invalid layer index %d", diff) + } + + l := cc.Layers[diff] + + descPair, ok := provider[l.Blob] + if !ok { + remote = nil + break + } + + remote.Descriptors = append(remote.Descriptors, descPair.Descriptor) + mp.Add(descPair.Descriptor.Digest, descPair.Provider) + } + if remote != nil { + remote.Provider = mp + r.AddResult(res.CreatedAt, remote) + } + } + cache[idx] = r return r, nil } @@ -103,8 +128,7 @@ func getRemoteChain(layers []CacheLayer, idx int, provider DescriptorProvider, v return r, nil } return &solver.Remote{ - Descriptors: []ocispec.Descriptor{descPair.Descriptor}, + Descriptors: []ocispecs.Descriptor{descPair.Descriptor}, Provider: descPair.Provider, }, nil - } diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go index 4c6bc0bb26..bb9501e4cf 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/spec.go @@ -14,14 +14,23 @@ type CacheConfig struct { } type CacheLayer struct { - Blob digest.Digest `json:"blob,omitempty"` - ParentIndex int `json:"parent,omitempty"` + Blob digest.Digest `json:"blob,omitempty"` + ParentIndex int `json:"parent,omitempty"` + Annotations *LayerAnnotations `json:"annotations,omitempty"` +} + +type LayerAnnotations struct { + MediaType string `json:"mediaType,omitempty"` + DiffID digest.Digest `json:"diffID,omitempty"` + Size int64 `json:"size,omitempty"` + CreatedAt time.Time `json:"createdAt,omitempty"` } type CacheRecord struct { - Results []CacheResult `json:"layers,omitempty"` - Digest digest.Digest `json:"digest,omitempty"` - Inputs [][]CacheInput `json:"inputs,omitempty"` + Results []CacheResult `json:"layers,omitempty"` + ChainedResults []ChainedResult `json:"chains,omitempty"` + Digest digest.Digest `json:"digest,omitempty"` + Inputs [][]CacheInput `json:"inputs,omitempty"` } type CacheResult struct { @@ -29,6 +38,11 @@ type CacheResult struct { CreatedAt time.Time `json:"createdAt,omitempty"` } +type ChainedResult struct { + LayerIndexes []int `json:"layers"` + CreatedAt time.Time `json:"createdAt,omitempty"` +} + type CacheInput struct { Selector string `json:"selector,omitempty"` LinkIndex int `json:"link"` diff --git a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go index 889064a6c1..f7139035fa 100644 --- a/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go +++ b/vendor/github.com/moby/buildkit/cache/remotecache/v1/utils.go @@ -1,20 +1,17 @@ package cacheimport import ( + "context" "fmt" "sort" - "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/solver" digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) -// EmptyLayerRemovalSupported defines if implementation supports removal of empty layers. Buildkit image exporter -// removes empty layers, but moby layerstore based implementation does not. 
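The `spec.go` additions above are the wire-level counterpart of the exporter change: a `CacheRecord` can now carry `chains` entries listing explicit layer indexes (possibly out of image order) alongside the existing compact `layers` form. Roughly, a record built both ways (values illustrative only):

```go
package main

import (
	"encoding/json"
	"fmt"

	cacheimport "github.com/moby/buildkit/cache/remotecache/v1"
)

func main() {
	rec := cacheimport.CacheRecord{
		Results: []cacheimport.CacheResult{
			{LayerIndex: 2}, // ordered-prefix result: the top-most layer index suffices
		},
		ChainedResults: []cacheimport.ChainedResult{
			{LayerIndexes: []int{0, 2, 1}}, // out-of-order result: every index spelled out
		},
	}
	dt, err := json.Marshal(rec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(dt)) // "layers" holds the compact results, "chains" the explicit ones
}
```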
-var EmptyLayerRemovalSupported = true - // sortConfig sorts the config structure to make sure it is deterministic func sortConfig(cc *CacheConfig) { type indexedLayer struct { @@ -277,24 +274,30 @@ type marshalState struct { recordsByItem map[*item]int } -func marshalRemote(r *solver.Remote, state *marshalState) string { +func marshalRemote(ctx context.Context, r *solver.Remote, state *marshalState) string { if len(r.Descriptors) == 0 { return "" } + + if cd, ok := r.Provider.(interface { + CheckDescriptor(context.Context, ocispecs.Descriptor) error + }); ok && len(r.Descriptors) > 0 { + for _, d := range r.Descriptors { + if cd.CheckDescriptor(ctx, d) != nil { + return "" + } + } + } var parentID string if len(r.Descriptors) > 1 { r2 := &solver.Remote{ Descriptors: r.Descriptors[:len(r.Descriptors)-1], Provider: r.Provider, } - parentID = marshalRemote(r2, state) + parentID = marshalRemote(ctx, r2, state) } desc := r.Descriptors[len(r.Descriptors)-1] - if desc.Digest == exptypes.EmptyGZLayer && EmptyLayerRemovalSupported { - return parentID - } - state.descriptors[desc.Digest] = DescriptorProviderPair{ Descriptor: desc, Provider: r.Provider, @@ -318,7 +321,7 @@ func marshalRemote(r *solver.Remote, state *marshalState) string { return id } -func marshalItem(it *item, state *marshalState) error { +func marshalItem(ctx context.Context, it *item, state *marshalState) error { if _, ok := state.recordsByItem[it]; ok { return nil } @@ -330,7 +333,7 @@ func marshalItem(it *item, state *marshalState) error { for i, m := range it.links { for l := range m { - if err := marshalItem(l.src, state); err != nil { + if err := marshalItem(ctx, l.src, state); err != nil { return err } idx, ok := state.recordsByItem[l.src] @@ -345,7 +348,7 @@ func marshalItem(it *item, state *marshalState) error { } if it.result != nil { - id := marshalRemote(it.result, state) + id := marshalRemote(ctx, it.result, state) if id != "" { idx, ok := state.chainsByID[id] if !ok { diff --git a/vendor/github.com/moby/buildkit/client/build.go b/vendor/github.com/moby/buildkit/client/build.go index 4cb91d7aa9..25b3aa6d7c 100644 --- a/vendor/github.com/moby/buildkit/client/build.go +++ b/vendor/github.com/moby/buildkit/client/build.go @@ -44,7 +44,13 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF }) } - cb := func(ref string, s *session.Session) error { + cb := func(ref string, s *session.Session, opts map[string]string) error { + for k, v := range opts { + if feOpts == nil { + feOpts = map[string]string{} + } + feOpts[k] = v + } gwClient := c.gatewayClientForBuild(ref) g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers) if err != nil { @@ -148,3 +154,8 @@ func (g *gatewayClientForBuild) ExecProcess(ctx context.Context, opts ...grpc.Ca ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) return g.gateway.ExecProcess(ctx, opts...) 
} + +func (g *gatewayClientForBuild) Warn(ctx context.Context, in *gatewayapi.WarnRequest, opts ...grpc.CallOption) (*gatewayapi.WarnResponse, error) { + ctx = buildid.AppendToOutgoingContext(ctx, g.buildID) + return g.gateway.Warn(ctx, in) +} diff --git a/vendor/github.com/moby/buildkit/client/client.go b/vendor/github.com/moby/buildkit/client/client.go index 38429ff5bb..8c9259a4a9 100644 --- a/vendor/github.com/moby/buildkit/client/client.go +++ b/vendor/github.com/moby/buildkit/client/client.go @@ -7,24 +7,31 @@ import ( "io/ioutil" "net" "net/url" + "strings" "github.com/containerd/containerd/defaults" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/connhelper" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/util/appdefaults" "github.com/moby/buildkit/util/grpcerrors" - opentracing "github.com/opentracing/opentracing-go" + "github.com/moby/buildkit/util/tracing/otlptracegrpc" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + "go.opentelemetry.io/otel/propagation" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" "google.golang.org/grpc/credentials" + "google.golang.org/grpc/credentials/insecure" ) type Client struct { - conn *grpc.ClientConn + conn *grpc.ClientConn + sessionDialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) } type ClientOpt interface{} @@ -37,10 +44,16 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error } needDialer := true needWithInsecure := true + tlsServerName := "" var unary []grpc.UnaryClientInterceptor var stream []grpc.StreamClientInterceptor + var customTracer bool // allows manually setting disabling tracing even if tracer in context + var tracerProvider trace.TracerProvider + var tracerDelegate TracerDelegate + var sessionDialer func(context.Context, string, map[string][]string) (net.Conn, error) + for _, o := range opts { if _, ok := o.(*withFailFast); ok { gopts = append(gopts, grpc.FailOnNonTempDialError(true)) @@ -52,16 +65,36 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error } gopts = append(gopts, opt) needWithInsecure = false + tlsServerName = credInfo.ServerName } if wt, ok := o.(*withTracer); ok { - unary = append(unary, otgrpc.OpenTracingClientInterceptor(wt.tracer, otgrpc.LogPayloads())) - stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(wt.tracer)) + customTracer = true + tracerProvider = wt.tp } if wd, ok := o.(*withDialer); ok { gopts = append(gopts, grpc.WithContextDialer(wd.dialer)) needDialer = false } + if wt, ok := o.(*withTracerDelegate); ok { + tracerDelegate = wt + } + if sd, ok := o.(*withSessionDialer); ok { + sessionDialer = sd.dialer + } } + + if !customTracer { + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + tracerProvider = span.TracerProvider() + } + } + + if tracerProvider != nil { + var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}) + unary = append(unary, filterInterceptor(otelgrpc.UnaryClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators)))) + stream = append(stream, 
otelgrpc.StreamClientInterceptor(otelgrpc.WithTracerProvider(tracerProvider), otelgrpc.WithPropagators(propagators))) + } + if needDialer { dialFn, err := resolveDialer(address) if err != nil { @@ -70,20 +103,29 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error gopts = append(gopts, grpc.WithContextDialer(dialFn)) } if needWithInsecure { - gopts = append(gopts, grpc.WithInsecure()) + gopts = append(gopts, grpc.WithTransportCredentials(insecure.NewCredentials())) } if address == "" { address = appdefaults.Address } - // grpc-go uses a slightly different naming scheme: https://github.com/grpc/grpc/blob/master/doc/naming.md - // This will end up setting rfc non-complient :authority header to address string (e.g. tcp://127.0.0.1:1234). - // So, here sets right authority header via WithAuthority DialOption. - addressURL, err := url.Parse(address) - if err != nil { - return nil, err + // Setting :authority pseudo header + // - HTTP/2 (RFC7540) defines :authority pseudo header includes + // the authority portion of target URI but it must not include + // userinfo part (i.e. url.Host). + // ref: https://datatracker.ietf.org/doc/html/rfc7540#section-8.1.2.3 + // - However, when TLS specified, grpc-go requires it must match + // with its servername specified for certificate validation. + authority := tlsServerName + if authority == "" { + // authority as hostname from target address + uri, err := url.Parse(address) + if err != nil { + return nil, err + } + authority = uri.Host } - gopts = append(gopts, grpc.WithAuthority(addressURL.Host)) + gopts = append(gopts, grpc.WithAuthority(authority)) unary = append(unary, grpcerrors.UnaryClientInterceptor) stream = append(stream, grpcerrors.StreamClientInterceptor) @@ -104,12 +146,28 @@ func New(ctx context.Context, address string, opts ...ClientOpt) (*Client, error if err != nil { return nil, errors.Wrapf(err, "failed to dial %q . 
make sure buildkitd is running", address) } + c := &Client{ - conn: conn, + conn: conn, + sessionDialer: sessionDialer, } + + if tracerDelegate != nil { + _ = c.setupDelegatedTracing(ctx, tracerDelegate) // ignore error + } + return c, nil } +func (c *Client) setupDelegatedTracing(ctx context.Context, td TracerDelegate) error { + pd := otlptracegrpc.NewClient(c.conn) + e, err := otlptrace.New(ctx, pd) + if err != nil { + return nil + } + return td.SetSpanExporter(ctx, e) +} + func (c *Client) controlClient() controlapi.ControlClient { return controlapi.NewControlClient(c.conn) } @@ -182,12 +240,34 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) { return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil } -func WithTracer(t opentracing.Tracer) ClientOpt { +func WithTracerProvider(t trace.TracerProvider) ClientOpt { return &withTracer{t} } type withTracer struct { - tracer opentracing.Tracer + tp trace.TracerProvider +} + +type TracerDelegate interface { + SetSpanExporter(context.Context, sdktrace.SpanExporter) error +} + +func WithTracerDelegate(td TracerDelegate) ClientOpt { + return &withTracerDelegate{ + TracerDelegate: td, + } +} + +type withTracerDelegate struct { + TracerDelegate +} + +func WithSessionDialer(dialer func(context.Context, string, map[string][]string) (net.Conn, error)) ClientOpt { + return &withSessionDialer{dialer} +} + +type withSessionDialer struct { + dialer func(context.Context, string, map[string][]string) (net.Conn, error) } func resolveDialer(address string) (func(context.Context, string) (net.Conn, error), error) { @@ -201,3 +281,12 @@ func resolveDialer(address string) (func(context.Context, string) (net.Conn, err // basic dialer return dialer, nil } + +func filterInterceptor(intercept grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if strings.HasSuffix(method, "opentelemetry.proto.collector.trace.v1.TraceService/Export") { + return invoker(ctx, method, req, reply, cc, opts...) + } + return intercept(ctx, method, req, reply, cc, invoker, opts...) 
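The interceptor above exempts `opentelemetry.proto.collector.trace.v1.TraceService/Export` calls so that exporting spans over the same connection does not itself generate spans. On the consuming side, the old OpenTracing option is replaced by an OpenTelemetry `TracerProvider` (picked up from the context span unless set explicitly). A hedged sketch of opting in, where `newExporter` stands in for whatever OTLP or stdout exporter the caller constructs:

```go
package main

import (
	"context"

	"github.com/moby/buildkit/client"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// connect is a sketch, not canonical wiring: it builds a TracerProvider
// around a caller-supplied span exporter and hands it to the client.
func connect(ctx context.Context, newExporter func() (sdktrace.SpanExporter, error)) (*client.Client, error) {
	exp, err := newExporter()
	if err != nil {
		return nil, err
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
	return client.New(ctx, "unix:///run/buildkit/buildkitd.sock",
		client.WithTracerProvider(tp), // replaces the removed WithTracer(opentracing.Tracer)
	)
}
```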
+ } +} diff --git a/vendor/github.com/moby/buildkit/client/client_unix.go b/vendor/github.com/moby/buildkit/client/client_unix.go index 888a8173ad..dc55a4b6e6 100644 --- a/vendor/github.com/moby/buildkit/client/client_unix.go +++ b/vendor/github.com/moby/buildkit/client/client_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package client diff --git a/vendor/github.com/moby/buildkit/client/diskusage.go b/vendor/github.com/moby/buildkit/client/diskusage.go index 8034f977c1..2a2373f9d3 100644 --- a/vendor/github.com/moby/buildkit/client/diskusage.go +++ b/vendor/github.com/moby/buildkit/client/diskusage.go @@ -18,7 +18,7 @@ type UsageInfo struct { CreatedAt time.Time LastUsedAt *time.Time UsageCount int - Parent string + Parents []string Description string RecordType UsageRecordType Shared bool @@ -44,7 +44,7 @@ func (c *Client) DiskUsage(ctx context.Context, opts ...DiskUsageOption) ([]*Usa Mutable: d.Mutable, InUse: d.InUse, Size: d.Size_, - Parent: d.Parent, + Parents: d.Parents, CreatedAt: d.CreatedAt, Description: d.Description, UsageCount: int(d.UsageCount), diff --git a/vendor/github.com/moby/buildkit/client/graph.go b/vendor/github.com/moby/buildkit/client/graph.go index bcfa8b839f..aaa96f293f 100644 --- a/vendor/github.com/moby/buildkit/client/graph.go +++ b/vendor/github.com/moby/buildkit/client/graph.go @@ -3,17 +3,19 @@ package client import ( "time" + "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" ) type Vertex struct { - Digest digest.Digest - Inputs []digest.Digest - Name string - Started *time.Time - Completed *time.Time - Cached bool - Error string + Digest digest.Digest + Inputs []digest.Digest + Name string + Started *time.Time + Completed *time.Time + Cached bool + Error string + ProgressGroup *pb.ProgressGroup } type VertexStatus struct { @@ -34,10 +36,21 @@ type VertexLog struct { Timestamp time.Time } +type VertexWarning struct { + Vertex digest.Digest + Level int + Short []byte + Detail [][]byte + URL string + SourceInfo *pb.SourceInfo + Range []*pb.Range +} + type SolveStatus struct { Vertexes []*Vertex Statuses []*VertexStatus Logs []*VertexLog + Warnings []*VertexWarning } type SolveResponse struct { diff --git a/vendor/github.com/moby/buildkit/client/llb/async.go b/vendor/github.com/moby/buildkit/client/llb/async.go index 48216e9895..73d2a92fa1 100644 --- a/vendor/github.com/moby/buildkit/client/llb/async.go +++ b/vendor/github.com/moby/buildkit/client/llb/async.go @@ -10,7 +10,7 @@ import ( ) type asyncState struct { - f func(context.Context, State) (State, error) + f func(context.Context, State, *Constraints) (State, error) prev State target State set bool @@ -22,8 +22,8 @@ func (as *asyncState) Output() Output { return as } -func (as *asyncState) Vertex(ctx context.Context) Vertex { - err := as.Do(ctx) +func (as *asyncState) Vertex(ctx context.Context, c *Constraints) Vertex { + err := as.Do(ctx, c) if err != nil { return &errVertex{err} } @@ -32,13 +32,13 @@ func (as *asyncState) Vertex(ctx context.Context) Vertex { if out == nil { return nil } - return out.Vertex(ctx) + return out.Vertex(ctx, c) } return nil } func (as *asyncState) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error) { - err := as.Do(ctx) + err := as.Do(ctx, c) if err != nil { return nil, err } @@ -52,12 +52,12 @@ func (as *asyncState) ToInput(ctx context.Context, c *Constraints) (*pb.Input, e return nil, nil } -func (as *asyncState) Do(ctx context.Context) error { +func (as *asyncState) Do(ctx context.Context, c *Constraints) 
error { _, err := as.g.Do(ctx, "", func(ctx context.Context) (interface{}, error) { if as.set { return as.target, as.err } - res, err := as.f(ctx, as.prev) + res, err := as.f(ctx, as.prev, c) if err != nil { select { case <-ctx.Done(): @@ -82,7 +82,7 @@ type errVertex struct { err error } -func (v *errVertex) Validate(context.Context) error { +func (v *errVertex) Validate(context.Context, *Constraints) error { return v.err } func (v *errVertex) Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { diff --git a/vendor/github.com/moby/buildkit/client/llb/definition.go b/vendor/github.com/moby/buildkit/client/llb/definition.go index 99af7c6879..697c1f54c9 100644 --- a/vendor/github.com/moby/buildkit/client/llb/definition.go +++ b/vendor/github.com/moby/buildkit/client/llb/definition.go @@ -6,7 +6,7 @@ import ( "github.com/moby/buildkit/solver/pb" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -21,7 +21,7 @@ type DefinitionOp struct { defs map[digest.Digest][]byte metas map[digest.Digest]pb.OpMetadata sources map[digest.Digest][]*SourceLocation - platforms map[digest.Digest]*specs.Platform + platforms map[digest.Digest]*ocispecs.Platform dgst digest.Digest index pb.OutputIndex inputCache map[digest.Digest][]*DefinitionOp @@ -31,7 +31,7 @@ type DefinitionOp struct { func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { ops := make(map[digest.Digest]*pb.Op) defs := make(map[digest.Digest][]byte) - platforms := make(map[digest.Digest]*specs.Platform) + platforms := make(map[digest.Digest]*ocispecs.Platform) var dgst digest.Digest for _, dt := range def.Def { @@ -43,7 +43,7 @@ func NewDefinitionOp(def *pb.Definition) (*DefinitionOp, error) { ops[dgst] = &op defs[dgst] = dt - var platform *specs.Platform + var platform *ocispecs.Platform if op.Platform != nil { spec := op.Platform.Spec() platform = &spec @@ -105,11 +105,11 @@ func (d *DefinitionOp) ToInput(ctx context.Context, c *Constraints) (*pb.Input, return d.Output().ToInput(ctx, c) } -func (d *DefinitionOp) Vertex(context.Context) Vertex { +func (d *DefinitionOp) Vertex(context.Context, *Constraints) Vertex { return d } -func (d *DefinitionOp) Validate(context.Context) error { +func (d *DefinitionOp) Validate(context.Context, *Constraints) error { // Scratch state has no digest, ops or metas. 
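Two new LLB primitives land in this update: `DiffOp` (client/llb/diff.go, added just below) and `MergeOp` (client/llb/merge.go, further down). Both collapse their degenerate cases client-side — a diff involving scratch and a merge with fewer than two non-empty inputs never reach the solver. A hedged usage sketch, assuming the usual llb entry points:

```go
package main

import "github.com/moby/buildkit/client/llb"

func layers() llb.State {
	base := llb.Image("docker.io/library/alpine:latest")
	built := base.Run(llb.Shlex("apk add --no-cache git")).Root()

	// Diff captures only what `built` added on top of `base`…
	delta := llb.Diff(base, built)

	// …and Merge overlays that delta onto an unrelated state without
	// re-running anything; scratch inputs are filtered out before marshal.
	return llb.Merge([]llb.State{llb.Image("docker.io/library/busybox:latest"), delta})
}
```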
if d.dgst == "" { return nil @@ -151,7 +151,7 @@ func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Dige return "", nil, nil, nil, errors.Errorf("cannot marshal empty definition op") } - if err := d.Validate(ctx); err != nil { + if err := d.Validate(ctx, c); err != nil { return "", nil, nil, nil, err } @@ -160,7 +160,6 @@ func (d *DefinitionOp) Marshal(ctx context.Context, c *Constraints) (digest.Dige meta := d.metas[d.dgst] return d.dgst, d.defs[d.dgst], &meta, d.sources[d.dgst], nil - } func (d *DefinitionOp) Output() Output { diff --git a/vendor/github.com/moby/buildkit/client/llb/diff.go b/vendor/github.com/moby/buildkit/client/llb/diff.go new file mode 100644 index 0000000000..b42fcbbcf4 --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/diff.go @@ -0,0 +1,108 @@ +package llb + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +type DiffOp struct { + MarshalCache + lower Output + upper Output + output Output + constraints Constraints +} + +func NewDiff(lower, upper State, c Constraints) *DiffOp { + addCap(&c, pb.CapDiffOp) + op := &DiffOp{ + lower: lower.Output(), + upper: upper.Output(), + constraints: c, + } + op.output = &output{vertex: op} + return op +} + +func (m *DiffOp) Validate(ctx context.Context, constraints *Constraints) error { + return nil +} + +func (m *DiffOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { + if m.Cached(constraints) { + return m.Load() + } + if err := m.Validate(ctx, constraints); err != nil { + return "", nil, nil, nil, err + } + + proto, md := MarshalConstraints(constraints, &m.constraints) + proto.Platform = nil // diff op is not platform specific + + op := &pb.DiffOp{} + + op.Lower = &pb.LowerDiffInput{Input: pb.InputIndex(len(proto.Inputs))} + if m.lower == nil { + op.Lower.Input = pb.Empty + } else { + pbLowerInput, err := m.lower.ToInput(ctx, constraints) + if err != nil { + return "", nil, nil, nil, err + } + proto.Inputs = append(proto.Inputs, pbLowerInput) + } + + op.Upper = &pb.UpperDiffInput{Input: pb.InputIndex(len(proto.Inputs))} + if m.upper == nil { + op.Upper.Input = pb.Empty + } else { + pbUpperInput, err := m.upper.ToInput(ctx, constraints) + if err != nil { + return "", nil, nil, nil, err + } + proto.Inputs = append(proto.Inputs, pbUpperInput) + } + + proto.Op = &pb.Op_Diff{Diff: op} + + dt, err := proto.Marshal() + if err != nil { + return "", nil, nil, nil, err + } + + m.Store(dt, md, m.constraints.SourceLocations, constraints) + return m.Load() +} + +func (m *DiffOp) Output() Output { + return m.output +} + +func (m *DiffOp) Inputs() (out []Output) { + if m.lower != nil { + out = append(out, m.lower) + } + if m.upper != nil { + out = append(out, m.upper) + } + return out +} + +func Diff(lower, upper State, opts ...ConstraintsOpt) State { + if lower.Output() == nil { + if upper.Output() == nil { + // diff of scratch and scratch is scratch + return Scratch() + } + // diff of scratch and upper is just upper + return upper + } + + var c Constraints + for _, o := range opts { + o.SetConstraintsOption(&c) + } + return NewState(NewDiff(lower, upper, c).Output()) +} diff --git a/vendor/github.com/moby/buildkit/client/llb/exec.go b/vendor/github.com/moby/buildkit/client/llb/exec.go index 54182e3ff8..994804a139 100644 --- a/vendor/github.com/moby/buildkit/client/llb/exec.go +++ b/vendor/github.com/moby/buildkit/client/llb/exec.go @@ -43,6 +43,7 @@ type mount struct 
{ selector string cacheID string tmpfs bool + tmpfsOpt TmpfsInfo cacheSharing CacheMountSharingMode noOutput bool } @@ -95,18 +96,18 @@ func (e *ExecOp) GetMount(target string) Output { return nil } -func (e *ExecOp) Validate(ctx context.Context) error { +func (e *ExecOp) Validate(ctx context.Context, c *Constraints) error { if e.isValidated { return nil } - args, err := getArgs(e.base)(ctx) + args, err := getArgs(e.base)(ctx, c) if err != nil { return err } if len(args) == 0 { return errors.Errorf("arguments are required") } - cwd, err := getDir(e.base)(ctx) + cwd, err := getDir(e.base)(ctx, c) if err != nil { return err } @@ -115,7 +116,7 @@ func (e *ExecOp) Validate(ctx context.Context) error { } for _, m := range e.mounts { if m.source != nil { - if err := m.source.Vertex(ctx).Validate(ctx); err != nil { + if err := m.source.Vertex(ctx, c).Validate(ctx, c); err != nil { return err } } @@ -128,7 +129,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] if e.Cached(c) { return e.Load() } - if err := e.Validate(ctx); err != nil { + if err := e.Validate(ctx, c); err != nil { return "", nil, nil, nil, err } // make sure mounts are sorted @@ -136,7 +137,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] return e.mounts[i].target < e.mounts[j].target }) - env, err := getEnv(e.base)(ctx) + env, err := getEnv(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } @@ -165,34 +166,41 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } } - args, err := getArgs(e.base)(ctx) + args, err := getArgs(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } - cwd, err := getDir(e.base)(ctx) + cwd, err := getDir(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } - user, err := getUser(e.base)(ctx) + user, err := getUser(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } - hostname, err := getHostname(e.base)(ctx) + hostname, err := getHostname(e.base)(ctx, c) + if err != nil { + return "", nil, nil, nil, err + } + + cgrpParent, err := getCgroupParent(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } meta := &pb.Meta{ - Args: args, - Env: env.ToArray(), - Cwd: cwd, - User: user, - Hostname: hostname, + Args: args, + Env: env.ToArray(), + Cwd: cwd, + User: user, + Hostname: hostname, + CgroupParent: cgrpParent, } - extraHosts, err := getExtraHosts(e.base)(ctx) + + extraHosts, err := getExtraHosts(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } @@ -204,12 +212,29 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] meta.ExtraHosts = hosts } - network, err := getNetwork(e.base)(ctx) + ulimits, err := getUlimit(e.base)(ctx, c) + if err != nil { + return "", nil, nil, nil, err + } + if len(ulimits) > 0 { + addCap(&e.constraints, pb.CapExecMetaUlimit) + ul := make([]*pb.Ulimit, len(ulimits)) + for i, u := range ulimits { + ul[i] = &pb.Ulimit{ + Name: u.Name, + Soft: u.Soft, + Hard: u.Hard, + } + } + meta.Ulimit = ul + } + + network, err := getNetwork(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } - security, err := getSecurity(e.base)(ctx) + security, err := getSecurity(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } @@ -249,6 +274,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] addCap(&e.constraints, pb.CapExecMountCacheSharing) } else if m.tmpfs { addCap(&e.constraints, pb.CapExecMountTmpfs) + if m.tmpfsOpt.Size > 0 { + 
addCap(&e.constraints, pb.CapExecMountTmpfsSize) + } } else if m.source != nil { addCap(&e.constraints, pb.CapExecMountBind) } @@ -256,6 +284,12 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] if len(e.secrets) > 0 { addCap(&e.constraints, pb.CapExecMountSecret) + for _, s := range e.secrets { + if s.IsEnv { + addCap(&e.constraints, pb.CapExecSecretEnv) + break + } + } } if len(e.ssh) > 0 { @@ -263,7 +297,7 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } if e.constraints.Platform == nil { - p, err := getPlatform(e.base)(ctx) + p, err := getPlatform(e.base)(ctx, c) if err != nil { return "", nil, nil, nil, err } @@ -333,23 +367,34 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] } if m.tmpfs { pm.MountType = pb.MountType_TMPFS + pm.TmpfsOpt = &pb.TmpfsOpt{ + Size_: m.tmpfsOpt.Size, + } } peo.Mounts = append(peo.Mounts, pm) } for _, s := range e.secrets { - pm := &pb.Mount{ - Dest: s.Target, - MountType: pb.MountType_SECRET, - SecretOpt: &pb.SecretOpt{ + if s.IsEnv { + peo.Secretenv = append(peo.Secretenv, &pb.SecretEnv{ ID: s.ID, - Uid: uint32(s.UID), - Gid: uint32(s.GID), + Name: s.Target, Optional: s.Optional, - Mode: uint32(s.Mode), - }, + }) + } else { + pm := &pb.Mount{ + Dest: s.Target, + MountType: pb.MountType_SECRET, + SecretOpt: &pb.SecretOpt{ + ID: s.ID, + Uid: uint32(s.UID), + Gid: uint32(s.GID), + Optional: s.Optional, + Mode: uint32(s.Mode), + }, + } + peo.Mounts = append(peo.Mounts, pm) } - peo.Mounts = append(peo.Mounts, pm) } for _, s := range e.ssh { @@ -453,12 +498,37 @@ func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption } } -func Tmpfs() MountOption { +func Tmpfs(opts ...TmpfsOption) MountOption { return func(m *mount) { + t := &TmpfsInfo{} + for _, opt := range opts { + opt.SetTmpfsOption(t) + } m.tmpfs = true + m.tmpfsOpt = *t } } +type TmpfsOption interface { + SetTmpfsOption(*TmpfsInfo) +} + +type tmpfsOptionFunc func(*TmpfsInfo) + +func (fn tmpfsOptionFunc) SetTmpfsOption(ti *TmpfsInfo) { + fn(ti) +} + +func TmpfsSize(b int64) TmpfsOption { + return tmpfsOptionFunc(func(ti *TmpfsInfo) { + ti.Size = b + }) +} + +type TmpfsInfo struct { + Size int64 +} + type RunOption interface { SetRunOption(es *ExecInfo) } @@ -498,6 +568,18 @@ func AddExtraHost(host string, ip net.IP) RunOption { }) } +func AddUlimit(name UlimitName, soft int64, hard int64) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.AddUlimit(name, soft, hard) + }) +} + +func WithCgroupParent(cp string) RunOption { + return runOptionFunc(func(ei *ExecInfo) { + ei.State = ei.State.WithCgroupParent(cp) + }) +} + func With(so ...StateOption) RunOption { return runOptionFunc(func(ei *ExecInfo) { ei.State = ei.State.With(so...) 
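Taken together, the exec changes in this hunk surface as new `RunOption`s and mount options: sized tmpfs mounts, ulimits, a cgroup parent, and secrets mounted as environment variables (`SecretAsEnv`, defined just below). A short usage sketch with illustrative values:

```go
package main

import "github.com/moby/buildkit/client/llb"

func build() llb.State {
	return llb.Image("docker.io/library/golang:1.17-alpine").
		Run(
			llb.Shlex("go build ./..."),
			llb.AddUlimit(llb.UlimitNofile, 65535, 65535), // emits pb.Ulimit, adds CapExecMetaUlimit
			llb.WithCgroupParent("buildkit.slice"),        // lands in pb.Meta.CgroupParent
			llb.AddMount("/tmp", llb.Scratch(),
				llb.Tmpfs(llb.TmpfsSize(64<<20))), // sizes > 0 add CapExecMountTmpfsSize
			llb.AddSecret("GITHUB_TOKEN", llb.SecretID("gh"),
				llb.SecretAsEnv(true)), // marshals into pb.SecretEnv instead of a mount
		).Root()
}
```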
@@ -593,6 +675,7 @@ type SecretInfo struct { UID int GID int Optional bool + IsEnv bool } var SecretOptional = secretOptionFunc(func(si *SecretInfo) { @@ -605,6 +688,13 @@ func SecretID(id string) SecretOption { }) } +// SecretAsEnv defines if the secret should be added as an environment variable +func SecretAsEnv(v bool) SecretOption { + return secretOptionFunc(func(si *SecretInfo) { + si.IsEnv = v + }) +} + func SecretFileOpt(uid, gid, mode int) SecretOption { return secretOptionFunc(func(si *SecretInfo) { si.UID = uid @@ -667,3 +757,23 @@ const ( SecurityModeInsecure = pb.SecurityMode_INSECURE SecurityModeSandbox = pb.SecurityMode_SANDBOX ) + +type UlimitName string + +const ( + UlimitCore UlimitName = "core" + UlimitCPU UlimitName = "cpu" + UlimitData UlimitName = "data" + UlimitFsize UlimitName = "fsize" + UlimitLocks UlimitName = "locks" + UlimitMemlock UlimitName = "memlock" + UlimitMsgqueue UlimitName = "msgqueue" + UlimitNice UlimitName = "nice" + UlimitNofile UlimitName = "nofile" + UlimitNproc UlimitName = "nproc" + UlimitRss UlimitName = "rss" + UlimitRtprio UlimitName = "rtprio" + UlimitRttime UlimitName = "rttime" + UlimitSigpending UlimitName = "sigpending" + UlimitStack UlimitName = "stack" +) diff --git a/vendor/github.com/moby/buildkit/client/llb/fileop.go b/vendor/github.com/moby/buildkit/client/llb/fileop.go index 1f43974cb0..ffc6da19e4 100644 --- a/vendor/github.com/moby/buildkit/client/llb/fileop.go +++ b/vendor/github.com/moby/buildkit/client/llb/fileop.go @@ -544,7 +544,7 @@ type FileOp struct { isValidated bool } -func (f *FileOp) Validate(context.Context) error { +func (f *FileOp) Validate(context.Context, *Constraints) error { if f.isValidated { return nil } @@ -667,7 +667,7 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] if f.Cached(c) { return f.Load() } - if err := f.Validate(ctx); err != nil { + if err := f.Validate(ctx, c); err != nil { return "", nil, nil, nil, err } @@ -676,19 +676,25 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] pfo := &pb.FileOp{} if f.constraints.Platform == nil { - p, err := getPlatform(*f.action.state)(ctx) + p, err := getPlatform(*f.action.state)(ctx, c) if err != nil { return "", nil, nil, nil, err } f.constraints.Platform = p } + state := newMarshalState(ctx) + for _, st := range state.actions { + if adder, isCapAdder := st.action.(capAdder); isCapAdder { + adder.addCaps(f) + } + } + pop, md := MarshalConstraints(c, &f.constraints) pop.Op = &pb.Op_File{ File: pfo, } - state := newMarshalState(ctx) _, err := state.add(f.action, c) if err != nil { return "", nil, nil, nil, err @@ -696,10 +702,6 @@ func (f *FileOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, [] pop.Inputs = state.inputs for i, st := range state.actions { - if adder, isCapAdder := st.action.(capAdder); isCapAdder { - adder.addCaps(f) - } - output := pb.OutputIndex(-1) if i+1 == len(state.actions) { output = 0 diff --git a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go index 97a0cba731..6dd40b6943 100644 --- a/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/imagemetaresolver/resolver.go @@ -11,9 +11,10 @@ import ( "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/util/contentutil" "github.com/moby/buildkit/util/imageutil" + "github.com/moby/buildkit/version" "github.com/moby/locker" digest 
"github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) var defaultImageMetaResolver llb.ImageMetaResolver @@ -24,12 +25,12 @@ var WithDefault = imageOptionFunc(func(ii *llb.ImageInfo) { }) type imageMetaResolverOpts struct { - platform *specs.Platform + platform *ocispecs.Platform } type ImageMetaResolverOpt func(o *imageMetaResolverOpts) -func WithDefaultPlatform(p *specs.Platform) ImageMetaResolverOpt { +func WithDefaultPlatform(p *ocispecs.Platform) ImageMetaResolverOpt { return func(o *imageMetaResolverOpts) { o.platform = p } @@ -40,9 +41,12 @@ func New(with ...ImageMetaResolverOpt) llb.ImageMetaResolver { for _, f := range with { f(&opts) } + headers := http.Header{} + headers.Set("User-Agent", version.UserAgent()) return &imageMetaResolver{ resolver: docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, + Client: http.DefaultClient, + Headers: headers, }), platform: opts.platform, buffer: contentutil.NewBuffer(), @@ -61,7 +65,7 @@ func Default() llb.ImageMetaResolver { type imageMetaResolver struct { resolver remotes.Resolver buffer contentutil.Buffer - platform *specs.Platform + platform *ocispecs.Platform locker *locker.Locker cache map[string]resolveResult } @@ -95,7 +99,7 @@ func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string return dgst, config, nil } -func (imr *imageMetaResolver) key(ref string, platform *specs.Platform) string { +func (imr *imageMetaResolver) key(ref string, platform *ocispecs.Platform) string { if platform != nil { ref += platforms.Format(*platform) } diff --git a/vendor/github.com/moby/buildkit/client/llb/marshal.go b/vendor/github.com/moby/buildkit/client/llb/marshal.go index 282b592b7b..e59e560ee9 100644 --- a/vendor/github.com/moby/buildkit/client/llb/marshal.go +++ b/vendor/github.com/moby/buildkit/client/llb/marshal.go @@ -12,9 +12,10 @@ import ( // Definition is the LLB definition structure with per-vertex metadata entries // Corresponds to the Definition structure defined in solver/pb.Definition. 
type Definition struct { - Def [][]byte - Metadata map[digest.Digest]pb.OpMetadata - Source *pb.Source + Def [][]byte + Metadata map[digest.Digest]pb.OpMetadata + Source *pb.Source + Constraints *Constraints } func (def *Definition) ToPB() *pb.Definition { @@ -38,6 +39,24 @@ func (def *Definition) FromPB(x *pb.Definition) { } } +func (def *Definition) Head() (digest.Digest, error) { + if len(def.Def) == 0 { + return "", nil + } + + last := def.Def[len(def.Def)-1] + + var pop pb.Op + if err := (&pop).Unmarshal(last); err != nil { + return "", err + } + if len(pop.Inputs) == 0 { + return "", nil + } + + return pop.Inputs[0].Digest, nil +} + func WriteTo(def *Definition, w io.Writer) error { b, err := def.ToPB().Marshal() if err != nil { diff --git a/vendor/github.com/moby/buildkit/client/llb/merge.go b/vendor/github.com/moby/buildkit/client/llb/merge.go new file mode 100644 index 0000000000..8177d71d2a --- /dev/null +++ b/vendor/github.com/moby/buildkit/client/llb/merge.go @@ -0,0 +1,96 @@ +package llb + +import ( + "context" + + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" + "github.com/pkg/errors" +) + +type MergeOp struct { + MarshalCache + inputs []Output + output Output + constraints Constraints +} + +func NewMerge(inputs []State, c Constraints) *MergeOp { + op := &MergeOp{constraints: c} + for _, input := range inputs { + op.inputs = append(op.inputs, input.Output()) + } + op.output = &output{vertex: op} + return op +} + +func (m *MergeOp) Validate(ctx context.Context, constraints *Constraints) error { + if len(m.inputs) < 2 { + return errors.Errorf("merge must have at least 2 inputs") + } + return nil +} + +func (m *MergeOp) Marshal(ctx context.Context, constraints *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) { + if m.Cached(constraints) { + return m.Load() + } + if err := m.Validate(ctx, constraints); err != nil { + return "", nil, nil, nil, err + } + + pop, md := MarshalConstraints(constraints, &m.constraints) + pop.Platform = nil // merge op is not platform specific + + op := &pb.MergeOp{} + for _, input := range m.inputs { + op.Inputs = append(op.Inputs, &pb.MergeInput{Input: pb.InputIndex(len(pop.Inputs))}) + pbInput, err := input.ToInput(ctx, constraints) + if err != nil { + return "", nil, nil, nil, err + } + pop.Inputs = append(pop.Inputs, pbInput) + } + pop.Op = &pb.Op_Merge{Merge: op} + + dt, err := pop.Marshal() + if err != nil { + return "", nil, nil, nil, err + } + + m.Store(dt, md, m.constraints.SourceLocations, constraints) + return m.Load() +} + +func (m *MergeOp) Output() Output { + return m.output +} + +func (m *MergeOp) Inputs() []Output { + return m.inputs +} + +func Merge(inputs []State, opts ...ConstraintsOpt) State { + // filter out any scratch inputs, which have no effect when merged + var filteredInputs []State + for _, input := range inputs { + if input.Output() != nil { + filteredInputs = append(filteredInputs, input) + } + } + if len(filteredInputs) == 0 { + // a merge of only scratch results in scratch + return Scratch() + } + if len(filteredInputs) == 1 { + // a merge of a single non-empty input results in that non-empty input + return filteredInputs[0] + } + + var c Constraints + for _, o := range opts { + o.SetConstraintsOption(&c) + } + addCap(&c, pb.CapMergeOp) + return NewState(NewMerge(filteredInputs, c).Output()) +} diff --git a/vendor/github.com/moby/buildkit/client/llb/meta.go b/vendor/github.com/moby/buildkit/client/llb/meta.go index 80cc18dab1..b98b6d1063 100644 --- 
a/vendor/github.com/moby/buildkit/client/llb/meta.go +++ b/vendor/github.com/moby/buildkit/client/llb/meta.go @@ -9,21 +9,24 @@ import ( "github.com/containerd/containerd/platforms" "github.com/google/shlex" "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) type contextKeyT string var ( - keyArgs = contextKeyT("llb.exec.args") - keyDir = contextKeyT("llb.exec.dir") - keyEnv = contextKeyT("llb.exec.env") - keyUser = contextKeyT("llb.exec.user") - keyHostname = contextKeyT("llb.exec.hostname") - keyExtraHost = contextKeyT("llb.exec.extrahost") - keyPlatform = contextKeyT("llb.platform") - keyNetwork = contextKeyT("llb.network") - keySecurity = contextKeyT("llb.security") + keyArgs = contextKeyT("llb.exec.args") + keyDir = contextKeyT("llb.exec.dir") + keyEnv = contextKeyT("llb.exec.env") + keyExtraHost = contextKeyT("llb.exec.extrahost") + keyHostname = contextKeyT("llb.exec.hostname") + keyUlimit = contextKeyT("llb.exec.ulimit") + keyCgroupParent = contextKeyT("llb.exec.cgroup.parent") + keyUser = contextKeyT("llb.exec.user") + + keyPlatform = contextKeyT("llb.platform") + keyNetwork = contextKeyT("llb.network") + keySecurity = contextKeyT("llb.security") ) func AddEnvf(key, value string, v ...interface{}) StateOption { @@ -39,8 +42,8 @@ func addEnvf(key, value string, replace bool, v ...interface{}) StateOption { value = fmt.Sprintf(value, v...) } return func(s State) State { - return s.withValue(keyEnv, func(ctx context.Context) (interface{}, error) { - env, err := getEnv(s)(ctx) + return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (interface{}, error) { + env, err := getEnv(s)(ctx, c) if err != nil { return nil, err } @@ -62,9 +65,9 @@ func dirf(value string, replace bool, v ...interface{}) StateOption { value = fmt.Sprintf(value, v...) 
} return func(s State) State { - return s.withValue(keyDir, func(ctx context.Context) (interface{}, error) { + return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (interface{}, error) { if !path.IsAbs(value) { - prev, err := getDir(s)(ctx) + prev, err := getDir(s)(ctx, c) if err != nil { return nil, err } @@ -92,9 +95,9 @@ func Reset(other State) StateOption { } } -func getEnv(s State) func(context.Context) (EnvList, error) { - return func(ctx context.Context) (EnvList, error) { - v, err := s.getValue(keyEnv)(ctx) +func getEnv(s State) func(context.Context, *Constraints) (EnvList, error) { + return func(ctx context.Context, c *Constraints) (EnvList, error) { + v, err := s.getValue(keyEnv)(ctx, c) if err != nil { return nil, err } @@ -105,9 +108,9 @@ func getEnv(s State) func(context.Context) (EnvList, error) { } } -func getDir(s State) func(context.Context) (string, error) { - return func(ctx context.Context) (string, error) { - v, err := s.getValue(keyDir)(ctx) +func getDir(s State) func(context.Context, *Constraints) (string, error) { + return func(ctx context.Context, c *Constraints) (string, error) { + v, err := s.getValue(keyDir)(ctx, c) if err != nil { return "", err } @@ -118,9 +121,9 @@ func getDir(s State) func(context.Context) (string, error) { } } -func getArgs(s State) func(context.Context) ([]string, error) { - return func(ctx context.Context) ([]string, error) { - v, err := s.getValue(keyArgs)(ctx) +func getArgs(s State) func(context.Context, *Constraints) ([]string, error) { + return func(ctx context.Context, c *Constraints) ([]string, error) { + v, err := s.getValue(keyArgs)(ctx, c) if err != nil { return nil, err } @@ -131,9 +134,9 @@ func getArgs(s State) func(context.Context) ([]string, error) { } } -func getUser(s State) func(context.Context) (string, error) { - return func(ctx context.Context) (string, error) { - v, err := s.getValue(keyUser)(ctx) +func getUser(s State) func(context.Context, *Constraints) (string, error) { + return func(ctx context.Context, c *Constraints) (string, error) { + v, err := s.getValue(keyUser)(ctx, c) if err != nil { return "", err } @@ -150,9 +153,9 @@ func Hostname(str string) StateOption { } } -func getHostname(s State) func(context.Context) (string, error) { - return func(ctx context.Context) (string, error) { - v, err := s.getValue(keyHostname)(ctx) +func getHostname(s State) func(context.Context, *Constraints) (string, error) { + return func(ctx context.Context, c *Constraints) (string, error) { + v, err := s.getValue(keyHostname)(ctx, c) if err != nil { return "", err } @@ -182,20 +185,20 @@ func shlexf(str string, replace bool, v ...interface{}) StateOption { } } -func platform(p specs.Platform) StateOption { +func platform(p ocispecs.Platform) StateOption { return func(s State) State { return s.WithValue(keyPlatform, platforms.Normalize(p)) } } -func getPlatform(s State) func(context.Context) (*specs.Platform, error) { - return func(ctx context.Context) (*specs.Platform, error) { - v, err := s.getValue(keyPlatform)(ctx) +func getPlatform(s State) func(context.Context, *Constraints) (*ocispecs.Platform, error) { + return func(ctx context.Context, c *Constraints) (*ocispecs.Platform, error) { + v, err := s.getValue(keyPlatform)(ctx, c) if err != nil { return nil, err } if v != nil { - p := v.(specs.Platform) + p := v.(ocispecs.Platform) return &p, nil } return nil, nil @@ -204,8 +207,8 @@ func getPlatform(s State) func(context.Context) (*specs.Platform, error) { func extraHost(host string, ip net.IP) StateOption { 
return func(s State) State { - return s.withValue(keyExtraHost, func(ctx context.Context) (interface{}, error) { - v, err := getExtraHosts(s)(ctx) + return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (interface{}, error) { + v, err := getExtraHosts(s)(ctx, c) if err != nil { return nil, err } @@ -214,9 +217,9 @@ func extraHost(host string, ip net.IP) StateOption { } } -func getExtraHosts(s State) func(context.Context) ([]HostIP, error) { - return func(ctx context.Context) ([]HostIP, error) { - v, err := s.getValue(keyExtraHost)(ctx) +func getExtraHosts(s State) func(context.Context, *Constraints) ([]HostIP, error) { + return func(ctx context.Context, c *Constraints) ([]HostIP, error) { + v, err := s.getValue(keyExtraHost)(ctx, c) if err != nil { return nil, err } @@ -232,14 +235,62 @@ type HostIP struct { IP net.IP } +func ulimit(name UlimitName, soft int64, hard int64) StateOption { + return func(s State) State { + return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (interface{}, error) { + v, err := getUlimit(s)(ctx, c) + if err != nil { + return nil, err + } + return append(v, pb.Ulimit{ + Name: string(name), + Soft: soft, + Hard: hard, + }), nil + }) + } +} + +func getUlimit(s State) func(context.Context, *Constraints) ([]pb.Ulimit, error) { + return func(ctx context.Context, c *Constraints) ([]pb.Ulimit, error) { + v, err := s.getValue(keyUlimit)(ctx, c) + if err != nil { + return nil, err + } + if v != nil { + return v.([]pb.Ulimit), nil + } + return nil, nil + } +} + +func cgroupParent(cp string) StateOption { + return func(s State) State { + return s.WithValue(keyCgroupParent, cp) + } +} + +func getCgroupParent(s State) func(context.Context, *Constraints) (string, error) { + return func(ctx context.Context, c *Constraints) (string, error) { + v, err := s.getValue(keyCgroupParent)(ctx, c) + if err != nil { + return "", err + } + if v != nil { + return v.(string), nil + } + return "", nil + } +} + func Network(v pb.NetMode) StateOption { return func(s State) State { return s.WithValue(keyNetwork, v) } } -func getNetwork(s State) func(context.Context) (pb.NetMode, error) { - return func(ctx context.Context) (pb.NetMode, error) { - v, err := s.getValue(keyNetwork)(ctx) +func getNetwork(s State) func(context.Context, *Constraints) (pb.NetMode, error) { + return func(ctx context.Context, c *Constraints) (pb.NetMode, error) { + v, err := s.getValue(keyNetwork)(ctx, c) if err != nil { return 0, err } @@ -256,9 +307,9 @@ func Security(v pb.SecurityMode) StateOption { return s.WithValue(keySecurity, v) } } -func getSecurity(s State) func(context.Context) (pb.SecurityMode, error) { - return func(ctx context.Context) (pb.SecurityMode, error) { - v, err := s.getValue(keySecurity)(ctx) +func getSecurity(s State) func(context.Context, *Constraints) (pb.SecurityMode, error) { + return func(ctx context.Context, c *Constraints) (pb.SecurityMode, error) { + v, err := s.getValue(keySecurity)(ctx, c) if err != nil { return 0, err } diff --git a/vendor/github.com/moby/buildkit/client/llb/resolver.go b/vendor/github.com/moby/buildkit/client/llb/resolver.go index 31fc395993..af1edc1071 100644 --- a/vendor/github.com/moby/buildkit/client/llb/resolver.go +++ b/vendor/github.com/moby/buildkit/client/llb/resolver.go @@ -4,7 +4,7 @@ import ( "context" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) // WithMetaResolver adds a metadata resolver to an 
image @@ -29,7 +29,7 @@ type ImageMetaResolver interface { } type ResolveImageConfigOpt struct { - Platform *specs.Platform + Platform *ocispecs.Platform ResolveMode string LogName string } diff --git a/vendor/github.com/moby/buildkit/client/llb/source.go b/vendor/github.com/moby/buildkit/client/llb/source.go index 7d389e8bda..c1be90b704 100644 --- a/vendor/github.com/moby/buildkit/client/llb/source.go +++ b/vendor/github.com/moby/buildkit/client/llb/source.go @@ -36,7 +36,7 @@ func NewSource(id string, attrs map[string]string, c Constraints) *SourceOp { return s } -func (s *SourceOp) Validate(ctx context.Context) error { +func (s *SourceOp) Validate(ctx context.Context, c *Constraints) error { if s.err != nil { return s.err } @@ -50,7 +50,7 @@ func (s *SourceOp) Marshal(ctx context.Context, constraints *Constraints) (diges if s.Cached(constraints) { return s.Load() } - if err := s.Validate(ctx); err != nil { + if err := s.Validate(ctx, constraints); err != nil { return "", nil, nil, nil, err } @@ -121,9 +121,13 @@ func Image(ref string, opts ...ImageOption) State { src.err = err } else if info.metaResolver != nil { if _, ok := r.(reference.Digested); ok || !info.resolveDigest { - return NewState(src.Output()).Async(func(ctx context.Context, st State) (State, error) { + return NewState(src.Output()).Async(func(ctx context.Context, st State, c *Constraints) (State, error) { + p := info.Constraints.Platform + if p == nil { + p = c.Platform + } _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{ - Platform: info.Constraints.Platform, + Platform: p, ResolveMode: info.resolveMode.String(), }) if err != nil { @@ -132,9 +136,13 @@ func Image(ref string, opts ...ImageOption) State { return st.WithImageConfig(dt) }) } - return Scratch().Async(func(ctx context.Context, _ State) (State, error) { + return Scratch().Async(func(ctx context.Context, _ State, c *Constraints) (State, error) { + p := info.Constraints.Platform + if p == nil { + p = c.Platform + } dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{ - Platform: info.Constraints.Platform, + Platform: p, ResolveMode: info.resolveMode.String(), }) if err != nil { @@ -361,6 +369,12 @@ func Local(name string, opts ...LocalOption) State { attrs[pb.AttrSharedKeyHint] = gi.SharedKeyHint addCap(&gi.Constraints, pb.CapSourceLocalSharedKeyHint) } + if gi.Differ.Type != "" { + attrs[pb.AttrLocalDiffer] = string(gi.Differ.Type) + if gi.Differ.Required { + addCap(&gi.Constraints, pb.CapSourceLocalDiffer) + } + } addCap(&gi.Constraints, pb.CapSourceLocal) @@ -423,6 +437,32 @@ func SharedKeyHint(h string) LocalOption { }) } +func Differ(t DiffType, required bool) LocalOption { + return localOptionFunc(func(li *LocalInfo) { + li.Differ = DifferInfo{ + Type: t, + Required: required, + } + }) +} + +type DiffType string + +const ( + // DiffNone will do no file comparisons, all files in the Local source will + // be retransmitted. + DiffNone DiffType = pb.AttrLocalDifferNone + // DiffMetadata will compare file metadata (size, modified time, mode, owner, + // group, device and link name) to determine if the files in the Local source need + // to be retransmitted. This is the default behavior. 
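The Differ option introduced here gives callers control over how buildkit decides which files of a local source must be resent on a rebuild. A minimal client-side sketch, using only identifiers from this diff (session wiring and solve plumbing are elided):

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// DiffNone turns off file comparison entirely, so every file in the
	// "src" local source is retransmitted. required=false keeps the differ
	// attribute advisory: workers without CapSourceLocalDiffer still accept
	// the request instead of failing capability validation.
	st := llb.Local("src", llb.Differ(llb.DiffNone, false))

	if _, err := st.Marshal(context.Background()); err != nil {
		log.Fatal(err)
	}
}
```

Passing required=true would instead add CapSourceLocalDiffer as a hard constraint, matching the Local() change above.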
+ DiffMetadata DiffType = pb.AttrLocalDifferMetadata +) + +type DifferInfo struct { + Type DiffType + Required bool +} + type LocalInfo struct { constraintsWrapper SessionID string @@ -430,6 +470,7 @@ type LocalInfo struct { ExcludePatterns string FollowPaths string SharedKeyHint string + Differ DifferInfo } func HTTP(url string, opts ...HTTPOption) State { diff --git a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go index 87afde9954..149355d92e 100644 --- a/vendor/github.com/moby/buildkit/client/llb/sourcemap.go +++ b/vendor/github.com/moby/buildkit/client/llb/sourcemap.go @@ -4,7 +4,7 @@ import ( "context" "github.com/moby/buildkit/solver/pb" - "github.com/opencontainers/go-digest" + digest "github.com/opencontainers/go-digest" ) type SourceMap struct { diff --git a/vendor/github.com/moby/buildkit/client/llb/state.go b/vendor/github.com/moby/buildkit/client/llb/state.go index eca7164daa..0295f635cc 100644 --- a/vendor/github.com/moby/buildkit/client/llb/state.go +++ b/vendor/github.com/moby/buildkit/client/llb/state.go @@ -12,23 +12,35 @@ import ( "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) type StateOption func(State) State type Output interface { ToInput(context.Context, *Constraints) (*pb.Input, error) - Vertex(context.Context) Vertex + Vertex(context.Context, *Constraints) Vertex } type Vertex interface { - Validate(context.Context) error + Validate(context.Context, *Constraints) error Marshal(context.Context, *Constraints) (digest.Digest, []byte, *pb.OpMetadata, []*SourceLocation, error) Output() Output Inputs() []Output } +func NewConstraints(co ...ConstraintsOpt) *Constraints { + defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) + c := &Constraints{ + Platform: &defaultPlatform, + LocalUniqueID: identity.NewID(), + } + for _, o := range co { + o.SetConstraintsOption(c) + } + return c +} + func NewState(o Output) State { s := State{ out: o, @@ -41,14 +53,14 @@ type State struct { out Output prev *State key interface{} - value func(context.Context) (interface{}, error) + value func(context.Context, *Constraints) (interface{}, error) opts []ConstraintsOpt async *asyncState } func (s State) ensurePlatform() State { if o, ok := s.out.(interface { - Platform() *specs.Platform + Platform() *ocispecs.Platform }); ok { if p := o.Platform(); p != nil { s = platform(*p)(s) @@ -58,12 +70,12 @@ func (s State) ensurePlatform() State { } func (s State) WithValue(k, v interface{}) State { - return s.withValue(k, func(context.Context) (interface{}, error) { + return s.withValue(k, func(context.Context, *Constraints) (interface{}, error) { return v, nil }) } -func (s State) withValue(k interface{}, v func(context.Context) (interface{}, error)) State { +func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (interface{}, error)) State { return State{ out: s.Output(), prev: &s, // doesn't need to be original pointer @@ -72,21 +84,25 @@ func (s State) withValue(k interface{}, v func(context.Context) (interface{}, er } } -func (s State) Value(ctx context.Context, k interface{}) (interface{}, error) { - return s.getValue(k)(ctx) +func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) (interface{}, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + 
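NewConstraints, newly exported above, is the seeding step Marshal now delegates to: normalized default platform, fresh LocalUniqueID, then caller options. A small sketch of the observable defaults (LinuxArm64 is the ConstraintsOpt defined further down in state.go):

```go
package main

import (
	"fmt"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// Options are applied after the defaults, so the explicit platform
	// replaces the normalized platforms.DefaultSpec() value.
	c := llb.NewConstraints(llb.LinuxArm64)

	fmt.Println(c.Platform.OS, c.Platform.Architecture) // linux arm64
	fmt.Println(c.LocalUniqueID != "")                  // true: freshly generated
}
```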
} + return s.getValue(k)(ctx, c) } -func (s State) getValue(k interface{}) func(context.Context) (interface{}, error) { +func (s State) getValue(k interface{}) func(context.Context, *Constraints) (interface{}, error) { if s.key == k { return s.value } if s.async != nil { - return func(ctx context.Context) (interface{}, error) { - err := s.async.Do(ctx) + return func(ctx context.Context, c *Constraints) (interface{}, error) { + err := s.async.Do(ctx, c) if err != nil { return nil, err } - return s.async.target.getValue(k)(ctx) + return s.async.target.getValue(k)(ctx, c) } } if s.prev == nil { @@ -95,7 +111,7 @@ func (s State) getValue(k interface{}) func(context.Context) (interface{}, error return s.prev.getValue(k) } -func (s State) Async(f func(context.Context, State) (State, error)) State { +func (s State) Async(f func(context.Context, State, *Constraints) (State, error)) State { s2 := State{ async: &asyncState{f: f, prev: s}, } @@ -108,25 +124,18 @@ func (s State) SetMarshalDefaults(co ...ConstraintsOpt) State { } func (s State) Marshal(ctx context.Context, co ...ConstraintsOpt) (*Definition, error) { + c := NewConstraints(append(s.opts, co...)...) def := &Definition{ - Metadata: make(map[digest.Digest]pb.OpMetadata, 0), + Metadata: make(map[digest.Digest]pb.OpMetadata, 0), + Constraints: c, } - if s.Output() == nil || s.Output().Vertex(ctx) == nil { + + if s.Output() == nil || s.Output().Vertex(ctx, c) == nil { return def, nil } - - defaultPlatform := platforms.Normalize(platforms.DefaultSpec()) - c := &Constraints{ - Platform: &defaultPlatform, - LocalUniqueID: identity.NewID(), - } - for _, o := range append(s.opts, co...) { - o.SetConstraintsOption(c) - } - smc := newSourceMapCollector() - def, err := marshal(ctx, s.Output().Vertex(ctx), def, smc, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c) + def, err := marshal(ctx, s.Output().Vertex(ctx, c), def, smc, map[digest.Digest]struct{}{}, map[Vertex]struct{}{}, c) if err != nil { return def, err } @@ -176,7 +185,7 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect } for _, inp := range v.Inputs() { var err error - def, err = marshal(ctx, inp.Vertex(ctx), def, s, cache, vertexCache, c) + def, err = marshal(ctx, inp.Vertex(ctx, c), def, s, cache, vertexCache, c) if err != nil { return def, err } @@ -199,8 +208,8 @@ func marshal(ctx context.Context, v Vertex, def *Definition, s *sourceMapCollect return def, nil } -func (s State) Validate(ctx context.Context) error { - return s.Output().Vertex(ctx).Validate(ctx) +func (s State) Validate(ctx context.Context, c *Constraints) error { + return s.Output().Vertex(ctx, c).Validate(ctx, c) } func (s State) Output() Output { @@ -287,8 +296,12 @@ func (s State) Dirf(str string, v ...interface{}) State { return Dirf(str, v...)(s) } -func (s State) GetEnv(ctx context.Context, key string) (string, bool, error) { - env, err := getEnv(s)(ctx) +func (s State) GetEnv(ctx context.Context, key string, co ...ConstraintsOpt) (string, bool, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + env, err := getEnv(s)(ctx, c) if err != nil { return "", false, err } @@ -296,20 +309,32 @@ func (s State) GetEnv(ctx context.Context, key string) (string, bool, error) { return v, ok, nil } -func (s State) Env(ctx context.Context) ([]string, error) { - env, err := getEnv(s)(ctx) +func (s State) Env(ctx context.Context, co ...ConstraintsOpt) ([]string, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + env, 
err := getEnv(s)(ctx, c) if err != nil { return nil, err } return env.ToArray(), nil } -func (s State) GetDir(ctx context.Context) (string, error) { - return getDir(s)(ctx) +func (s State) GetDir(ctx context.Context, co ...ConstraintsOpt) (string, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + return getDir(s)(ctx, c) } -func (s State) GetArgs(ctx context.Context) ([]string, error) { - return getArgs(s)(ctx) +func (s State) GetArgs(ctx context.Context, co ...ConstraintsOpt) ([]string, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + return getArgs(s)(ctx, c) } func (s State) Reset(s2 State) State { @@ -324,31 +349,47 @@ func (s State) Hostname(v string) State { return Hostname(v)(s) } -func (s State) GetHostname(ctx context.Context) (string, error) { - return getHostname(s)(ctx) +func (s State) GetHostname(ctx context.Context, co ...ConstraintsOpt) (string, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + return getHostname(s)(ctx, c) } -func (s State) Platform(p specs.Platform) State { +func (s State) Platform(p ocispecs.Platform) State { return platform(p)(s) } -func (s State) GetPlatform(ctx context.Context) (*specs.Platform, error) { - return getPlatform(s)(ctx) +func (s State) GetPlatform(ctx context.Context, co ...ConstraintsOpt) (*ocispecs.Platform, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + return getPlatform(s)(ctx, c) } func (s State) Network(n pb.NetMode) State { return Network(n)(s) } -func (s State) GetNetwork(ctx context.Context) (pb.NetMode, error) { - return getNetwork(s)(ctx) +func (s State) GetNetwork(ctx context.Context, co ...ConstraintsOpt) (pb.NetMode, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + return getNetwork(s)(ctx, c) } func (s State) Security(n pb.SecurityMode) State { return Security(n)(s) } -func (s State) GetSecurity(ctx context.Context) (pb.SecurityMode, error) { - return getSecurity(s)(ctx) +func (s State) GetSecurity(ctx context.Context, co ...ConstraintsOpt) (pb.SecurityMode, error) { + c := &Constraints{} + for _, f := range co { + f.SetConstraintsOption(c) + } + return getSecurity(s)(ctx, c) } func (s State) With(so ...StateOption) State { @@ -362,13 +403,21 @@ func (s State) AddExtraHost(host string, ip net.IP) State { return extraHost(host, ip)(s) } +func (s State) AddUlimit(name UlimitName, soft int64, hard int64) State { + return ulimit(name, soft, hard)(s) +} + +func (s State) WithCgroupParent(cp string) State { + return cgroupParent(cp)(s) +} + func (s State) isFileOpCopyInput() {} type output struct { vertex Vertex getIndex func() (pb.OutputIndex, error) err error - platform *specs.Platform + platform *ocispecs.Platform } func (o *output) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error) { @@ -390,11 +439,11 @@ func (o *output) ToInput(ctx context.Context, c *Constraints) (*pb.Input, error) return &pb.Input{Digest: dgst, Index: index}, nil } -func (o *output) Vertex(context.Context) Vertex { +func (o *output) Vertex(context.Context, *Constraints) Vertex { return o.vertex } -func (o *output) Platform() *specs.Platform { +func (o *output) Platform() *ocispecs.Platform { return o.platform } @@ -456,6 +505,10 @@ func mergeMetadata(m1, m2 pb.OpMetadata) pb.OpMetadata { m1.Caps[k] = true } + if m2.ProgressGroup != nil { + m1.ProgressGroup = m2.ProgressGroup + } + return m1 } @@ -525,7 +578,7 @@ func (cw *constraintsWrapper) 
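Together with the context keys added in meta.go, AddUlimit and WithCgroupParent give the new execution knobs a public State surface, and every Get* helper now threads optional ConstraintsOpt values through to the value callbacks. A sketch combining them; UlimitName is assumed to be a string-based type declared elsewhere in the llb package, so the untyped constant "nofile" converts to it:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	// AddUlimit records a pb.Ulimit{Name: "nofile", Soft: 1024, Hard: 2048}
	// on the state; WithCgroupParent stores the cgroup parent path. Both
	// surface in executor.Meta when the op runs.
	st := llb.Image("docker.io/library/alpine:latest").
		AddUlimit("nofile", 1024, 2048).
		WithCgroupParent("buildkit.slice")

	// Getters accept ConstraintsOpt values, so platform-dependent async
	// states can be resolved against an explicit platform.
	env, err := st.Env(context.Background(), llb.LinuxAmd64)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(env)
}
```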
applyConstraints(f func(c *Constraints)) { } type Constraints struct { - Platform *specs.Platform + Platform *ocispecs.Platform WorkerConstraints []string Metadata pb.OpMetadata LocalUniqueID string @@ -533,7 +586,7 @@ type Constraints struct { SourceLocations []*SourceLocation } -func Platform(p specs.Platform) ConstraintsOpt { +func Platform(p ocispecs.Platform) ConstraintsOpt { return constraintsOptFunc(func(c *Constraints) { c.Platform = &p }) @@ -545,16 +598,22 @@ func LocalUniqueID(v string) ConstraintsOpt { }) } +func ProgressGroup(id, name string, weak bool) ConstraintsOpt { + return constraintsOptFunc(func(c *Constraints) { + c.Metadata.ProgressGroup = &pb.ProgressGroup{Id: id, Name: name, Weak: weak} + }) +} + var ( - LinuxAmd64 = Platform(specs.Platform{OS: "linux", Architecture: "amd64"}) - LinuxArmhf = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"}) + LinuxAmd64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "amd64"}) + LinuxArmhf = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v7"}) LinuxArm = LinuxArmhf - LinuxArmel = Platform(specs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) - LinuxArm64 = Platform(specs.Platform{OS: "linux", Architecture: "arm64"}) - LinuxS390x = Platform(specs.Platform{OS: "linux", Architecture: "s390x"}) - LinuxPpc64le = Platform(specs.Platform{OS: "linux", Architecture: "ppc64le"}) - Darwin = Platform(specs.Platform{OS: "darwin", Architecture: "amd64"}) - Windows = Platform(specs.Platform{OS: "windows", Architecture: "amd64"}) + LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"}) + LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"}) + LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"}) + LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"}) + Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"}) + Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"}) ) func Require(filters ...string) ConstraintsOpt { @@ -565,6 +624,6 @@ func Require(filters ...string) ConstraintsOpt { }) } -func nilValue(context.Context) (interface{}, error) { +func nilValue(context.Context, *Constraints) (interface{}, error) { return nil, nil } diff --git a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go index 7bc583443a..a9c100a95b 100644 --- a/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go +++ b/vendor/github.com/moby/buildkit/client/ociindex/ociindex.go @@ -6,7 +6,7 @@ import ( "os" "github.com/gofrs/flock" - v1 "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -17,9 +17,9 @@ const ( // PutDescToIndex puts desc to index with tag. // Existing manifests with the same tag will be removed from the index. 
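ProgressGroup above is a plain ConstraintsOpt, so it can be handed to Marshal next to a platform option; the resulting pb.ProgressGroup rides along in pb.OpMetadata and is preserved by the mergeMetadata change earlier in this file. A minimal sketch:

```go
package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/client/llb"
)

func main() {
	st := llb.Image("docker.io/library/busybox:latest")

	// The id groups vertices in progress output under one logical step;
	// the final argument is passed through as pb.ProgressGroup.Weak.
	def, err := st.Marshal(context.Background(),
		llb.LinuxAmd64,
		llb.ProgressGroup("step-1", "prepare base image", false),
	)
	if err != nil {
		log.Fatal(err)
	}
	_ = def
}
```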
-func PutDescToIndex(index *v1.Index, desc v1.Descriptor, tag string) error { +func PutDescToIndex(index *ocispecs.Index, desc ocispecs.Descriptor, tag string) error { if index == nil { - index = &v1.Index{} + index = &ocispecs.Index{} } if index.SchemaVersion == 0 { index.SchemaVersion = 2 @@ -28,11 +28,11 @@ func PutDescToIndex(index *v1.Index, desc v1.Descriptor, tag string) error { if desc.Annotations == nil { desc.Annotations = make(map[string]string) } - desc.Annotations[v1.AnnotationRefName] = tag + desc.Annotations[ocispecs.AnnotationRefName] = tag // remove existing manifests with the same tag - var manifests []v1.Descriptor + var manifests []ocispecs.Descriptor for _, m := range index.Manifests { - if m.Annotations[v1.AnnotationRefName] != tag { + if m.Annotations[ocispecs.AnnotationRefName] != tag { manifests = append(manifests, m) } } @@ -42,7 +42,7 @@ func PutDescToIndex(index *v1.Index, desc v1.Descriptor, tag string) error { return nil } -func PutDescToIndexJSONFileLocked(indexJSONPath string, desc v1.Descriptor, tag string) error { +func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor, tag string) error { lockPath := indexJSONPath + IndexJSONLockFileSuffix lock := flock.New(lockPath) locked, err := lock.TryLock() @@ -61,7 +61,7 @@ func PutDescToIndexJSONFileLocked(indexJSONPath string, desc v1.Descriptor, tag return errors.Wrapf(err, "could not open %s", indexJSONPath) } defer f.Close() - var idx v1.Index + var idx ocispecs.Index b, err := ioutil.ReadAll(f) if err != nil { return errors.Wrapf(err, "could not read %s", indexJSONPath) @@ -87,7 +87,7 @@ func PutDescToIndexJSONFileLocked(indexJSONPath string, desc v1.Descriptor, tag return nil } -func ReadIndexJSONFileLocked(indexJSONPath string) (*v1.Index, error) { +func ReadIndexJSONFileLocked(indexJSONPath string) (*ocispecs.Index, error) { lockPath := indexJSONPath + IndexJSONLockFileSuffix lock := flock.New(lockPath) locked, err := lock.TryRLock() @@ -105,7 +105,7 @@ func ReadIndexJSONFileLocked(indexJSONPath string) (*v1.Index, error) { if err != nil { return nil, errors.Wrapf(err, "could not read %s", indexJSONPath) } - var idx v1.Index + var idx ocispecs.Index if err := json.Unmarshal(b, &idx); err != nil { return nil, errors.Wrapf(err, "could not unmarshal %s (%q)", indexJSONPath, string(b)) } diff --git a/vendor/github.com/moby/buildkit/client/prune.go b/vendor/github.com/moby/buildkit/client/prune.go index 27fe5dd8cd..ed4815cb5a 100644 --- a/vendor/github.com/moby/buildkit/client/prune.go +++ b/vendor/github.com/moby/buildkit/client/prune.go @@ -42,7 +42,7 @@ func (c *Client) Prune(ctx context.Context, ch chan UsageInfo, opts ...PruneOpti Mutable: d.Mutable, InUse: d.InUse, Size: d.Size_, - Parent: d.Parent, + Parents: d.Parents, CreatedAt: d.CreatedAt, Description: d.Description, UsageCount: int(d.UsageCount), @@ -59,10 +59,10 @@ type PruneOption interface { } type PruneInfo struct { - Filter []string - All bool - KeepDuration time.Duration - KeepBytes int64 + Filter []string `json:"filter"` + All bool `json:"all"` + KeepDuration time.Duration `json:"keepDuration"` + KeepBytes int64 `json:"keepBytes"` } type pruneOptionFunc func(*PruneInfo) diff --git a/vendor/github.com/moby/buildkit/client/solve.go b/vendor/github.com/moby/buildkit/client/solve.go index 1d77bc4588..f14d9c410d 100644 --- a/vendor/github.com/moby/buildkit/client/solve.go +++ b/vendor/github.com/moby/buildkit/client/solve.go @@ -20,12 +20,12 @@ import ( "github.com/moby/buildkit/session/filesync" 
"github.com/moby/buildkit/session/grpchijack" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/entitlements" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" - opentracing "github.com/opentracing/opentracing-go" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" fstypes "github.com/tonistiigi/fsutil/types" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/errgroup" ) @@ -75,7 +75,7 @@ func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, s return c.solve(ctx, def, nil, opt, statusChan) } -type runGatewayCB func(ref string, s *session.Session) error +type runGatewayCB func(ref string, s *session.Session, opts map[string]string) error func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) { if def != nil && runGateway != nil { @@ -93,8 +93,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG statusContext, cancelStatus := context.WithCancel(context.Background()) defer cancelStatus() - if span := opentracing.SpanFromContext(ctx); span != nil { - statusContext = opentracing.ContextWithSpan(statusContext, span) + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + statusContext = trace.ContextWithSpan(statusContext, span) } s := opt.SharedSession @@ -109,7 +109,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } } - cacheOpt, err := parseCacheOptions(opt) + cacheOpt, err := parseCacheOptions(ctx, runGateway != nil, opt) if err != nil { return nil, err } @@ -162,11 +162,18 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG } eg.Go(func() error { - return s.Run(statusContext, grpchijack.Dialer(c.controlClient())) + sd := c.sessionDialer + if sd == nil { + sd = grpchijack.Dialer(c.controlClient()) + } + return s.Run(statusContext, sd) }) } for k, v := range cacheOpt.frontendAttrs { + if opt.FrontendAttrs == nil { + opt.FrontendAttrs = map[string]string{} + } opt.FrontendAttrs[k] = v } @@ -181,7 +188,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG <-time.After(3 * time.Second) cancelStatus() }() - logrus.Debugf("stopping session") + bklog.G(ctx).Debugf("stopping session") s.Close() }() var pbd *pb.Definition @@ -221,7 +228,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG if runGateway != nil { eg.Go(func() error { - err := runGateway(ref, s) + err := runGateway(ref, s, opt.FrontendAttrs) if err == nil { return nil } @@ -259,13 +266,14 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG s := SolveStatus{} for _, v := range resp.Vertexes { s.Vertexes = append(s.Vertexes, &Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + ProgressGroup: v.ProgressGroup, }) } for _, v := range resp.Statuses { @@ -288,6 +296,17 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG Timestamp: v.Timestamp, }) } + for _, v := range resp.Warnings { + s.Warnings = append(s.Warnings, &VertexWarning{ + Vertex: v.Vertex, + Level: int(v.Level), + Short: v.Short, + Detail: 
v.Detail, + URL: v.Url, + SourceInfo: v.Info, + Range: v.Ranges, + }) + } if statusChan != nil { statusChan <- &s } @@ -300,7 +319,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG // Update index.json of exported cache content store // FIXME(AkihiroSuda): dedupe const definition of cache/remotecache.ExporterResponseManifestDesc = "cache.manifest" if manifestDescJSON := res.ExporterResponse["cache.manifest"]; manifestDescJSON != "" { - var manifestDesc ocispec.Descriptor + var manifestDesc ocispecs.Descriptor if err = json.Unmarshal([]byte(manifestDescJSON), &manifestDesc); err != nil { return nil, err } @@ -341,13 +360,13 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file return nil, errors.Wrap(err, "failed to parse llb proto op") } if src := op.GetSource(); src != nil { - if strings.HasPrefix(src.Identifier, "local://") { // TODO: just make a type property + if strings.HasPrefix(src.Identifier, "local://") { name := strings.TrimPrefix(src.Identifier, "local://") d, ok := localDirs[name] if !ok { return nil, errors.Errorf("local directory %s not enabled", name) } - dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) // TODO: excludes + dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID}) } } } @@ -370,7 +389,7 @@ type cacheOptions struct { frontendAttrs map[string]string } -func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) { +func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) { var ( cacheExports []*controlapi.CacheOptionsEntry cacheImports []*controlapi.CacheOptionsEntry @@ -423,18 +442,18 @@ func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) { } cs, err := contentlocal.NewStore(csDir) if err != nil { - logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) + bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) continue } // if digest is not specified, load from "latest" tag if attrs["digest"] == "" { idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json")) if err != nil { - logrus.Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) + bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error()) continue } for _, m := range idx.Manifests { - if (m.Annotations[ocispec.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispec.AnnotationRefName] == attrs["tag"]) { + if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) { attrs["digest"] = string(m.Digest) break } @@ -444,7 +463,6 @@ func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) { } } contentStores["local:"+csDir] = cs - } if im.Type == "registry" { legacyImportRef := attrs["ref"] @@ -456,7 +474,7 @@ func parseCacheOptions(opt SolveOpt) (*cacheOptions, error) { }) } } - if opt.Frontend != "" { + if opt.Frontend != "" || isGateway { // use legacy API for registry importers, because the frontend might not support the new API if len(legacyImportRefs) > 0 { frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",") diff --git a/vendor/github.com/moby/buildkit/client/workers.go b/vendor/github.com/moby/buildkit/client/workers.go index b011ee2efd..e5331cd608 100644 --- 
a/vendor/github.com/moby/buildkit/client/workers.go +++ b/vendor/github.com/moby/buildkit/client/workers.go @@ -7,16 +7,16 @@ import ( controlapi "github.com/moby/buildkit/api/services/control" apitypes "github.com/moby/buildkit/api/types" "github.com/moby/buildkit/solver/pb" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) // WorkerInfo contains information about a worker type WorkerInfo struct { - ID string - Labels map[string]string - Platforms []specs.Platform - GCPolicy []PruneInfo + ID string `json:"id"` + Labels map[string]string `json:"labels"` + Platforms []ocispecs.Platform `json:"platforms"` + GCPolicy []PruneInfo `json:"gcPolicy"` } // ListWorkers lists all active workers diff --git a/vendor/github.com/moby/buildkit/control/control.go b/vendor/github.com/moby/buildkit/control/control.go index 048e696683..0d3e7976e5 100644 --- a/vendor/github.com/moby/buildkit/control/control.go +++ b/vendor/github.com/moby/buildkit/control/control.go @@ -6,6 +6,8 @@ import ( "sync/atomic" "time" + "github.com/moby/buildkit/util/bklog" + controlapi "github.com/moby/buildkit/api/services/control" apitypes "github.com/moby/buildkit/api/types" "github.com/moby/buildkit/cache/remotecache" @@ -20,11 +22,15 @@ import ( "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/imageutil" "github.com/moby/buildkit/util/throttle" + "github.com/moby/buildkit/util/tracing/transform" "github.com/moby/buildkit/worker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + sdktrace "go.opentelemetry.io/otel/sdk/trace" + tracev1 "go.opentelemetry.io/proto/otlp/collector/trace/v1" "golang.org/x/sync/errgroup" "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/status" ) type Opt struct { @@ -35,9 +41,11 @@ type Opt struct { ResolveCacheExporterFuncs map[string]remotecache.ResolveCacheExporterFunc ResolveCacheImporterFuncs map[string]remotecache.ResolveCacheImporterFunc Entitlements []string + TraceCollector sdktrace.SpanExporter } type Controller struct { // TODO: ControlService + // buildCount needs to be 64bit aligned buildCount int64 opt Opt solver *llbsolver.Solver @@ -45,10 +53,11 @@ type Controller struct { // TODO: ControlService gatewayForwarder *controlgateway.GatewayForwarder throttledGC func() gcmu sync.Mutex + *tracev1.UnimplementedTraceServiceServer } func NewController(opt Opt) (*Controller, error) { - cache := solver.NewCacheManager("local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) + cache := solver.NewCacheManager(context.TODO(), "local", opt.CacheKeyStorage, worker.NewCacheResultStorage(opt.WorkerController)) gatewayForwarder := controlgateway.NewGatewayForwarder() @@ -75,6 +84,7 @@ func NewController(opt Opt) (*Controller, error) { func (c *Controller) Register(server *grpc.Server) error { controlapi.RegisterControlServer(server, c) c.gatewayForwarder.Register(server) + tracev1.RegisterTraceServiceServer(server, c) return nil } @@ -99,7 +109,7 @@ func (c *Controller) DiskUsage(ctx context.Context, r *controlapi.DiskUsageReque Mutable: r.Mutable, InUse: r.InUse, Size_: r.Size, - Parent: r.Parent, + Parents: r.Parents, UsageCount: int64(r.UsageCount), Description: r.Description, CreatedAt: r.CreatedAt, @@ -132,7 +142,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr ReleaseUnreferenced() error }); ok { if err := c.ReleaseUnreferenced(); err != nil { - logrus.Errorf("failed to release cache 
metadata: %+v", err) + bklog.G(ctx).Errorf("failed to release cache metadata: %+v", err) } } } @@ -167,7 +177,7 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr Mutable: r.Mutable, InUse: r.InUse, Size_: r.Size, - Parent: r.Parent, + Parents: r.Parents, UsageCount: int64(r.UsageCount), Description: r.Description, CreatedAt: r.CreatedAt, @@ -184,6 +194,17 @@ func (c *Controller) Prune(req *controlapi.PruneRequest, stream controlapi.Contr return eg2.Wait() } +func (c *Controller) Export(ctx context.Context, req *tracev1.ExportTraceServiceRequest) (*tracev1.ExportTraceServiceResponse, error) { + if c.opt.TraceCollector == nil { + return nil, status.Errorf(codes.Unavailable, "trace collector not configured") + } + err := c.opt.TraceCollector.ExportSpans(ctx, transform.Spans(req.GetResourceSpans())) + if err != nil { + return nil, err + } + return &tracev1.ExportTraceServiceResponse{}, nil +} + func translateLegacySolveRequest(req *controlapi.SolveRequest) error { // translates ExportRef and ExportAttrs to new Exports (v0.4.0) if legacyExportRef := req.Cache.ExportRefDeprecated; legacyExportRef != "" { @@ -217,6 +238,8 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* atomic.AddInt64(&c.buildCount, 1) defer atomic.AddInt64(&c.buildCount, -1) + // This method registers job ID in solver.Solve. Make sure there are no blocking calls before that might delay this. + if err := translateLegacySolveRequest(req); err != nil { return nil, err } @@ -263,7 +286,11 @@ func (c *Controller) Solve(ctx context.Context, req *controlapi.SolveRequest) (* if err != nil { return nil, err } - cacheExportMode = parseCacheExportMode(e.Attrs["mode"]) + if exportMode, supported := parseCacheExportMode(e.Attrs["mode"]); !supported { + bklog.G(ctx).Debugf("skipping invalid cache export mode: %s", e.Attrs["mode"]) + } else { + cacheExportMode = exportMode + } } for _, im := range req.Cache.Imports { cacheImports = append(cacheImports, frontend.CacheOptionsEntry{ @@ -306,18 +333,19 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con return nil } logSize := 0 - retry := false for { + retry := false sr := controlapi.StatusResponse{} for _, v := range ss.Vertexes { sr.Vertexes = append(sr.Vertexes, &controlapi.Vertex{ - Digest: v.Digest, - Inputs: v.Inputs, - Name: v.Name, - Started: v.Started, - Completed: v.Completed, - Error: v.Error, - Cached: v.Cached, + Digest: v.Digest, + Inputs: v.Inputs, + Name: v.Name, + Started: v.Started, + Completed: v.Completed, + Error: v.Error, + Cached: v.Cached, + ProgressGroup: v.ProgressGroup, }) } for _, v := range ss.Statuses { @@ -339,7 +367,7 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con Msg: v.Data, Timestamp: v.Timestamp, }) - logSize += len(v.Data) + logSize += len(v.Data) + emptyLogVertexSize // avoid logs growing big and split apart if they do if logSize > 1024*1024 { ss.Vertexes = nil @@ -349,6 +377,17 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con break } } + for _, v := range ss.Warnings { + sr.Warnings = append(sr.Warnings, &controlapi.VertexWarning{ + Vertex: v.Vertex, + Level: int64(v.Level), + Short: v.Short, + Detail: v.Detail, + Info: v.SourceInfo, + Ranges: v.Range, + Url: v.URL, + }) + } if err := stream.SendMsg(&sr); err != nil { return err } @@ -363,7 +402,8 @@ func (c *Controller) Status(req *controlapi.StatusRequest, stream controlapi.Con } func (c *Controller) Session(stream 
controlapi.Control_SessionServer) error { - logrus.Debugf("session started") + bklog.G(stream.Context()).Debugf("session started") + conn, closeCh, opts := grpchijack.Hijack(stream) defer conn.Close() @@ -374,7 +414,7 @@ func (c *Controller) Session(stream controlapi.Control_SessionServer) error { }() err := c.opt.SessionManager.HandleConn(ctx, conn, opts) - logrus.Debugf("session finished: %v", err) + bklog.G(ctx).Debugf("session finished: %v", err) return err } @@ -430,25 +470,22 @@ func (c *Controller) gc() { err = eg.Wait() close(ch) if err != nil { - logrus.Errorf("gc error: %+v", err) + bklog.G(ctx).Errorf("gc error: %+v", err) } <-done if size > 0 { - logrus.Debugf("gc cleaned up %d bytes", size) + bklog.G(ctx).Debugf("gc cleaned up %d bytes", size) } } -func parseCacheExportMode(mode string) solver.CacheExportMode { +func parseCacheExportMode(mode string) (solver.CacheExportMode, bool) { switch mode { case "min": - return solver.CacheExportModeMin + return solver.CacheExportModeMin, true case "max": - return solver.CacheExportModeMax - case "": - default: - logrus.Debugf("skipping invalid cache export mode: %s", mode) + return solver.CacheExportModeMax, true } - return solver.CacheExportModeMin + return solver.CacheExportModeMin, false } func toPBGCPolicy(in []client.PruneInfo) []*apitypes.GCPolicy { diff --git a/vendor/github.com/moby/buildkit/control/gateway/gateway.go b/vendor/github.com/moby/buildkit/control/gateway/gateway.go index 9b91a08e0f..62c696d6c4 100644 --- a/vendor/github.com/moby/buildkit/control/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/control/gateway/gateway.go @@ -176,3 +176,11 @@ func (gwf *GatewayForwarder) ExecProcess(srv gwapi.LLBBridge_ExecProcessServer) } return fwd.ExecProcess(srv) } + +func (gwf *GatewayForwarder) Warn(ctx context.Context, req *gwapi.WarnRequest) (*gwapi.WarnResponse, error) { + fwd, err := gwf.lookupForwarder(ctx) + if err != nil { + return nil, errors.Wrap(err, "forwarding Warn") + } + return fwd.Warn(ctx, req) +} diff --git a/vendor/github.com/moby/buildkit/control/init.go b/vendor/github.com/moby/buildkit/control/init.go new file mode 100644 index 0000000000..2e86133e41 --- /dev/null +++ b/vendor/github.com/moby/buildkit/control/init.go @@ -0,0 +1,10 @@ +package control + +import controlapi "github.com/moby/buildkit/api/services/control" + +var emptyLogVertexSize int + +func init() { + emptyLogVertex := controlapi.VertexLog{} + emptyLogVertexSize = emptyLogVertex.Size() +} diff --git a/vendor/github.com/moby/buildkit/executor/executor.go b/vendor/github.com/moby/buildkit/executor/executor.go index 8fbe4a9234..4727af4b03 100644 --- a/vendor/github.com/moby/buildkit/executor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/executor.go @@ -4,6 +4,7 @@ import ( "context" "io" "net" + "syscall" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" @@ -18,6 +19,8 @@ type Meta struct { Tty bool ReadonlyRootFS bool ExtraHosts []HostIP + Ulimit []*pb.Ulimit + CgroupParent string NetMode pb.NetMode SecurityMode pb.SecurityMode } @@ -43,6 +46,7 @@ type ProcessInfo struct { Stdin io.ReadCloser Stdout, Stderr io.WriteCloser Resize <-chan WinSize + Signal <-chan syscall.Signal } type Executor interface { diff --git a/vendor/github.com/moby/buildkit/executor/oci/mounts.go b/vendor/github.com/moby/buildkit/executor/oci/mounts.go index 62360f4663..50ed827497 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/mounts.go +++ b/vendor/github.com/moby/buildkit/executor/oci/mounts.go @@ -46,7 +46,6 @@ func 
withCGroup() oci.SpecOpts { }) return nil } - } func hasPrefix(p, prefixDir string) bool { @@ -100,3 +99,17 @@ func withBoundProc() oci.SpecOpts { return nil } } + +func dedupMounts(mnts []specs.Mount) []specs.Mount { + ret := make([]specs.Mount, 0, len(mnts)) + visited := make(map[string]int) + for _, mnt := range mnts { + if j, ok := visited[mnt.Destination]; ok { + ret[j] = mnt + } else { + visited[mnt.Destination] = len(ret) + ret = append(ret, mnt) + } + } + return ret +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec.go b/vendor/github.com/moby/buildkit/executor/oci/spec.go index 8000310813..ea8741995a 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec.go @@ -3,6 +3,8 @@ package oci import ( "context" "path" + "path/filepath" + "strings" "sync" "github.com/containerd/containerd/containers" @@ -11,10 +13,11 @@ import ( "github.com/containerd/containerd/oci" "github.com/containerd/continuity/fs" "github.com/docker/docker/pkg/idtools" - "github.com/mitchellh/hashstructure" + "github.com/mitchellh/hashstructure/v2" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/util/network" + traceexec "github.com/moby/buildkit/util/tracing/exec" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux" "github.com/pkg/errors" @@ -32,15 +35,40 @@ const ( NoProcessSandbox ) +func (pm ProcessMode) String() string { + switch pm { + case ProcessSandbox: + return "sandbox" + case NoProcessSandbox: + return "no-sandbox" + default: + return "" + } +} + // Ideally we don't have to import whole containerd just for the default spec // GenerateSpec generates spec using containerd functionality. // opts are ignored for s.Process, s.Hostname, and s.Mounts . -func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { +func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mount, id, resolvConf, hostsFile string, namespace network.Namespace, cgroupParent string, processMode ProcessMode, idmap *idtools.IdentityMapping, apparmorProfile string, tracingSocket string, opts ...oci.SpecOpts) (*specs.Spec, func(), error) { c := &containers.Container{ ID: id, } + if len(meta.CgroupParent) > 0 { + cgroupParent = meta.CgroupParent + } + if cgroupParent != "" { + var cgroupsPath string + lastSeparator := cgroupParent[len(cgroupParent)-1:] + if strings.Contains(cgroupParent, ".slice") && lastSeparator == ":" { + cgroupsPath = cgroupParent + id + } else { + cgroupsPath = filepath.Join("/", cgroupParent, "buildkit", id) + } + opts = append(opts, oci.WithCgroup(cgroupsPath)) + } + // containerd/oci.GenerateSpec requires a namespace, which // will be used to namespace specs.Linux.CgroupsPath if generated if _, ok := namespaces.Namespace(ctx); !ok { @@ -71,11 +99,23 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou return nil, nil, err } + if rlimitsOpts, err := generateRlimitOpts(meta.Ulimit); err == nil { + opts = append(opts, rlimitsOpts...) 
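dedupMounts above keeps a single mount per destination: the last entry for a destination wins, but it stays at the position where the destination first appeared. A test-style sketch of that behavior, written in the same package because the helper is unexported:

```go
package oci

import (
	"testing"

	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func TestDedupMountsLastWins(t *testing.T) {
	mnts := []specs.Mount{
		{Destination: "/proc", Type: "proc"},
		{Destination: "/dev", Type: "tmpfs"},
		{Destination: "/proc", Type: "bind", Source: "/custom/proc"},
	}

	out := dedupMounts(mnts)

	// Two mounts survive; the later /proc entry replaced the first one
	// in place, so it still precedes /dev.
	if len(out) != 2 || out[0].Source != "/custom/proc" || out[1].Destination != "/dev" {
		t.Fatalf("unexpected result: %+v", out)
	}
}
```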
+ } else { + return nil, nil, err + } + hostname := defaultHostname if meta.Hostname != "" { hostname = meta.Hostname } + if tracingSocket != "" { + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/protocol/exporter.md + meta.Env = append(meta.Env, "OTEL_TRACES_EXPORTER=otlp", "OTEL_EXPORTER_OTLP_TRACES_ENDPOINT=unix:///dev/otel-grpc.sock", "OTEL_EXPORTER_OTLP_TRACES_PROTOCOL=grpc") + meta.Env = append(meta.Env, traceexec.Environ(ctx)...) + } + opts = append(opts, oci.WithProcessArgs(meta.Args...), oci.WithEnv(meta.Env), @@ -89,13 +129,16 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou return nil, nil, err } + if len(meta.Ulimit) == 0 { + // reset open files limit + s.Process.Rlimits = nil + } + // set the networking information on the spec if err := namespace.Set(s); err != nil { return nil, nil, err } - s.Process.Rlimits = nil // reset open files limit - sm := &submounts{} var releasers []func() error @@ -139,6 +182,16 @@ func GenerateSpec(ctx context.Context, meta executor.Meta, mounts []executor.Mou } } + if tracingSocket != "" { + s.Mounts = append(s.Mounts, specs.Mount{ + Destination: "/dev/otel-grpc.sock", + Type: "bind", + Source: tracingSocket, + Options: []string{"ro", "rbind"}, + }) + } + + s.Mounts = dedupMounts(s.Mounts) return s, releaseAll, nil } @@ -158,7 +211,7 @@ func (s *submounts) subMount(m mount.Mount, subPath string) (mount.Mount, error) if s.m == nil { s.m = map[uint64]mountRef{} } - h, err := hashstructure.Hash(m, nil) + h, err := hashstructure.Hash(m, hashstructure.FormatV2, nil) if err != nil { return mount.Mount{}, nil } diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go index 65f2ca6bf9..5f4908ca6b 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_unix.go @@ -1,17 +1,20 @@ +//go:build !windows // +build !windows package oci import ( "context" + "fmt" + "strings" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" + cdseccomp "github.com/containerd/containerd/pkg/seccomp" "github.com/docker/docker/pkg/idtools" "github.com/docker/docker/profiles/seccomp" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/entitlements/security" - "github.com/moby/buildkit/util/system" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/opencontainers/selinux/go-selinux/label" ) @@ -41,7 +44,7 @@ func generateSecurityOpts(mode pb.SecurityMode, apparmorProfile string) (opts [] }, }, nil case pb.SecurityMode_SANDBOX: - if system.SeccompSupported() { + if cdseccomp.IsEnabled() { opts = append(opts, withDefaultProfile()) } if apparmorProfile != "" { @@ -78,6 +81,29 @@ func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { }, nil } +func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { + if len(ulimits) == 0 { + return nil, nil + } + var rlimits []specs.POSIXRlimit + for _, u := range ulimits { + if u == nil { + continue + } + rlimits = append(rlimits, specs.POSIXRlimit{ + Type: fmt.Sprintf("RLIMIT_%s", strings.ToUpper(u.Name)), + Hard: uint64(u.Hard), + Soft: uint64(u.Soft), + }) + } + return []oci.SpecOpts{ + func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { + s.Process.Rlimits = rlimits + return nil + }, + }, nil +} + // withDefaultProfile sets the default seccomp profile to the spec. 
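generateRlimitOpts above translates each pb.Ulimit into a specs.POSIXRlimit, upper-casing the name under an RLIMIT_ prefix. A same-package sketch of the transformation (the nil client and container arguments are safe because the returned SpecOpts ignores them):

```go
package oci

import (
	"context"
	"testing"

	"github.com/moby/buildkit/solver/pb"
	specs "github.com/opencontainers/runtime-spec/specs-go"
)

func TestGenerateRlimitOpts(t *testing.T) {
	opts, err := generateRlimitOpts([]*pb.Ulimit{
		{Name: "nofile", Soft: 1024, Hard: 2048},
	})
	if err != nil {
		t.Fatal(err)
	}

	s := &specs.Spec{Process: &specs.Process{}}
	for _, o := range opts {
		if err := o(context.Background(), nil, nil, s); err != nil {
			t.Fatal(err)
		}
	}

	got := s.Process.Rlimits[0]
	if got.Type != "RLIMIT_NOFILE" || got.Soft != 1024 || got.Hard != 2048 {
		t.Fatalf("unexpected rlimit: %+v", got)
	}
}
```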
// Note: must follow the setting of process capabilities func withDefaultProfile() oci.SpecOpts { diff --git a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go index ea3afe86a4..bc1a6261e2 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go +++ b/vendor/github.com/moby/buildkit/executor/oci/spec_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package oci @@ -35,3 +36,10 @@ func generateIDmapOpts(idmap *idtools.IdentityMapping) ([]oci.SpecOpts, error) { } return nil, errors.New("no support for IdentityMapping on Windows") } + +func generateRlimitOpts(ulimits []*pb.Ulimit) ([]oci.SpecOpts, error) { + if len(ulimits) == 0 { + return nil, nil + } + return nil, errors.New("no support for POSIXRlimit on Windows") +} diff --git a/vendor/github.com/moby/buildkit/executor/oci/user.go b/vendor/github.com/moby/buildkit/executor/oci/user.go index e49eb8e4d3..eb459f391f 100644 --- a/vendor/github.com/moby/buildkit/executor/oci/user.go +++ b/vendor/github.com/moby/buildkit/executor/oci/user.go @@ -15,6 +15,12 @@ import ( ) func GetUser(root, username string) (uint32, uint32, []uint32, error) { + var isDefault bool + if username == "" { + username = "0" + isDefault = true + } + // fast path from uid/gid if uid, gid, err := ParseUIDGID(username); err == nil { return uid, gid, nil, nil @@ -31,6 +37,9 @@ func GetUser(root, username string) (uint32, uint32, []uint32, error) { execUser, err := user.GetExecUser(username, nil, passwdFile, groupFile) if err != nil { + if isDefault { + return 0, 0, nil, nil + } return 0, 0, nil, err } var sgids []uint32 diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go index 799063250c..702d513102 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor.go @@ -7,11 +7,14 @@ import ( "os" "os/exec" "path/filepath" - "strings" "sync" "syscall" "time" + "github.com/moby/buildkit/util/bklog" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" + "github.com/containerd/containerd/mount" containerdoci "github.com/containerd/containerd/oci" "github.com/containerd/continuity/fs" @@ -19,7 +22,7 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/executor/oci" - "github.com/moby/buildkit/frontend/gateway/errdefs" + gatewayapi "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/network" @@ -27,7 +30,6 @@ import ( "github.com/moby/buildkit/util/stack" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type Opt struct { @@ -46,6 +48,7 @@ type Opt struct { DNS *oci.DNSConfig OOMScoreAdj *int ApparmorProfile string + TracingSocket string } var defaultCommandCandidates = []string{"buildkit-runc", "runc"} @@ -64,6 +67,7 @@ type runcExecutor struct { running map[string]chan error mu sync.Mutex apparmorProfile string + tracingSocket string } func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Executor, error) { @@ -127,6 +131,7 @@ func New(opt Opt, networkProviders map[pb.NetMode]network.Provider) (executor.Ex oomScoreAdj: opt.OOMScoreAdj, running: make(map[string]chan error), apparmorProfile: opt.ApparmorProfile, + tracingSocket: 
opt.TracingSocket, } return w, nil } @@ -163,7 +168,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, defer namespace.Close() if meta.NetMode == pb.NetMode_HOST { - logrus.Info("enabling HostNetworking") + bklog.G(ctx).Info("enabling HostNetworking") } resolvConf, err := oci.GetResolvConf(ctx, w.root, w.idmap, w.dns) @@ -246,17 +251,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } } - if w.cgroupParent != "" { - var cgroupsPath string - lastSeparator := w.cgroupParent[len(w.cgroupParent)-1:] - if strings.Contains(w.cgroupParent, ".slice") && lastSeparator == ":" { - cgroupsPath = w.cgroupParent + id - } else { - cgroupsPath = filepath.Join("/", w.cgroupParent, "buildkit", id) - } - opts = append(opts, containerdoci.WithCgroup(cgroupsPath)) - } - spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.processMode, w.idmap, w.apparmorProfile, opts...) + spec, cleanup, err := oci.GenerateSpec(ctx, meta, mounts, id, resolvConf, hostsFile, namespace, w.cgroupParent, w.processMode, w.idmap, w.apparmorProfile, w.tracingSocket, opts...) if err != nil { return err } @@ -300,7 +295,7 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, case <-ctx.Done(): killCtx, timeout := context.WithTimeout(context.Background(), 7*time.Second) if err := w.runc.Kill(killCtx, id, int(syscall.SIGKILL), nil); err != nil { - logrus.Errorf("failed to kill runc %s: %+v", id, err) + bklog.G(ctx).Errorf("failed to kill runc %s: %+v", id, err) select { case <-killCtx.Done(): timeout() @@ -321,31 +316,39 @@ func (w *runcExecutor) Run(ctx context.Context, id string, root executor.Mount, } }() - logrus.Debugf("> creating %s %v", id, meta.Args) - // this is a cheat, we have not actually started, but as close as we can get with runc for now - if started != nil { - startedOnce.Do(func() { - close(started) - }) - } + bklog.G(ctx).Debugf("> creating %s %v", id, meta.Args) - err = w.run(runCtx, id, bundle, process) + trace.SpanFromContext(ctx).AddEvent("Container created") + err = w.run(runCtx, id, bundle, process, func() { + startedOnce.Do(func() { + trace.SpanFromContext(ctx).AddEvent("Container started") + if started != nil { + close(started) + } + }) + }) close(ended) return exitError(ctx, err) } func exitError(ctx context.Context, err error) error { if err != nil { - exitErr := &errdefs.ExitError{ - ExitCode: errdefs.UnknownExitStatus, + exitErr := &gatewayapi.ExitError{ + ExitCode: gatewayapi.UnknownExitStatus, Err: err, } var runcExitError *runc.ExitError if errors.As(err, &runcExitError) { - exitErr = &errdefs.ExitError{ + exitErr = &gatewayapi.ExitError{ ExitCode: uint32(runcExitError.Status), } } + trace.SpanFromContext(ctx).AddEvent( + "Container exited", + trace.WithAttributes( + attribute.Int("exit.code", int(exitErr.ExitCode)), + ), + ) select { case <-ctx.Done(): exitErr.Err = errors.Wrapf(ctx.Err(), exitErr.Error()) @@ -355,6 +358,10 @@ func exitError(ctx context.Context, err error) error { } } + trace.SpanFromContext(ctx).AddEvent( + "Container exited", + trace.WithAttributes(attribute.Int("exit.code", 0)), + ) return nil } @@ -421,7 +428,7 @@ func (w *runcExecutor) Exec(ctx context.Context, id string, process executor.Pro spec.Process.Env = process.Meta.Env } - err = w.exec(ctx, id, state.Bundle, spec.Process, process) + err = w.exec(ctx, id, state.Bundle, spec.Process, process, nil) return exitError(ctx, err) } @@ -451,3 +458,78 @@ func (s *forwardIO) Stdout() 
io.ReadCloser { func (s *forwardIO) Stderr() io.ReadCloser { return nil } + +// startingProcess is to track the os process so we can send signals to it. +type startingProcess struct { + Process *os.Process + ready chan struct{} +} + +// Release will free resources with a startingProcess. +func (p *startingProcess) Release() { + if p.Process != nil { + p.Process.Release() + } +} + +// WaitForReady will wait until the Process has been populated or the +// provided context was cancelled. This should be called before using +// the Process field. +func (p *startingProcess) WaitForReady(ctx context.Context) error { + select { + case <-ctx.Done(): + return ctx.Err() + case <-p.ready: + return nil + } +} + +// WaitForStart will record the pid reported by Runc via the channel. +// We wait for up to 10s for the runc process to start. If the started +// callback is non-nil it will be called after receiving the pid. +func (p *startingProcess) WaitForStart(ctx context.Context, startedCh <-chan int, started func()) error { + startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second) + defer timeout() + var err error + select { + case <-startedCtx.Done(): + return errors.New("runc started message never received") + case pid, ok := <-startedCh: + if !ok { + return errors.New("runc process failed to send pid") + } + if started != nil { + started() + } + p.Process, err = os.FindProcess(pid) + if err != nil { + return errors.Wrapf(err, "unable to find runc process for pid %d", pid) + } + close(p.ready) + } + return nil +} + +// handleSignals will wait until the runcProcess is ready then will +// send each signal received on the channel to the process. +func handleSignals(ctx context.Context, runcProcess *startingProcess, signals <-chan syscall.Signal) error { + if signals == nil { + return nil + } + err := runcProcess.WaitForReady(ctx) + if err != nil { + return err + } + for { + select { + case <-ctx.Done(): + return nil + case sig := <-signals: + err := runcProcess.Process.Signal(sig) + if err != nil { + bklog.G(ctx).Errorf("failed to signal %s to process: %s", sig, err) + return err + } + } + } +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go index 3751b9009c..447c4a96b9 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_common.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package runcexecutor @@ -9,28 +10,63 @@ import ( "github.com/moby/buildkit/executor" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" + "golang.org/x/sync/errgroup" ) var unsupportedConsoleError = errors.New("tty for runc is only supported on linux") func updateRuncFieldsForHostOS(runtime *runc.Runc) {} -func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo) error { +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error { if process.Meta.Tty { return unsupportedConsoleError } - _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ - IO: &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, - NoPivot: w.noPivot, + return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ + NoPivot: w.noPivot, + Started: started, + IO: io, + }) + 
return err }) - return err } -func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo) error { +func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error { if process.Meta.Tty { return unsupportedConsoleError } - return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ - IO: &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}, + return w.commonCall(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { + return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ + Started: started, + IO: io, + }) }) } + +type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error + +// commonCall is the common run/exec logic used for non-linux runtimes. A tty +// is only supported for linux, so this really just handles signal propagation +// to the started runc process. +func (w *runcExecutor) commonCall(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error { + runcProcess := &startingProcess{ + ready: make(chan struct{}), + } + defer runcProcess.Release() + + var eg errgroup.Group + egCtx, cancel := context.WithCancel(ctx) + defer eg.Wait() + defer cancel() + + startedCh := make(chan int, 1) + eg.Go(func() error { + return runcProcess.WaitForStart(egCtx, startedCh, started) + }) + + eg.Go(func() error { + return handleSignals(egCtx, runcProcess, process.Signal) + }) + + return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) +} diff --git a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go index b01040ce9f..15ea812a5a 100644 --- a/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go +++ b/vendor/github.com/moby/buildkit/executor/runcexecutor/executor_linux.go @@ -5,15 +5,14 @@ import ( "io" "os" "syscall" - "time" "github.com/containerd/console" runc "github.com/containerd/go-runc" - "github.com/docker/docker/pkg/signal" "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/sys/signal" "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) @@ -22,8 +21,8 @@ func updateRuncFieldsForHostOS(runtime *runc.Runc) { runtime.PdeathSignal = syscall.SIGKILL // this can still leak the process } -func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo) error { - return w.callWithIO(ctx, id, bundle, process, func(ctx context.Context, started chan<- int, io runc.IO) error { +func (w *runcExecutor) run(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func()) error { + return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { _, err := w.runc.Run(ctx, id, bundle, &runc.CreateOpts{ NoPivot: w.noPivot, Started: started, @@ -33,8 +32,8 @@ func (w *runcExecutor) run(ctx context.Context, id, bundle string, process execu }) } -func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo) error { - return w.callWithIO(ctx, id, bundle, process, func(ctx context.Context, started chan<- int, io runc.IO) error { +func (w *runcExecutor) exec(ctx 
context.Context, id, bundle string, specsProcess *specs.Process, process executor.ProcessInfo, started func()) error { + return w.callWithIO(ctx, id, bundle, process, started, func(ctx context.Context, started chan<- int, io runc.IO) error { return w.runc.Exec(ctx, id, *specsProcess, &runc.ExecOpts{ Started: started, IO: io, @@ -44,12 +43,28 @@ func (w *runcExecutor) exec(ctx context.Context, id, bundle string, specsProcess type runcCall func(ctx context.Context, started chan<- int, io runc.IO) error -func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, call runcCall) error { - ctx, cancel := context.WithCancel(ctx) +func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, process executor.ProcessInfo, started func(), call runcCall) error { + runcProcess := &startingProcess{ + ready: make(chan struct{}), + } + defer runcProcess.Release() + + var eg errgroup.Group + egCtx, cancel := context.WithCancel(ctx) + defer eg.Wait() defer cancel() + startedCh := make(chan int, 1) + eg.Go(func() error { + return runcProcess.WaitForStart(egCtx, startedCh, started) + }) + + eg.Go(func() error { + return handleSignals(egCtx, runcProcess, process.Signal) + }) + if !process.Meta.Tty { - return call(ctx, nil, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) + return call(ctx, startedCh, &forwardIO{stdin: process.Stdin, stdout: process.Stdout, stderr: process.Stderr}) } ptm, ptsName, err := console.NewPty() @@ -63,18 +78,16 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces return err } - eg, ctx := errgroup.WithContext(ctx) - defer func() { if process.Stdin != nil { process.Stdin.Close() } pts.Close() ptm.Close() - cancel() // this will shutdown resize loop + cancel() // this will shutdown resize and signal loops err := eg.Wait() if err != nil { - logrus.Warningf("error while shutting down tty io: %s", err) + bklog.G(ctx).Warningf("error while shutting down tty io: %s", err) } }() @@ -105,29 +118,14 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces }) } - started := make(chan int, 1) - eg.Go(func() error { - startedCtx, timeout := context.WithTimeout(ctx, 10*time.Second) - defer timeout() - var runcProcess *os.Process - select { - case <-startedCtx.Done(): - return errors.New("runc started message never received") - case pid, ok := <-started: - if !ok { - return errors.New("runc process failed to send pid") - } - runcProcess, err = os.FindProcess(pid) - if err != nil { - return errors.Wrapf(err, "unable to find runc process for pid %d", pid) - } - defer runcProcess.Release() + err := runcProcess.WaitForReady(egCtx) + if err != nil { + return err } - for { select { - case <-ctx.Done(): + case <-egCtx.Done(): return nil case resize := <-process.Resize: err = ptm.Resize(console.WinSize{ @@ -135,11 +133,11 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces Width: uint16(resize.Cols), }) if err != nil { - logrus.Errorf("failed to resize ptm: %s", err) + bklog.G(ctx).Errorf("failed to resize ptm: %s", err) } - err = runcProcess.Signal(signal.SIGWINCH) + err = runcProcess.Process.Signal(signal.SIGWINCH) if err != nil { - logrus.Errorf("failed to send SIGWINCH to process: %s", err) + bklog.G(ctx).Errorf("failed to send SIGWINCH to process: %s", err) } } } @@ -156,5 +154,5 @@ func (w *runcExecutor) callWithIO(ctx context.Context, id, bundle string, proces runcIO.stderr = pts } - return call(ctx, started, runcIO) + 
return call(ctx, startedCh, runcIO) } diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go index b428afd0e6..a18d660a5c 100644 --- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go +++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go @@ -1,15 +1,19 @@ package exptypes import ( - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -const ExporterImageConfigKey = "containerimage.config" -const ExporterInlineCache = "containerimage.inlinecache" -const ExporterPlatformsKey = "refs.platforms" - -const EmptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1") +const ( + ExporterConfigDigestKey = "config.digest" + ExporterImageDigestKey = "containerimage.digest" + ExporterImageConfigKey = "containerimage.config" + ExporterImageConfigDigestKey = "containerimage.config.digest" + ExporterImageDescriptorKey = "containerimage.descriptor" + ExporterInlineCache = "containerimage.inlinecache" + ExporterBuildInfo = "containerimage.buildinfo" + ExporterPlatformsKey = "refs.platforms" +) type Platforms struct { Platforms []Platform @@ -17,5 +21,5 @@ type Platforms struct { type Platform struct { ID string - Platform specs.Platform + Platform ocispecs.Platform } diff --git a/vendor/github.com/moby/buildkit/exporter/exporter.go b/vendor/github.com/moby/buildkit/exporter/exporter.go index 02c6a99913..610481b710 100644 --- a/vendor/github.com/moby/buildkit/exporter/exporter.go +++ b/vendor/github.com/moby/buildkit/exporter/exporter.go @@ -4,6 +4,7 @@ import ( "context" "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/util/compression" ) type Exporter interface { @@ -12,6 +13,7 @@ type Exporter interface { type ExporterInstance interface { Name() string + Config() Config Export(ctx context.Context, src Source, sessionID string) (map[string]string, error) } @@ -20,3 +22,7 @@ type Source struct { Refs map[string]cache.ImmutableRef Metadata map[string][]byte } + +type Config struct { + Compression compression.Config +} diff --git a/vendor/github.com/moby/buildkit/exporter/local/export.go b/vendor/github.com/moby/buildkit/exporter/local/export.go index d772776a9a..5daa4aa426 100644 --- a/vendor/github.com/moby/buildkit/exporter/local/export.go +++ b/vendor/github.com/moby/buildkit/exporter/local/export.go @@ -46,8 +46,11 @@ func (e *localExporterInstance) Name() string { return "exporting to client" } -func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { +func (e *localExporter) Config() exporter.Config { + return exporter.Config{} +} +func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) defer cancel() @@ -143,7 +146,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, func newProgressHandler(ctx context.Context, id string) func(int, bool) { limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) - pw, _, _ := progress.FromContext(ctx) + pw, _, _ := progress.NewFromContext(ctx) now := time.Now() st := progress.Status{ Started: &now, diff --git a/vendor/github.com/moby/buildkit/exporter/tar/export.go 
b/vendor/github.com/moby/buildkit/exporter/tar/export.go index 79e98cd6a1..0febefd0b0 100644 --- a/vendor/github.com/moby/buildkit/exporter/tar/export.go +++ b/vendor/github.com/moby/buildkit/exporter/tar/export.go @@ -4,6 +4,7 @@ import ( "context" "io/ioutil" "os" + "strconv" "strings" "time" @@ -14,10 +15,18 @@ import ( "github.com/moby/buildkit/session/filesync" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/util/progress" + "github.com/pkg/errors" "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" ) +const ( + // preferNondistLayersKey is an exporter option which can be used to mark a layer as non-distributable if the layer reference was + // already found to use a non-distributable media type. + // When this option is not set, the exporter will change the media type of the layer to a distributable one. + preferNondistLayersKey = "prefer-nondist-layers" +) + type Opt struct { SessionManager *session.Manager } @@ -34,17 +43,32 @@ func New(opt Opt) (exporter.Exporter, error) { func (e *localExporter) Resolve(ctx context.Context, opt map[string]string) (exporter.ExporterInstance, error) { li := &localExporterInstance{localExporter: e} + + v, ok := opt[preferNondistLayersKey] + if ok { + b, err := strconv.ParseBool(v) + if err != nil { + return nil, errors.Wrapf(err, "non-bool value for %s: %s", preferNondistLayersKey, v) + } + li.preferNonDist = b + } + return li, nil } type localExporterInstance struct { *localExporter + preferNonDist bool } func (e *localExporterInstance) Name() string { return "exporting to client" } +func (e *localExporterInstance) Config() exporter.Config { + return exporter.Config{} +} + func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, sessionID string) (map[string]string, error) { var defers []func() @@ -153,7 +177,7 @@ func (e *localExporterInstance) Export(ctx context.Context, inp exporter.Source, } func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) + pw, _, _ := progress.NewFromContext(ctx) now := time.Now() st := progress.Status{ Started: &now, diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go index 5e67a83370..e0a1806901 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/build.go @@ -14,6 +14,8 @@ import ( "strings" "github.com/containerd/containerd/platforms" + "github.com/docker/distribution/reference" + "github.com/docker/go-units" controlapi "github.com/moby/buildkit/api/services/control" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/exporter/containerimage/exptypes" @@ -25,7 +27,8 @@ import ( "github.com/moby/buildkit/solver/errdefs" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" - specs "github.com/opencontainers/image-spec/specs-go/v1" + binfotypes "github.com/moby/buildkit/util/buildinfo/types" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -33,29 +36,38 @@ import ( const ( DefaultLocalNameContext = "context" DefaultLocalNameDockerfile = "dockerfile" - keyTarget = "target" - keyFilename = "filename" - keyCacheFrom = "cache-from" // for registry only. 
deprecated in favor of keyCacheImports - keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry - keyCacheNS = "build-arg:BUILDKIT_CACHE_MOUNT_NS" defaultDockerfileName = "Dockerfile" dockerignoreFilename = ".dockerignore" - buildArgPrefix = "build-arg:" - labelPrefix = "label:" - keyNoCache = "no-cache" - keyTargetPlatform = "platform" - keyMultiPlatform = "multi-platform" - keyImageResolveMode = "image-resolve-mode" - keyGlobalAddHosts = "add-hosts" - keyForceNetwork = "force-network-mode" - keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented - keyNameContext = "contextkey" - keyNameDockerfile = "dockerfilekey" - keyContextSubDir = "contextsubdir" - keyContextKeepGitDir = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" - keySyntax = "build-arg:BUILDKIT_SYNTAX" - keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" - keyHostname = "hostname" + + buildArgPrefix = "build-arg:" + labelPrefix = "label:" + + keyTarget = "target" + keyFilename = "filename" + keyCacheFrom = "cache-from" // for registry only. deprecated in favor of keyCacheImports + keyCacheImports = "cache-imports" // JSON representation of []CacheOptionsEntry + keyCgroupParent = "cgroup-parent" + keyContextSubDir = "contextsubdir" + keyForceNetwork = "force-network-mode" + keyGlobalAddHosts = "add-hosts" + keyHostname = "hostname" + keyImageResolveMode = "image-resolve-mode" + keyMultiPlatform = "multi-platform" + keyNameContext = "contextkey" + keyNameDockerfile = "dockerfilekey" + keyNoCache = "no-cache" + keyOverrideCopyImage = "override-copy-image" // remove after CopyOp implemented + keyShmSize = "shm-size" + keyTargetPlatform = "platform" + keyUlimit = "ulimit" + + // Don't forget to update frontend documentation if you add + // a new build-arg: frontend/dockerfile/docs/syntax.md + keyCacheNSArg = "build-arg:BUILDKIT_CACHE_MOUNT_NS" + keyContextKeepGitDirArg = "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR" + keyHostnameArg = "build-arg:BUILDKIT_SANDBOX_HOSTNAME" + keyMultiPlatformArg = "build-arg:BUILDKIT_MULTI_PLATFORM" + keySyntaxArg = "build-arg:BUILDKIT_SYNTAX" ) var httpPrefix = regexp.MustCompile(`^https?://`) @@ -90,8 +102,8 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { defaultBuildPlatform = workers[0].Platforms[0] } - buildPlatforms := []specs.Platform{defaultBuildPlatform} - targetPlatforms := []*specs.Platform{nil} + buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} + targetPlatforms := []*ocispecs.Platform{nil} if v := opts[keyTargetPlatform]; v != "" { var err error targetPlatforms, err = parsePlatforms(v) @@ -110,6 +122,16 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrap(err, "failed to parse additional hosts") } + shmSize, err := parseShmSize(opts[keyShmSize]) + if err != nil { + return nil, errors.Wrap(err, "failed to parse shm size") + } + + ulimit, err := parseUlimits(opts[keyUlimit]) + if err != nil { + return nil, errors.Wrap(err, "failed to parse ulimit") + } + defaultNetMode, err := parseNetMode(opts[keyForceNetwork]) if err != nil { return nil, err @@ -143,13 +165,14 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { llb.SessionID(c.BuildOpts().SessionID), llb.SharedKeyHint(localNameDockerfile), dockerfile2llb.WithInternalName(name), + llb.Differ(llb.DiffNone, false), ) fileop := useFileOp(opts, &caps) var buildContext *llb.State isNotLocalContext := false - if st, ok := detectGitContext(opts[localNameContext], 
opts[keyContextKeepGitDir]); ok { + if st, ok := detectGitContext(opts[localNameContext], opts[keyContextKeepGitDirArg]); ok { if !forceLocalDockerfile { src = *st } @@ -243,6 +266,11 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return nil, errors.Wrapf(err, "failed to marshal local source") } + defVtx, err := def.Head() + if err != nil { + return nil, err + } + var sourceMap *llb.SourceMap eg, ctx2 := errgroup.WithContext(ctx) @@ -302,6 +330,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { llb.FollowPaths([]string{dockerignoreFilename}), llb.SharedKeyHint(localNameContext+"-"+dockerignoreFilename), dockerfile2llb.WithInternalName("load "+dockerignoreFilename), + llb.Differ(llb.DiffNone, false), ) dockerignoreState = &st } @@ -344,11 +373,11 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { } if _, ok := opts["cmdline"]; !ok { - if cmdline, ok := opts[keySyntax]; ok { + if cmdline, ok := opts[keySyntaxArg]; ok { p := strings.SplitN(strings.TrimSpace(cmdline), " ", 2) res, err := forwardGateway(ctx, c, p[0], cmdline) if err != nil && len(errdefs.Sources(err)) == 0 { - return nil, errors.Wrapf(err, "failed with %s = %s", keySyntax, cmdline) + return nil, errors.Wrapf(err, "failed with %s = %s", keySyntaxArg, cmdline) } return res, err } else if ref, cmdline, loc, ok := dockerfile2llb.DetectSyntax(bytes.NewBuffer(dtDockerfile)); ok { @@ -389,10 +418,14 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { } res := client.NewResult() + if v, ok := opts[keyHostnameArg]; ok && len(v) > 0 { + opts[keyHostname] = v + } + eg, ctx = errgroup.WithContext(ctx) for i, tp := range targetPlatforms { - func(i int, tp *specs.Platform) { + func(i int, tp *ocispecs.Platform) { eg.Go(func() (err error) { defer func() { var el *parser.ErrorLocation @@ -400,12 +433,13 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { err = wrapSource(err, sourceMap, el.Location) } }() - st, img, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{ + + st, img, bi, err := dockerfile2llb.Dockerfile2LLB(ctx, dtDockerfile, dockerfile2llb.ConvertOpt{ Target: opts[keyTarget], MetaResolver: c, BuildArgs: filter(opts, buildArgPrefix), Labels: filter(opts, labelPrefix), - CacheIDNamespace: opts[keyCacheNS], + CacheIDNamespace: opts[keyCacheNSArg], SessionID: c.BuildOpts().SessionID, BuildContext: buildContext, Excludes: excludes, @@ -415,15 +449,25 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { ImageResolveMode: resolveMode, PrefixPlatform: exportMap, ExtraHosts: extraHosts, + ShmSize: shmSize, + Ulimit: ulimit, + CgroupParent: opts[keyCgroupParent], ForceNetMode: defaultNetMode, OverrideCopyImage: opts[keyOverrideCopyImage], LLBCaps: &caps, SourceMap: sourceMap, Hostname: opts[keyHostname], + Warn: func(msg, url string, detail [][]byte, location *parser.Range) { + if i != 0 { + return + } + c.Warn(ctx, defVtx, msg, warnOpts(sourceMap, location, detail, url)) + }, + ContextByName: contextByNameFunc(c, tp), }) if err != nil { - return errors.Wrapf(err, "failed to create LLB definition") + return err } def, err := st.Marshal(ctx) @@ -475,8 +519,14 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { return err } + buildinfo, err := json.Marshal(bi) + if err != nil { + return errors.Wrapf(err, "failed to marshal build info") + } + if !exportMap { res.AddMeta(exptypes.ExporterImageConfigKey, config) + 
res.AddMeta(exptypes.ExporterBuildInfo, buildinfo) res.SetRef(ref) } else { p := platforms.DefaultSpec() @@ -486,6 +536,7 @@ func Build(ctx context.Context, c client.Client) (*client.Result, error) { k := platforms.Format(p) res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, k), config) + res.AddMeta(fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), buildinfo) res.AddRef(k, ref) expPlatforms.Platforms[i] = exptypes.Platform{ ID: k, @@ -611,8 +662,8 @@ func isArchive(header []byte) bool { return err == nil } -func parsePlatforms(v string) ([]*specs.Platform, error) { - var pp []*specs.Platform +func parsePlatforms(v string) ([]*ocispecs.Platform, error) { + var pp []*ocispecs.Platform for _, v := range strings.Split(v, ",") { p, err := platforms.Parse(v) if err != nil { @@ -663,6 +714,41 @@ func parseExtraHosts(v string) ([]llb.HostIP, error) { return out, nil } +func parseShmSize(v string) (int64, error) { + if len(v) == 0 { + return 0, nil + } + kb, err := strconv.ParseInt(v, 10, 64) + if err != nil { + return 0, err + } + return kb, nil +} + +func parseUlimits(v string) ([]pb.Ulimit, error) { + if v == "" { + return nil, nil + } + out := make([]pb.Ulimit, 0) + csvReader := csv.NewReader(strings.NewReader(v)) + fields, err := csvReader.Read() + if err != nil { + return nil, err + } + for _, field := range fields { + ulimit, err := units.ParseUlimit(field) + if err != nil { + return nil, err + } + out = append(out, pb.Ulimit{ + Name: ulimit.Name, + Soft: ulimit.Soft, + Hard: ulimit.Hard, + }) + } + return out, nil +} + func parseNetMode(v string) (pb.NetMode, error) { if v == "" { return llb.NetModeSandbox, nil @@ -703,6 +789,176 @@ func scopeToSubDir(c *llb.State, fileop bool, dir string) *llb.State { return &bc } +func warnOpts(sm *llb.SourceMap, r *parser.Range, detail [][]byte, url string) client.WarnOpts { + opts := client.WarnOpts{Level: 1, Detail: detail, URL: url} + if r == nil { + return opts + } + opts.SourceInfo = &pb.SourceInfo{ + Data: sm.Data, + Filename: sm.Filename, + Definition: sm.Definition.ToPB(), + } + opts.Range = []*pb.Range{{ + Start: pb.Position{ + Line: int32(r.Start.Line), + Character: int32(r.Start.Character), + }, + End: pb.Position{ + Line: int32(r.End.Line), + Character: int32(r.End.Character), + }, + }} + return opts +} + +func contextByNameFunc(c client.Client, p *ocispecs.Platform) func(context.Context, string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { + return func(ctx context.Context, name string) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { + named, err := reference.ParseNormalizedNamed(name) + if err != nil { + return nil, nil, nil, errors.Wrapf(err, "invalid context name %s", name) + } + name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") + + if p == nil { + pp := platforms.Normalize(platforms.DefaultSpec()) + p = &pp + } + if p != nil { + name := name + "::" + platforms.Format(platforms.Normalize(*p)) + st, img, bi, err := contextByName(ctx, c, name, p) + if err != nil { + return nil, nil, nil, err + } + if st != nil { + return st, img, bi, nil + } + } + return contextByName(ctx, c, name, p) + } +} + +func contextByName(ctx context.Context, c client.Client, name string, platform *ocispecs.Platform) (*llb.State, *dockerfile2llb.Image, *binfotypes.BuildInfo, error) { + opts := c.BuildOpts().Opts + v, ok := opts["context:"+name] + if !ok { + return nil, nil, nil, nil + } + + vv := strings.SplitN(v, ":", 2) + if len(vv) != 2 { + return nil, nil, nil, 
errors.Errorf("invalid context specifier %s for %s", v, name) + } + switch vv[0] { + case "docker-image": + ref := strings.TrimPrefix(vv[1], "//") + imgOpt := []llb.ImageOption{ + llb.WithCustomName("[context " + name + "] " + ref), + llb.WithMetaResolver(c), + } + if platform != nil { + imgOpt = append(imgOpt, llb.Platform(*platform)) + } + st := llb.Image(ref, imgOpt...) + return &st, nil, nil, nil + case "git": + st, ok := detectGitContext(v, "1") + if !ok { + return nil, nil, nil, errors.Errorf("invalid git context %s", v) + } + return st, nil, nil, nil + case "http", "https": + st, ok := detectGitContext(v, "1") + if !ok { + httpst := llb.HTTP(v, llb.WithCustomName("[context "+name+"] "+v)) + st = &httpst + } + return st, nil, nil, nil + case "local": + st := llb.Local(vv[1], + llb.SessionID(c.BuildOpts().SessionID), + llb.FollowPaths([]string{dockerignoreFilename}), + llb.SharedKeyHint("context:"+name+"-"+dockerignoreFilename), + llb.WithCustomName("[context "+name+"] load "+dockerignoreFilename), + llb.Differ(llb.DiffNone, false), + ) + def, err := st.Marshal(ctx) + if err != nil { + return nil, nil, nil, err + } + res, err := c.Solve(ctx, client.SolveRequest{ + Evaluate: true, + Definition: def.ToPB(), + }) + if err != nil { + return nil, nil, nil, err + } + ref, err := res.SingleRef() + if err != nil { + return nil, nil, nil, err + } + dt, _ := ref.ReadFile(ctx, client.ReadRequest{ + Filename: dockerignoreFilename, + }) // error ignored + var excludes []string + if len(dt) != 0 { + excludes, err = dockerignore.ReadAll(bytes.NewBuffer(dt)) + if err != nil { + return nil, nil, nil, err + } + } + st = llb.Local(vv[1], + llb.WithCustomName("[context "+name+"] load from client"), + llb.SessionID(c.BuildOpts().SessionID), + llb.SharedKeyHint("context:"+name), + llb.ExcludePatterns(excludes), + ) + return &st, nil, nil, nil + case "input": + inputs, err := c.Inputs(ctx) + if err != nil { + return nil, nil, nil, err + } + st, ok := inputs[vv[1]] + if !ok { + return nil, nil, nil, errors.Errorf("invalid input %s for %s", vv[1], name) + } + md, ok := opts["input-metadata:"+vv[1]] + if ok { + m := make(map[string][]byte) + if err := json.Unmarshal([]byte(md), &m); err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md) + } + var bi *binfotypes.BuildInfo + if dtbi, ok := m[exptypes.ExporterBuildInfo]; ok { + var depbi binfotypes.BuildInfo + if err := json.Unmarshal(dtbi, &depbi); err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to parse buildinfo for %s", name) + } + bi = &binfotypes.BuildInfo{ + Deps: map[string]binfotypes.BuildInfo{ + strings.SplitN(vv[1], "::", 2)[0]: depbi, + }, + } + } + var img *dockerfile2llb.Image + if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok { + st, err = st.WithImageConfig(dtic) + if err != nil { + return nil, nil, nil, err + } + if err := json.Unmarshal(dtic, &img); err != nil { + return nil, nil, nil, errors.Wrapf(err, "failed to parse image config for %s", name) + } + } + return &st, img, bi, nil + } + return &st, nil, nil, nil + default: + return nil, nil, nil, errors.Errorf("unsupported context source %s for %s", vv[0], name) + } +} + func wrapSource(err error, sm *llb.SourceMap, ranges []parser.Range) error { if sm == nil { return err diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go index 279701154e..3c78cd56c4 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go +++ 
b/vendor/github.com/moby/buildkit/frontend/dockerfile/builder/caps.go @@ -12,6 +12,7 @@ import ( var enabledCaps = map[string]struct{}{ "moby.buildkit.frontend.inputs": {}, "moby.buildkit.frontend.subrequests": {}, + "moby.buildkit.frontend.contexts": {}, } func validateCaps(req string) (forward bool, err error) { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go index d31e35e25c..7ac6b9bdd7 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert.go @@ -16,17 +16,20 @@ import ( "github.com/containerd/containerd/platforms" "github.com/docker/distribution/reference" - "github.com/docker/docker/pkg/signal" "github.com/docker/go-connections/nat" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/client/llb/imagemetaresolver" "github.com/moby/buildkit/frontend/dockerfile/instructions" "github.com/moby/buildkit/frontend/dockerfile/parser" "github.com/moby/buildkit/frontend/dockerfile/shell" + "github.com/moby/buildkit/identity" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" + binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/util/suggest" "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/moby/sys/signal" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "golang.org/x/sync/errgroup" ) @@ -53,21 +56,49 @@ type ConvertOpt struct { // CacheIDNamespace scopes the IDs for different cache mounts CacheIDNamespace string ImageResolveMode llb.ResolveMode - TargetPlatform *specs.Platform - BuildPlatforms []specs.Platform + TargetPlatform *ocispecs.Platform + BuildPlatforms []ocispecs.Platform PrefixPlatform bool ExtraHosts []llb.HostIP + ShmSize int64 + Ulimit []pb.Ulimit + CgroupParent string ForceNetMode pb.NetMode OverrideCopyImage string LLBCaps *apicaps.CapSet ContextLocalName string SourceMap *llb.SourceMap Hostname string + Warn func(short, url string, detail [][]byte, location *parser.Range) + ContextByName func(context.Context, string) (*llb.State, *Image, *binfotypes.BuildInfo, error) } -func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, error) { +func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, *Image, *binfotypes.BuildInfo, error) { + buildInfo := &binfotypes.BuildInfo{} + contextByName := opt.ContextByName + opt.ContextByName = func(ctx context.Context, name string) (*llb.State, *Image, *binfotypes.BuildInfo, error) { + if !strings.EqualFold(name, "scratch") && !strings.EqualFold(name, "context") { + if contextByName != nil { + st, img, bi, err := contextByName(ctx, name) + if err != nil { + return nil, nil, nil, err + } + if bi != nil && bi.Deps != nil { + for k := range bi.Deps { + if buildInfo.Deps == nil { + buildInfo.Deps = make(map[string]binfotypes.BuildInfo) + } + buildInfo.Deps[k] = bi.Deps[k] + } + } + return st, img, bi, nil + } + } + return nil, nil, nil, nil + } + if len(dt) == 0 { - return nil, nil, errors.Errorf("the Dockerfile cannot be empty") + return nil, nil, nil, errors.Errorf("the Dockerfile cannot be empty") } if opt.ContextLocalName == "" { @@ -83,14 +114,18 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, dockerfile, err := parser.Parse(bytes.NewReader(dt)) 
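+	// parser warnings (dockerfile.Warnings) are forwarded below through
+	// opt.Warn, which builder/build.go wires to c.Warn, so the client can
+	// render them with their source locations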
if err != nil { - return nil, nil, err + return nil, nil, nil, err + } + + for _, w := range dockerfile.Warnings { + opt.Warn(w.Short, w.URL, w.Detail, w.Location) } proxyEnv := proxyEnvFromBuildArgs(opt.BuildArgs) stages, metaArgs, err := instructions.Parse(dockerfile.AST) if err != nil { - return nil, nil, err + return nil, nil, nil, err } shlex := shell.NewLex(dockerfile.EscapeToken) @@ -115,21 +150,41 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, for i, st := range stages { name, err := shlex.ProcessWordWithMap(st.BaseName, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, parser.WithLocation(err, st.Location) + return nil, nil, nil, parser.WithLocation(err, st.Location) } if name == "" { - return nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) + return nil, nil, nil, parser.WithLocation(errors.Errorf("base name (%s) should not be blank", st.BaseName), st.Location) } st.BaseName = name ds := &dispatchState{ - stage: st, deps: make(map[*dispatchState]struct{}), ctxPaths: make(map[string]struct{}), stageName: st.Name, prefixPlatform: opt.PrefixPlatform, } + if st.Name != "" { + s, img, bi, err := opt.ContextByName(ctx, st.Name) + if err != nil { + return nil, nil, nil, err + } + if s != nil { + ds.noinit = true + ds.state = *s + if img != nil { + ds.image = *img + } + if bi != nil { + ds.buildInfo = *bi + } + allDispatchStates.addState(ds) + continue + } + } + + ds.stage = st + if st.Name == "" { ds.stageName = fmt.Sprintf("stage-%d", i) } @@ -137,12 +192,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, if v := st.Platform; v != "" { v, err := shlex.ProcessWordWithMap(v, metaArgsToMap(optMetaArgs)) if err != nil { - return nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) + return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to process arguments for platform %s", v), st.Location) } p, err := platforms.Parse(v) if err != nil { - return nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) + return nil, nil, nil, parser.WithLocation(errors.Wrapf(err, "failed to parse platform %s", v), st.Location) } ds.platform = &p } @@ -184,7 +239,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, var ok bool target, ok = allDispatchStates.findStateByName(opt.Target) if !ok { - return nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) + return nil, nil, nil, errors.Errorf("target stage %s could not be found", opt.Target) } } @@ -194,7 +249,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, for i, cmd := range d.stage.Commands { newCmd, err := toCommand(cmd, allDispatchStates) if err != nil { - return nil, nil, err + return nil, nil, nil, err } d.commands[i] = newCmd for _, src := range newCmd.sources { @@ -209,36 +264,67 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if has, state := hasCircularDependency(allDispatchStates.states); has { - return nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) + return nil, nil, nil, errors.Errorf("circular dependency detected on stage: %s", state.stageName) } if len(allDispatchStates.states) == 1 { allDispatchStates.states[0].stageName = "" } + allStageNames := make([]string, 0, len(allDispatchStates.states)) + for _, s := range 
allDispatchStates.states { + if s.stageName != "" { + allStageNames = append(allStageNames, s.stageName) + } + } + eg, ctx := errgroup.WithContext(ctx) for i, d := range allDispatchStates.states { reachable := isReachable(target, d) // resolve image config for every stage - if d.base == nil { + if d.base == nil && !d.noinit { if d.stage.BaseName == emptyImageName { d.state = llb.Scratch() d.image = emptyImage(platformOpt.targetPlatform) continue } func(i int, d *dispatchState) { - eg.Go(func() error { + eg.Go(func() (err error) { + defer func() { + if err != nil { + err = parser.WithLocation(err, d.stage.Location) + } + }() + origName := d.stage.BaseName ref, err := reference.ParseNormalizedNamed(d.stage.BaseName) if err != nil { - return parser.WithLocation(errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName), d.stage.Location) + return errors.Wrapf(err, "failed to parse stage name %q", d.stage.BaseName) } platform := d.platform if platform == nil { platform = &platformOpt.targetPlatform } d.stage.BaseName = reference.TagNameOnly(ref).String() + var isScratch bool - if metaResolver != nil && reachable && !d.unregistered { + st, img, bi, err := opt.ContextByName(ctx, d.stage.BaseName) + if err != nil { + return err + } + if st != nil { + if img != nil { + d.image = *img + } else { + d.image = emptyImage(platformOpt.targetPlatform) + } + if bi != nil { + d.buildInfo = *bi + } + d.state = *st + d.platform = platform + return nil + } + if reachable { prefix := "[" if opt.PrefixPlatform && platform != nil { prefix += platforms.Format(*platform) + " " @@ -250,11 +336,11 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, LogName: fmt.Sprintf("%s load metadata for %s", prefix, d.stage.BaseName), }) if err != nil { - return err + return suggest.WrapError(errors.Wrap(err, origName), origName, append(allStageNames, commonImageNames()...), true) } var img Image if err := json.Unmarshal(dt, &img); err != nil { - return err + return errors.Wrap(err, "failed to parse image config") } img.Created = nil // if there is no explicit target platform, try to match based on image config @@ -262,7 +348,6 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, p := autoDetectPlatform(img, *platform, platformOpt.buildPlatforms) platform = &p } - d.image = img if dgst != "" { ref, err = reference.WithDigest(ref, dgst) if err != nil { @@ -280,6 +365,17 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } } } + if !isScratch { + // if image not scratch set original image name as ref + // and actual reference as alias in binfotypes.Source + d.buildInfo.Sources = append(d.buildInfo.Sources, binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: origName, + Alias: ref.String(), + Pin: dgst.String(), + }) + } + d.image = img } if isScratch { d.state = llb.Scratch() @@ -288,7 +384,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, dfCmd(d.stage.SourceCode), llb.Platform(*platform), opt.ImageResolveMode, - llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform)), + llb.WithCustomName(prefixCommand(d, "FROM "+d.stage.BaseName, opt.PrefixPlatform, platform, nil)), location(opt.SourceMap, d.stage.Location), ) } @@ -300,7 +396,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if err := eg.Wait(); err != nil { - return nil, nil, err + return nil, nil, nil, err } buildContext := &mutableOutput{} @@ 
-310,6 +406,20 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, if !isReachable(target, d) { continue } + + // collect build sources and dependencies + if len(d.buildInfo.Sources) > 0 { + buildInfo.Sources = append(buildInfo.Sources, d.buildInfo.Sources...) + } + if d.buildInfo.Deps != nil { + for name, bi := range d.buildInfo.Deps { + if buildInfo.Deps == nil { + buildInfo.Deps = make(map[string]binfotypes.BuildInfo) + } + buildInfo.Deps[name] = bi + } + } + if d.base != nil { d.state = d.base.state d.platform = d.base.platform @@ -335,12 +445,12 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if d.image.Config.WorkingDir != "" { if err = dispatchWorkdir(d, &instructions.WorkdirCommand{Path: d.image.Config.WorkingDir}, false, nil); err != nil { - return nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, nil, nil, parser.WithLocation(err, d.stage.Location) } } if d.image.Config.User != "" { if err = dispatchUser(d, &instructions.UserCommand{User: d.image.Config.User}, false); err != nil { - return nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, nil, nil, parser.WithLocation(err, d.stage.Location) } } d.state = d.state.Network(opt.ForceNetMode) @@ -357,6 +467,9 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, buildPlatforms: platformOpt.buildPlatforms, targetPlatform: platformOpt.targetPlatform, extraHosts: opt.ExtraHosts, + shmSize: opt.ShmSize, + ulimit: opt.Ulimit, + cgroupParent: opt.CgroupParent, copyImage: opt.OverrideCopyImage, llbCaps: opt.LLBCaps, sourceMap: opt.SourceMap, @@ -366,13 +479,13 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } if err = dispatchOnBuildTriggers(d, d.image.Config.OnBuild, opt); err != nil { - return nil, nil, parser.WithLocation(err, d.stage.Location) + return nil, nil, nil, parser.WithLocation(err, d.stage.Location) } d.image.Config.OnBuild = nil for _, cmd := range d.commands { if err := dispatch(d, cmd, opt); err != nil { - return nil, nil, parser.WithLocation(err, cmd.Location()) + return nil, nil, nil, parser.WithLocation(err, cmd.Location()) } } @@ -381,6 +494,13 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, } } + // sort build sources + if len(buildInfo.Sources) > 0 { + sort.Slice(buildInfo.Sources, func(i, j int) bool { + return buildInfo.Sources[i].Ref < buildInfo.Sources[j].Ref + }) + } + if len(opt.Labels) != 0 && target.image.Config.Labels == nil { target.image.Config.Labels = make(map[string]string, len(opt.Labels)) } @@ -418,7 +538,7 @@ func Dockerfile2LLB(ctx context.Context, dt []byte, opt ConvertOpt) (*llb.State, target.image.Variant = platformOpt.targetPlatform.Variant } - return &st, &target.image, nil + return &st, &target.image, buildInfo, nil } func metaArgsToMap(metaArgs []instructions.KeyValuePairOptional) map[string]string { @@ -472,9 +592,12 @@ type dispatchOpt struct { buildContext llb.State proxyEnv *llb.ProxyEnv cacheIDNamespace string - targetPlatform specs.Platform - buildPlatforms []specs.Platform + targetPlatform ocispecs.Platform + buildPlatforms []ocispecs.Platform extraHosts []llb.HostIP + shmSize int64 + ulimit []pb.Ulimit + cgroupParent string copyImage string llbCaps *apicaps.CapSet sourceMap *llb.SourceMap @@ -493,6 +616,21 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { return err } } + if ex, ok := cmd.Command.(instructions.SupportsSingleWordExpansionRaw); ok { + err := 
ex.ExpandRaw(func(word string) (string, error) { + env, err := d.state.Env(context.TODO()) + if err != nil { + return "", err + } + + lex := shell.NewLex('\\') + lex.SkipProcessQuotes = true + return lex.ProcessWord(word, env) + }) + if err != nil { + return err + } + } var err error switch c := cmd.Command.(type) { @@ -505,7 +643,17 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { case *instructions.WorkdirCommand: err = dispatchWorkdir(d, c, true, &opt) case *instructions.AddCommand: - err = dispatchCopy(d, c.SourcesAndDest, opt.buildContext, true, c, c.Chown, c.Chmod, c.Location(), opt) + err = dispatchCopy(d, copyConfig{ + params: c.SourcesAndDest, + source: opt.buildContext, + isAddCommand: true, + cmdToPrint: c, + chown: c.Chown, + chmod: c.Chmod, + link: c.Link, + location: c.Location(), + opt: opt, + }) if err == nil { for _, src := range c.SourcePaths { if !strings.HasPrefix(src, "http://") && !strings.HasPrefix(src, "https://") { @@ -540,7 +688,17 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { if len(cmd.sources) != 0 { l = cmd.sources[0].state } - err = dispatchCopy(d, c.SourcesAndDest, l, false, c, c.Chown, c.Chmod, c.Location(), opt) + err = dispatchCopy(d, copyConfig{ + params: c.SourcesAndDest, + source: l, + isAddCommand: false, + cmdToPrint: c, + chown: c.Chown, + chmod: c.Chmod, + link: c.Link, + location: c.Location(), + opt: opt, + }) if err == nil && len(cmd.sources) == 0 { for _, src := range c.SourcePaths { d.ctxPaths[path.Join("/", filepath.ToSlash(src))] = struct{}{} @@ -554,9 +712,10 @@ func dispatch(d *dispatchState, cmd command, opt dispatchOpt) error { type dispatchState struct { state llb.State image Image - platform *specs.Platform + platform *ocispecs.Platform stage instructions.Stage base *dispatchState + noinit bool deps map[*dispatchState]struct{} buildArgs []instructions.KeyValuePairOptional commands []command @@ -568,6 +727,7 @@ type dispatchState struct { cmdIndex int cmdTotal int prefixPlatform bool + buildInfo binfotypes.BuildInfo } type dispatchStates struct { @@ -649,9 +809,11 @@ func dispatchEnv(d *dispatchState, c *instructions.EnvCommand) error { func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyEnv, sources []*dispatchState, dopt dispatchOpt) error { var opt []llb.RunOption + customname := c.String() + var args []string = c.CmdLine if len(c.Files) > 0 { - if len(args) != 1 { + if len(args) != 1 || !c.PrependShell { return fmt.Errorf("parsing produced an invalid run command: %v", args) } @@ -669,12 +831,15 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE if c.Files[0].Chomp { data = parser.ChompHeredocContent(data) } - st := llb.Scratch().Dir(sourcePath).File(llb.Mkfile(f, 0755, []byte(data))) + st := llb.Scratch().Dir(sourcePath).File( + llb.Mkfile(f, 0755, []byte(data)), + WithInternalName("preparing inline document"), + ) mount := llb.AddMount(destPath, st, llb.SourcePath(sourcePath), llb.Readonly) opt = append(opt, mount) - args[0] = path.Join(destPath, f) + args = []string{path.Join(destPath, f)} } else { // Just a simple heredoc, so just run the contents in the // shell: this creates the effect of a "fake"-heredoc, so that @@ -685,14 +850,17 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE if c.Files[0].Chomp { data = parser.ChompHeredocContent(data) } - args[0] = data + args = []string{data} } + customname += fmt.Sprintf(" (%s)", summarizeHeredoc(c.Files[0].Data)) } else { // More complex 
heredoc, so reconstitute it, and pass it to the // shell to handle. + full := args[0] for _, file := range c.Files { - args[0] += "\n" + file.Data + file.Name + full += "\n" + file.Data + file.Name } + args = []string{full} } } if c.PrependShell { @@ -733,6 +901,12 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE opt = append(opt, networkOpt) } + if dopt.llbCaps != nil && dopt.llbCaps.Supports(pb.CapExecMetaUlimit) == nil { + for _, u := range dopt.ulimit { + opt = append(opt, llb.AddUlimit(llb.UlimitName(u.Name), u.Soft, u.Hard)) + } + } + shlex := *dopt.shlex shlex.RawQuotes = true shlex.SkipUnsetEnv = true @@ -741,10 +915,23 @@ func dispatchRun(d *dispatchState, c *instructions.RunCommand, proxy *llb.ProxyE if err != nil { return err } - opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(&shlex, c.String(), env)), d.prefixPlatform, pl))) + opt = append(opt, llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(&shlex, customname, env)), d.prefixPlatform, pl, env))) for _, h := range dopt.extraHosts { opt = append(opt, llb.AddExtraHost(h.Host, h.IP)) } + + if dopt.llbCaps != nil && dopt.llbCaps.Supports(pb.CapExecMountTmpfsSize) == nil { + if dopt.shmSize > 0 { + opt = append(opt, llb.AddMount("/dev/shm", llb.Scratch(), llb.Tmpfs(llb.TmpfsSize(dopt.shmSize)))) + } + } + + if dopt.llbCaps != nil && dopt.llbCaps.Supports(pb.CapExecMetaCgroupParent) == nil { + if len(dopt.cgroupParent) > 0 { + opt = append(opt, llb.WithCgroupParent(dopt.cgroupParent)) + } + } + d.state = d.state.Run(opt...).Root() return commitToHistory(&d.image, "RUN "+runCommandString(args, d.buildArgs, shell.BuildEnvs(env)), true, &d.state) } @@ -772,7 +959,7 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo return err } d.state = d.state.File(llb.Mkdir(wd, 0755, mkdirOpt...), - llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), env)), d.prefixPlatform, &platform)), + llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, c.String(), env)), d.prefixPlatform, &platform, env)), location(opt.sourceMap, c.Location()), ) withLayer = true @@ -782,25 +969,25 @@ func dispatchWorkdir(d *dispatchState, c *instructions.WorkdirCommand, commit bo return nil } -func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, chmod string, loc []parser.Range, opt dispatchOpt) error { - pp, err := pathRelativeToWorkingDir(d.state, c.DestPath) +func dispatchCopyFileOp(d *dispatchState, cfg copyConfig) error { + pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) if err != nil { return err } dest := path.Join("/", pp) - if c.DestPath == "." || c.DestPath == "" || c.DestPath[len(c.DestPath)-1] == filepath.Separator { + if cfg.params.DestPath == "." 
|| cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator { dest += string(filepath.Separator) } var copyOpt []llb.CopyOption - if chown != "" { - copyOpt = append(copyOpt, llb.WithUser(chown)) + if cfg.chown != "" { + copyOpt = append(copyOpt, llb.WithUser(cfg.chown)) } var mode *os.FileMode - if chmod != "" { - p, err := strconv.ParseUint(chmod, 8, 32) + if cfg.chmod != "" { + p, err := strconv.ParseUint(cfg.chmod, 8, 32) if err == nil { perm := os.FileMode(p) mode = &perm @@ -808,7 +995,7 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS } commitMessage := bytes.NewBufferString("") - if isAddCommand { + if cfg.isAddCommand { commitMessage.WriteString("ADD") } else { commitMessage.WriteString("COPY") @@ -816,10 +1003,10 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS var a *llb.FileAction - for _, src := range c.SourcePaths { + for _, src := range cfg.params.SourcePaths { commitMessage.WriteString(" " + src) if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !isAddCommand { + if !cfg.isAddCommand { return errors.New("source can't be a URL for COPY") } @@ -836,7 +1023,7 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS } } - st := llb.HTTP(src, llb.Filename(f), dfCmd(c)) + st := llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)) opts := append([]llb.CopyOption{&llb.CopyInfo{ Mode: mode, @@ -853,24 +1040,29 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS Mode: mode, FollowSymlinks: true, CopyDirContentsOnly: true, - AttemptUnpack: isAddCommand, + AttemptUnpack: cfg.isAddCommand, CreateDestPath: true, AllowWildcard: true, AllowEmptyWildcard: true, }}, copyOpt...) if a == nil { - a = llb.Copy(sourceState, filepath.Join("/", src), dest, opts...) + a = llb.Copy(cfg.source, filepath.Join("/", src), dest, opts...) } else { - a = a.Copy(sourceState, filepath.Join("/", src), dest, opts...) + a = a.Copy(cfg.source, filepath.Join("/", src), dest, opts...) } } } - for _, src := range c.SourceContents { + for _, src := range cfg.params.SourceContents { + commitMessage.WriteString(" <<" + src.Path) + data := src.Data f := src.Path - st := llb.Scratch().Dir("/").File(llb.Mkfile(f, 0664, []byte(data))) + st := llb.Scratch().File( + llb.Mkfile(f, 0664, []byte(data)), + WithInternalName("preparing inline document"), + ) opts := append([]llb.CopyOption{&llb.CopyInfo{ Mode: mode, @@ -884,9 +1076,9 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS } } - commitMessage.WriteString(" " + c.DestPath) + commitMessage.WriteString(" " + cfg.params.DestPath) - platform := opt.targetPlatform + platform := cfg.opt.targetPlatform if d.platform != nil { platform = *d.platform } @@ -896,50 +1088,81 @@ func dispatchCopyFileOp(d *dispatchState, c instructions.SourcesAndDest, sourceS return err } + name := uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env)) fileOpt := []llb.ConstraintsOpt{ - llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), env)), d.prefixPlatform, &platform)), - location(opt.sourceMap, loc), + llb.WithCustomName(prefixCommand(d, name, d.prefixPlatform, &platform, env)), + location(cfg.opt.sourceMap, cfg.location), } if d.ignoreCache { fileOpt = append(fileOpt, llb.IgnoreCache) } - d.state = d.state.File(a, fileOpt...) 
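+	// COPY/ADD --link: when the worker supports MergeOp, the copy runs against
+	// llb.Scratch() and the result is merged over the current state, so the
+	// new layer does not depend on the snapshot it lands on; an explicit
+	// --chmod still takes the plain FileOp path in the else branch below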
+ if cfg.opt.llbCaps.Supports(pb.CapMergeOp) == nil && cfg.link && cfg.chmod == "" { + pgID := identity.NewID() + d.cmdIndex-- // prefixCommand increases it + pgName := prefixCommand(d, name, d.prefixPlatform, &platform, env) + + var copyOpts []llb.ConstraintsOpt + copy(copyOpts, fileOpt) + copyOpts = append(copyOpts, llb.ProgressGroup(pgID, pgName, true)) + + var mergeOpts []llb.ConstraintsOpt + copy(mergeOpts, fileOpt) + d.cmdIndex-- + mergeOpts = append(mergeOpts, llb.ProgressGroup(pgID, pgName, false), llb.WithCustomName(prefixCommand(d, "LINK "+name, d.prefixPlatform, &platform, env))) + + d.state = d.state.WithOutput(llb.Merge([]llb.State{d.state, llb.Scratch().File(a, copyOpts...)}, mergeOpts...).Output()) + } else { + d.state = d.state.File(a, fileOpt...) + } + return commitToHistory(&d.image, commitMessage.String(), true, &d.state) } -func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState llb.State, isAddCommand bool, cmdToPrint fmt.Stringer, chown string, chmod string, loc []parser.Range, opt dispatchOpt) error { - if useFileOp(opt.buildArgValues, opt.llbCaps) { - return dispatchCopyFileOp(d, c, sourceState, isAddCommand, cmdToPrint, chown, chmod, loc, opt) +type copyConfig struct { + params instructions.SourcesAndDest + source llb.State + isAddCommand bool + cmdToPrint fmt.Stringer + chown string + chmod string + link bool + location []parser.Range + opt dispatchOpt +} + +func dispatchCopy(d *dispatchState, cfg copyConfig) error { + if useFileOp(cfg.opt.buildArgValues, cfg.opt.llbCaps) { + return dispatchCopyFileOp(d, cfg) } - if len(c.SourceContents) > 0 { + if len(cfg.params.SourceContents) > 0 { return errors.New("inline content copy is not supported") } - if chmod != "" { - if opt.llbCaps != nil && opt.llbCaps.Supports(pb.CapFileBase) != nil { - return errors.Wrap(opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported") + if cfg.chmod != "" { + if cfg.opt.llbCaps != nil && cfg.opt.llbCaps.Supports(pb.CapFileBase) != nil { + return errors.Wrap(cfg.opt.llbCaps.Supports(pb.CapFileBase), "chmod is not supported") } return errors.New("chmod is not supported") } - img := llb.Image(opt.copyImage, llb.MarkImageInternal, llb.Platform(opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) - pp, err := pathRelativeToWorkingDir(d.state, c.DestPath) + img := llb.Image(cfg.opt.copyImage, llb.MarkImageInternal, llb.Platform(cfg.opt.buildPlatforms[0]), WithInternalName("helper image for file operations")) + pp, err := pathRelativeToWorkingDir(d.state, cfg.params.DestPath) if err != nil { return err } dest := path.Join(".", pp) - if c.DestPath == "." || c.DestPath == "" || c.DestPath[len(c.DestPath)-1] == filepath.Separator { + if cfg.params.DestPath == "." 
|| cfg.params.DestPath == "" || cfg.params.DestPath[len(cfg.params.DestPath)-1] == filepath.Separator { dest += string(filepath.Separator) } args := []string{"copy"} - unpack := isAddCommand + unpack := cfg.isAddCommand - mounts := make([]llb.RunOption, 0, len(c.SourcePaths)) - if chown != "" { - args = append(args, fmt.Sprintf("--chown=%s", chown)) - _, _, err := parseUser(chown) + mounts := make([]llb.RunOption, 0, len(cfg.params.SourcePaths)) + if cfg.chown != "" { + args = append(args, fmt.Sprintf("--chown=%s", cfg.chown)) + _, _, err := parseUser(cfg.chown) if err != nil { mounts = append(mounts, llb.AddMount("/etc/passwd", d.state, llb.SourcePath("/etc/passwd"), llb.Readonly)) mounts = append(mounts, llb.AddMount("/etc/group", d.state, llb.SourcePath("/etc/group"), llb.Readonly)) @@ -947,16 +1170,16 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l } commitMessage := bytes.NewBufferString("") - if isAddCommand { + if cfg.isAddCommand { commitMessage.WriteString("ADD") } else { commitMessage.WriteString("COPY") } - for i, src := range c.SourcePaths { + for i, src := range cfg.params.SourcePaths { commitMessage.WriteString(" " + src) if strings.HasPrefix(src, "http://") || strings.HasPrefix(src, "https://") { - if !isAddCommand { + if !cfg.isAddCommand { return errors.New("source can't be a URL for COPY") } @@ -975,7 +1198,7 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l } target := path.Join(fmt.Sprintf("/src-%d", i), f) args = append(args, target) - mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(c)), llb.Readonly)) + mounts = append(mounts, llb.AddMount(path.Dir(target), llb.HTTP(src, llb.Filename(f), dfCmd(cfg.params)), llb.Readonly)) } else { d, f := splitWildcards(src) targetCmd := fmt.Sprintf("/src-%d", i) @@ -986,18 +1209,18 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l } targetCmd = path.Join(targetCmd, f) args = append(args, targetCmd) - mounts = append(mounts, llb.AddMount(targetMount, sourceState, llb.SourcePath(d), llb.Readonly)) + mounts = append(mounts, llb.AddMount(targetMount, cfg.source, llb.SourcePath(d), llb.Readonly)) } } - commitMessage.WriteString(" " + c.DestPath) + commitMessage.WriteString(" " + cfg.params.DestPath) args = append(args, dest) if unpack { args = append(args[:1], append([]string{"--unpack"}, args[1:]...)...) 
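+		// assumption: the copy helper expects flags before positional
+		// arguments, so --unpack is spliced in directly after the "copy"
+		// subcommand rather than appended after the sources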
} - platform := opt.targetPlatform + platform := cfg.opt.targetPlatform if d.platform != nil { platform = *d.platform } @@ -1011,16 +1234,16 @@ func dispatchCopy(d *dispatchState, c instructions.SourcesAndDest, sourceState l llb.Args(args), llb.Dir("/dest"), llb.ReadonlyRootFS(), - dfCmd(cmdToPrint), - llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(opt.shlex, cmdToPrint.String(), env)), d.prefixPlatform, &platform)), - location(opt.sourceMap, loc), + dfCmd(cfg.cmdToPrint), + llb.WithCustomName(prefixCommand(d, uppercaseCmd(processCmdEnv(cfg.opt.shlex, cfg.cmdToPrint.String(), env)), d.prefixPlatform, &platform, env)), + location(cfg.opt.sourceMap, cfg.location), } if d.ignoreCache { runOpt = append(runOpt, llb.IgnoreCache) } - if opt.llbCaps != nil { - if err := opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { + if cfg.opt.llbCaps != nil { + if err := cfg.opt.llbCaps.Supports(pb.CapExecMetaNetwork); err == nil { runOpt = append(runOpt, llb.Network(llb.NetModeNone)) } } @@ -1279,7 +1502,7 @@ func commitToHistory(img *Image, msg string, withLayer bool, st *llb.State) erro msg += " # buildkit" } - img.History = append(img.History, specs.History{ + img.History = append(img.History, ocispecs.History{ CreatedBy: msg, Comment: historyComment, EmptyLayer: !withLayer, @@ -1433,7 +1656,7 @@ func withShell(img Image, args []string) []string { return append(shell, strings.Join(args, " ")) } -func autoDetectPlatform(img Image, target specs.Platform, supported []specs.Platform) specs.Platform { +func autoDetectPlatform(img Image, target ocispecs.Platform, supported []ocispecs.Platform) ocispecs.Platform { os := img.OS arch := img.Architecture if target.OS == os && target.Architecture == arch { @@ -1465,13 +1688,13 @@ func processCmdEnv(shlex *shell.Lex, cmd string, env []string) string { return w } -func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *specs.Platform) string { +func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform *ocispecs.Platform, env []string) string { if ds.cmdTotal == 0 { return str } out := "[" if prefixPlatform && platform != nil { - out += platforms.Format(*platform) + " " + out += platforms.Format(*platform) + formatTargetPlatform(*platform, platformFromEnv(env)) + " " } if ds.stageName != "" { out += ds.stageName + " " @@ -1481,6 +1704,62 @@ func prefixCommand(ds *dispatchState, str string, prefixPlatform bool, platform return out + str } +// formatTargetPlatform formats a secondary platform string for cross compilation cases +func formatTargetPlatform(base ocispecs.Platform, target *ocispecs.Platform) string { + if target == nil { + return "" + } + if target.OS == "" { + target.OS = base.OS + } + if target.Architecture == "" { + target.Architecture = base.Architecture + } + p := platforms.Normalize(*target) + + if p.OS == base.OS && p.Architecture != base.Architecture { + archVariant := p.Architecture + if p.Variant != "" { + archVariant += "/" + p.Variant + } + return "->" + archVariant + } + if p.OS != base.OS { + return "->" + platforms.Format(p) + } + return "" +} + +// platformFromEnv returns defined platforms based on TARGET* environment variables +func platformFromEnv(env []string) *ocispecs.Platform { + var p ocispecs.Platform + var set bool + for _, v := range env { + parts := strings.SplitN(v, "=", 2) + switch parts[0] { + case "TARGETPLATFORM": + p, err := platforms.Parse(parts[1]) + if err != nil { + continue + } + return &p + case "TARGETOS": + p.OS = parts[1] + set = true + case "TARGETARCH": + p.Architecture = parts[1] + set = true + case "TARGETVARIANT": + p.Variant = parts[1] + set = true + } + } + if !set { + return nil + } + return &p +} + func useFileOp(args map[string]string, caps *apicaps.CapSet) bool { enabled := true if v, ok := args["BUILDKIT_DISABLE_FILEOP"]; ok { @@ -1507,3 +1786,24 @@ func location(sm *llb.SourceMap, locations []parser.Range) llb.ConstraintsOpt { } return sm.Location(loc) } + +func summarizeHeredoc(doc string) string { + doc = strings.TrimSpace(doc) + lines := strings.Split(strings.ReplaceAll(doc, "\r\n", "\n"), "\n") + summary := lines[0] + if len(lines) > 1 { + summary += "..." + } + return summary +} + +func commonImageNames() []string { + repos := []string{ + "alpine", "busybox", "centos", "debian", "golang", "ubuntu", "fedora", + } + out := make([]string, 0, len(repos)*4) + for _, name := range repos { + out = append(out, name, "docker.io/library/"+name, name+":latest", "docker.io/library/"+name+":latest") + } + return out +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go deleted file mode 100644 index 300b9d85f1..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunnetwork.go +++ /dev/null @@ -1,12 +0,0 @@ -// +build !dfrunnetwork - -package dockerfile2llb - -import ( - "github.com/moby/buildkit/client/llb" - "github.com/moby/buildkit/frontend/dockerfile/instructions" -) - -func dispatchRunNetwork(c *instructions.RunCommand) (llb.RunOption, error) { - return nil, nil -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go index 10184b42e0..f2583ddef2 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_norunsecurity.go @@ -1,3 +1,4 @@ +//go:build !dfrunsecurity // +build !dfrunsecurity package dockerfile2llb diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go index 87589860fe..7777fba91a 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runmount.go @@ -95,7 +95,9 @@ func dispatchRunMounts(d *dispatchState, c *instructions.RunCommand, sources []* var mountOpts []llb.MountOption if mount.Type == instructions.MountTypeTmpfs { st = llb.Scratch() - mountOpts = append(mountOpts, llb.Tmpfs()) + mountOpts = append(mountOpts, llb.Tmpfs( + llb.TmpfsSize(mount.SizeLimit), + )) } if mount.Type == instructions.MountTypeSecret { secret, err := dispatchSecret(mount) diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go index 01313e23be..e09153560a 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runnetwork.go @@ -1,5 +1,3 @@ -// +build dfrunnetwork - package dockerfile2llb import (
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go index 764424a2c1..55c675e797 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/convert_runsecurity.go @@ -1,3 +1,4 @@ +//go:build dfrunsecurity // +build dfrunsecurity package dockerfile2llb diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go index 6eb4cf6e7a..d4c82700e3 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/image.go @@ -5,7 +5,7 @@ import ( "github.com/docker/docker/api/types/strslice" "github.com/moby/buildkit/util/system" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) // HealthConfig holds configuration settings for the HEALTHCHECK feature. @@ -31,7 +31,7 @@ type HealthConfig struct { // ImageConfig is a docker compatible config for an image type ImageConfig struct { - specs.ImageConfig + ocispecs.ImageConfig Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific) @@ -46,7 +46,7 @@ type ImageConfig struct { // Image is the JSON structure which describes some basic information about the image. // This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON. type Image struct { - specs.Image + ocispecs.Image // Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"` @@ -64,9 +64,9 @@ func clone(src Image) Image { return img } -func emptyImage(platform specs.Platform) Image { +func emptyImage(platform ocispecs.Platform) Image { img := Image{ - Image: specs.Image{ + Image: ocispecs.Image{ Architecture: platform.Architecture, OS: platform.OS, }, diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go index e1ef78f8bd..bf8f639c72 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/dockerfile2llb/platform.go @@ -3,12 +3,12 @@ package dockerfile2llb import ( "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/frontend/dockerfile/instructions" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) type platformOpt struct { - targetPlatform specs.Platform - buildPlatforms []specs.Platform + targetPlatform ocispecs.Platform + buildPlatforms []ocispecs.Platform implicitTarget bool } @@ -18,10 +18,10 @@ func buildPlatformOpt(opt *ConvertOpt) *platformOpt { implicitTargetPlatform := false if opt.TargetPlatform != nil && opt.BuildPlatforms == nil { - buildPlatforms = []specs.Platform{*opt.TargetPlatform} + buildPlatforms = []ocispecs.Platform{*opt.TargetPlatform} } if len(buildPlatforms) == 0 { - buildPlatforms = []specs.Platform{platforms.DefaultSpec()} + buildPlatforms = []ocispecs.Platform{platforms.DefaultSpec()} } if opt.TargetPlatform == nil { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go index 66c53add05..1cfbf76000 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/bflag.go @@ -3,6 +3,8 @@ package instructions import ( "fmt" "strings" + + "github.com/moby/buildkit/util/suggest" ) // FlagType is the type of the build flag @@ -139,12 +141,12 @@ func (bf *BFlags) Parse() error { // go ahead and bubble it back up here since we didn't do it // earlier in the processing if bf.Err != nil { - return fmt.Errorf("Error setting up flags: %s", bf.Err) + return fmt.Errorf("error setting up flags: %s", bf.Err) } for _, arg := range bf.Args { if !strings.HasPrefix(arg, "--") { - return fmt.Errorf("Arg should start with -- : %s", arg) + return fmt.Errorf("arg should start with -- : %s", arg) } if arg == "--" { @@ -162,11 +164,11 @@ func (bf *BFlags) Parse() error { flag, ok := bf.flags[arg] if !ok { - return fmt.Errorf("Unknown flag: %s", arg) + return suggest.WrapError(fmt.Errorf("unknown flag: %s", arg), arg, allFlags(bf.flags), true) } if _, ok = bf.used[arg]; ok && flag.flagType != stringsType { - return fmt.Errorf("Duplicate flag specified: %s", arg) + return fmt.Errorf("duplicate flag specified: %s", arg) } bf.used[arg] = flag @@ -175,7 +177,7 @@ func (bf *BFlags) Parse() error { case boolType: // value == "" is only ok if no "=" was specified if index >= 0 && value == "" { - return fmt.Errorf("Missing a value on flag: %s", arg) + return fmt.Errorf("missing a value on flag: %s", arg) } lower := strings.ToLower(value) @@ -184,26 +186,33 @@ func (bf *BFlags) Parse() error { } else if lower == "true" || lower == "false" { flag.Value = lower } else { - return fmt.Errorf("Expecting boolean value for flag %s, 
not: %s", arg, value) + return fmt.Errorf("expecting boolean value for flag %s, not: %s", arg, value) } case stringType: if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) + return fmt.Errorf("missing a value on flag: %s", arg) } flag.Value = value case stringsType: if index < 0 { - return fmt.Errorf("Missing a value on flag: %s", arg) + return fmt.Errorf("missing a value on flag: %s", arg) } flag.StringValues = append(flag.StringValues, value) default: panic("No idea what kind of flag we have! Should never get here!") } - } return nil } + +func allFlags(flags map[string]*Flag) []string { + var names []string + for name := range flags { + names = append(names, name) + } + return names +} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go index c7d5c49599..48ebf183a9 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands.go @@ -71,11 +71,18 @@ func newWithNameAndCode(req parseRequest) withNameAndCode { // SingleWordExpander is a provider for variable expansion where 1 word => 1 output type SingleWordExpander func(word string) (string, error) -// SupportsSingleWordExpansion interface marks a command as supporting variable expansion +// SupportsSingleWordExpansion interface marks a command as supporting variable +// expansion type SupportsSingleWordExpansion interface { Expand(expander SingleWordExpander) error } +// SupportsSingleWordExpansionRaw interface marks a command as supporting +// variable expansion, while ensuring that quotes are preserved +type SupportsSingleWordExpansionRaw interface { + ExpandRaw(expander SingleWordExpander) error +} + // PlatformSpecific adds platform checks to a command type PlatformSpecific interface { CheckPlatform(platform string) error @@ -180,18 +187,6 @@ type SourcesAndDest struct { } func (s *SourcesAndDest) Expand(expander SingleWordExpander) error { - for i, content := range s.SourceContents { - if !content.Expand { - continue - } - - expandedData, err := expander(content.Data) - if err != nil { - return err - } - s.SourceContents[i].Data = expandedData - } - err := expandSliceInPlace(s.SourcePaths, expander) if err != nil { return err @@ -206,6 +201,21 @@ func (s *SourcesAndDest) Expand(expander SingleWordExpander) error { return nil } +func (s *SourcesAndDest) ExpandRaw(expander SingleWordExpander) error { + for i, content := range s.SourceContents { + if !content.Expand { + continue + } + + expandedData, err := expander(content.Data) + if err != nil { + return err + } + s.SourceContents[i].Data = expandedData + } + return nil +} + // AddCommand : ADD foo /path // // Add the file 'foo' to '/path'. 
Tarball and Remote URL (http, https) handling @@ -216,6 +226,7 @@ type AddCommand struct { SourcesAndDest Chown string Chmod string + Link bool } // Expand variables @@ -239,6 +250,7 @@ type CopyCommand struct { From string Chown string Chmod string + Link bool } // Expand variables diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go index 2499677f4b..517ded7d67 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runmount.go @@ -6,6 +6,8 @@ import ( "strconv" "strings" + dockeropts "github.com/docker/docker/opts" + "github.com/moby/buildkit/util/suggest" "github.com/pkg/errors" ) @@ -57,6 +59,20 @@ func isValidMountType(s string) bool { return ok } +func allMountTypes() []string { + types := make([]string, 0, len(allowedMountTypes)+2) + for k := range allowedMountTypes { + types = append(types, k) + } + if isSecretMountsSupported() { + types = append(types, "secret") + } + if isSSHMountsSupported() { + types = append(types, "ssh") + } + return types +} + func runMountPreHook(cmd *RunCommand, req parseRequest) error { st := &mountState{} st.flag = req.flags.AddStrings("mount") @@ -108,6 +124,7 @@ type Mount struct { Source string Target string ReadOnly bool + SizeLimit int64 CacheID string CacheSharing string Required bool @@ -132,6 +149,9 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { key := strings.ToLower(parts[0]) if len(parts) == 1 { + if expander == nil { + continue // evaluate later + } switch key { case "readonly", "ro": m.ReadOnly = true @@ -173,10 +193,11 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { // if we don't have an expander, defer evaluation to later continue } + switch key { case "type": if !isValidMountType(strings.ToLower(value)) { - return nil, errors.Errorf("unsupported mount type %q", value) + return nil, suggest.WrapError(errors.Errorf("unsupported mount type %q", value), value, allMountTypes(), true) } m.Type = strings.ToLower(value) case "from": @@ -208,6 +229,16 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } else { return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) } + case "size": + if m.Type == "tmpfs" { + tmpfsSize := new(dockeropts.MemBytes) + if err := tmpfsSize.Set(value); err != nil { + return nil, errors.Errorf("invalid value for %s: %s", key, value) + } + m.SizeLimit = tmpfsSize.Value() + } else { + return nil, errors.Errorf("unexpected key '%s' for mount type '%s'", key, m.Type) + } case "id": m.CacheID = value case "sharing": @@ -234,7 +265,10 @@ func parseMount(value string, expander SingleWordExpander) (*Mount, error) { } m.GID = &gid default: - return nil, errors.Errorf("unexpected key '%s' in '%s'", key, field) + allKeys := []string{ + "type", "from", "source", "target", "readonly", "id", "sharing", "required", "size", "mode", "uid", "gid", "src", "dst", "ro", "rw", "readwrite", + } + return nil, suggest.WrapError(errors.Errorf("unexpected key '%s' in '%s'", key, field), key, allKeys, true) } }
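The new size key above is parsed with the daemon's MemBytes option type, so a Dockerfile can bound a tmpfs mount with RUN --mount=type=tmpfs,target=/scratch,size=128m and have the human-readable suffix converted to a byte count in Mount.SizeLimit. A small sketch of just that conversion (assumes only the vendored github.com/docker/docker/opts package):

package main

import (
	"fmt"

	dockeropts "github.com/docker/docker/opts"
)

func main() {
	// Same conversion the size= key goes through for a tmpfs mount.
	var size dockeropts.MemBytes
	if err := size.Set("128m"); err != nil {
		panic(err)
	}
	fmt.Println(size.Value()) // 134217728 (128 * 1024 * 1024)
}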
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go index adef3fd7aa..142c3075b5 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runnetwork.go @@ -1,5 +1,3 @@ -// +build dfrunnetwork - package instructions import ( diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go index 0c0be80664..ce243b6d83 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/commands_runsecurity.go @@ -1,3 +1,4 @@ +//go:build dfrunsecurity // +build dfrunsecurity package instructions diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go index 0b03b34cd1..610aed7cc0 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/errors_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package instructions diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go index 7a6c69e383..d3b7326ce2 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/instructions/parse.go @@ -12,6 +12,7 @@ import ( "github.com/docker/docker/api/types/strslice" "github.com/moby/buildkit/frontend/dockerfile/command" "github.com/moby/buildkit/frontend/dockerfile/parser" + "github.com/moby/buildkit/util/suggest" "github.com/pkg/errors" ) @@ -63,7 +64,7 @@ func ParseInstruction(node *parser.Node) (v interface{}, err error) { err = parser.WithLocation(err, node.Location()) }() req := newParseRequestFromNode(node) - switch node.Value { + switch strings.ToLower(node.Value) { case command.Env: return parseEnv(req) case command.Maintainer: @@ -101,8 +102,7 @@ func ParseInstruction(node *parser.Node) (v interface{}, err error) { case command.Shell: return parseShell(req) } - - return nil, &UnknownInstruction{Instruction: node.Value, Line: node.StartLine} + return nil, suggest.WrapError(&UnknownInstructionError{Instruction: node.Value, Line: node.StartLine}, node.Value, allInstructionNames(), false) } // ParseCommand converts an AST to a typed Command @@ -117,14 +117,14 @@ func ParseCommand(node *parser.Node) (Command, error) { return nil, parser.WithLocation(errors.Errorf("%T is not a command type", s), node.Location()) } -// UnknownInstruction represents an error occurring when a command is unresolvable -type UnknownInstruction struct { +// UnknownInstructionError represents an error occurring when a command is unresolvable +type UnknownInstructionError struct { Line int Instruction string } -func (e *UnknownInstruction) Error() string { - return fmt.Sprintf("unknown instruction: %s", strings.ToUpper(e.Instruction)) +func (e *UnknownInstructionError) Error() string { - return fmt.Sprintf("unknown instruction: %s", e.Instruction) } type parseError struct { @@ -133,7 +133,7 @@ type parseError struct { } func (e *parseError) Error() string { - return fmt.Sprintf("dockerfile parse error line %d: %v", e.node.StartLine, e.inner.Error()) + return fmt.Sprintf("dockerfile parse error on line %d: %v", e.node.StartLine, e.inner.Error()) } func
(e *parseError) Unwrap() error { @@ -167,7 +167,6 @@ func Parse(ast *parser.Node) (stages []Stage, metaArgs []ArgCommand, err error) default: return nil, nil, parser.WithLocation(errors.Errorf("%T is not a command type", cmd), n.Location()) } - } return stages, metaArgs, nil } @@ -193,7 +192,6 @@ func parseKvps(args []string, cmdName string) (KeyValuePairs, error) { } func parseEnv(req parseRequest) (*EnvCommand, error) { - if err := req.flags.Parse(); err != nil { return nil, err } @@ -222,7 +220,6 @@ func parseMaintainer(req parseRequest) (*MaintainerCommand, error) { } func parseLabel(req parseRequest) (*LabelCommand, error) { - if err := req.flags.Parse(); err != nil { return nil, err } @@ -283,6 +280,7 @@ func parseAdd(req parseRequest) (*AddCommand, error) { } flChown := req.flags.AddString("chown", "") flChmod := req.flags.AddString("chmod", "") + flLink := req.flags.AddBool("link", false) if err := req.flags.Parse(); err != nil { return nil, err } @@ -297,6 +295,7 @@ func parseAdd(req parseRequest) (*AddCommand, error) { SourcesAndDest: *sourcesAndDest, Chown: flChown.Value, Chmod: flChmod.Value, + Link: flLink.Value == "true", }, nil } @@ -307,6 +306,7 @@ func parseCopy(req parseRequest) (*CopyCommand, error) { flChown := req.flags.AddString("chown", "") flFrom := req.flags.AddString("from", "") flChmod := req.flags.AddString("chmod", "") + flLink := req.flags.AddBool("link", false) if err := req.flags.Parse(); err != nil { return nil, err } @@ -322,6 +322,7 @@ func parseCopy(req parseRequest) (*CopyCommand, error) { From: flFrom.Value, Chown: flChown.Value, Chmod: flChmod.Value, + Link: flLink.Value == "true", }, nil } @@ -346,7 +347,6 @@ func parseFrom(req parseRequest) (*Stage, error) { Location: req.location, Comment: getComment(req.comments, stageName), }, nil - } func parseBuildStageName(args []string) (string, error) { @@ -381,11 +381,14 @@ func parseOnBuild(req parseRequest) (*OnbuildCommand, error) { } original := regexp.MustCompile(`(?i)^\s*ONBUILD\s*`).ReplaceAllString(req.original, "") + for _, heredoc := range req.heredocs { + original += "\n" + heredoc.Content + heredoc.Name + } + return &OnbuildCommand{ Expression: original, withNameAndCode: newWithNameAndCode(req), }, nil - } func parseWorkdir(req parseRequest) (*WorkdirCommand, error) { @@ -401,7 +404,6 @@ func parseWorkdir(req parseRequest) (*WorkdirCommand, error) { Path: req.args[0], withNameAndCode: newWithNameAndCode(req), }, nil - } func parseShellDependentCommand(req parseRequest, command string, emptyAsNil bool) (ShellDependantCmdLine, error) { @@ -525,7 +527,6 @@ func parseHealthcheck(req parseRequest) (*HealthCheckCommand, error) { Test: test, } } else { - healthcheck := container.HealthConfig{} flInterval := req.flags.AddString("interval", "") @@ -642,7 +643,6 @@ func parseVolume(req parseRequest) (*VolumeCommand, error) { cmd.Volumes = append(cmd.Volumes, v) } return cmd, nil - } func parseStopSignal(req parseRequest) (*StopSignalCommand, error) { @@ -656,7 +656,6 @@ func parseStopSignal(req parseRequest) (*StopSignalCommand, error) { withNameAndCode: newWithNameAndCode(req), } return cmd, nil - } func parseArg(req parseRequest) (*ArgCommand, error) { @@ -752,3 +751,13 @@ func getComment(comments []string, name string) string { } return "" } + +func allInstructionNames() []string { + out := make([]string, len(command.Commands)) + i := 0 + for name := range command.Commands { + out[i] = strings.ToUpper(name) + i++ + } + return out +} diff --git 
a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go index 083c75f89f..53165e0a48 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go @@ -50,7 +50,7 @@ func (node *Node) Location() []Range { // Returns a string suitable for printing. func (node *Node) Dump() string { str := "" - str += node.Value + str += strings.ToLower(node.Value) if len(node.Flags) > 0 { str += fmt.Sprintf(" %q", node.Flags) @@ -77,10 +77,17 @@ func (node *Node) lines(start, end int) { } func (node *Node) canContainHeredoc() bool { - if _, allowedDirective := heredocDirectives[node.Value]; !allowedDirective { + // check for compound commands, like ONBUILD + if ok := heredocCompoundDirectives[strings.ToLower(node.Value)]; ok { + if node.Next != nil && len(node.Next.Children) > 0 { + node = node.Next.Children[0] + } + } + + if ok := heredocDirectives[strings.ToLower(node.Value)]; !ok { return false } - if _, isJSON := node.Attributes["json"]; isJSON { + if isJSON := node.Attributes["json"]; isJSON { return false } @@ -106,13 +113,12 @@ type Heredoc struct { } var ( - dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) - heredocDirectives map[string]bool - reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) - reDirectives = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) - reComment = regexp.MustCompile(`^#.*$`) - reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)(['"]?)([a-zA-Z][a-zA-Z0-9]*)(['"]?)$`) - reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) + dispatch map[string]func(string, *directives) (*Node, map[string]bool, error) + reWhitespace = regexp.MustCompile(`[\t\v\f\r ]+`) + reDirectives = regexp.MustCompile(`^#\s*([a-zA-Z][a-zA-Z0-9]*)\s*=\s*(.+?)\s*$`) + reComment = regexp.MustCompile(`^#.*$`) + reHeredoc = regexp.MustCompile(`^(\d*)<<(-?)([^<]*)$`) + reLeadingTabs = regexp.MustCompile(`(?m)^\t+`) ) // DefaultEscapeToken is the default escape token @@ -123,6 +129,20 @@ var validDirectives = map[string]struct{}{ "syntax": {}, } +var ( + // Directives allowed to contain heredocs + heredocDirectives = map[string]bool{ + command.Add: true, + command.Copy: true, + command.Run: true, + } + + // Directives allowed to contain directives containing heredocs + heredocCompoundDirectives = map[string]bool{ + command.Onbuild: true, + } +) + // directive is the structure used during a build run to hold the state of // parsing directives. 
type directives struct { @@ -234,7 +254,7 @@ func newNodeFromLine(line string, d *directives, comments []string) (*Node, erro return nil, err } - fn := dispatch[cmd] + fn := dispatch[strings.ToLower(cmd)] // Ignore invalid Dockerfile instructions if fn == nil { fn = parseIgnore @@ -258,7 +278,14 @@ func newNodeFromLine(line string, d *directives, comments []string) (*Node, erro type Result struct { AST *Node EscapeToken rune - Warnings []string + Warnings []Warning +} + +type Warning struct { + Short string + Detail [][]byte + URL string + Location *Range } // PrintWarnings to the writer @@ -266,7 +293,12 @@ func (r *Result) PrintWarnings(out io.Writer) { if len(r.Warnings) == 0 { return } - fmt.Fprintf(out, strings.Join(r.Warnings, "\n")+"\n") + for _, w := range r.Warnings { + fmt.Fprintf(out, "[WARNING]: %s\n", w.Short) + } + if len(r.Warnings) > 0 { + fmt.Fprintf(out, "[WARNING]: Empty continuation lines will become errors in a future release.\n") + } } // Parse reads lines from a Reader, parses the lines into an AST and returns @@ -277,7 +309,7 @@ func Parse(rwc io.Reader) (*Result, error) { root := &Node{StartLine: -1} scanner := bufio.NewScanner(rwc) scanner.Split(scanLines) - warnings := []string{} + warnings := []Warning{} var comments []string var err error @@ -330,7 +362,12 @@ func Parse(rwc io.Reader) (*Result, error) { } if hasEmptyContinuationLine { - warnings = append(warnings, "[WARNING]: Empty continuation line found in:\n "+line) + warnings = append(warnings, Warning{ + Short: "Empty continuation line found in: " + line, + Detail: [][]byte{[]byte("Empty continuation lines will become errors in a future release")}, + URL: "https://github.com/moby/moby/pull/33719", + Location: &Range{Start: Position{Line: currentLine}, End: Position{Line: currentLine}}, + }) } child, err := newNodeFromLine(line, d, comments) @@ -373,10 +410,6 @@ func Parse(rwc io.Reader) (*Result, error) { comments = nil } - if len(warnings) > 0 { - warnings = append(warnings, "[WARNING]: Empty continuation lines will become errors in a future release.") - } - if root.StartLine < 0 { return nil, withLocation(errors.New("file with no instructions"), currentLine, 0) } @@ -388,30 +421,57 @@ func Parse(rwc io.Reader) (*Result, error) { }, withLocation(handleScannerError(scanner.Err()), currentLine, 0) } +// Extracts a heredoc from a possible heredoc regex match func heredocFromMatch(match []string) (*Heredoc, error) { if len(match) == 0 { return nil, nil } - fileDescriptor, _ := strconv.ParseUint(match[1], 10, 0) + fd, _ := strconv.ParseUint(match[1], 10, 0) chomp := match[2] == "-" - quoteOpen := match[3] - name := match[4] - quoteClose := match[5] + rest := match[3] - expand := true - if quoteOpen != "" || quoteClose != "" { - if quoteOpen != quoteClose { - return nil, errors.New("quoted heredoc quotes do not match") - } - expand = false + if len(rest) == 0 { + return nil, nil } + shlex := shell.NewLex('\\') + shlex.SkipUnsetEnv = true + + // Attempt to parse the heredoc both with *and* without quotes. + // If there are quotes in one but not the other, then we know that some + // part of the heredoc word is quoted, so we shouldn't expand the content.
+ shlex.RawQuotes = false + words, err := shlex.ProcessWords(rest, []string{}) + if err != nil { + return nil, err + } + // quick sanity check that rest is a single word + if len(words) != 1 { + return nil, nil + } + + shlex.RawQuotes = true + wordsRaw, err := shlex.ProcessWords(rest, []string{}) + if err != nil { + return nil, err + } + if len(wordsRaw) != len(words) { + return nil, fmt.Errorf("internal lexing of heredoc produced inconsistent results: %s", rest) + } + + word := words[0] + wordQuoteCount := strings.Count(word, `'`) + strings.Count(word, `"`) + wordRaw := wordsRaw[0] + wordRawQuoteCount := strings.Count(wordRaw, `'`) + strings.Count(wordRaw, `"`) + + expand := wordQuoteCount == wordRawQuoteCount + return &Heredoc{ - Name: name, + Name: word, Expand: expand, Chomp: chomp, - FileDescriptor: uint(fileDescriptor), + FileDescriptor: uint(fd), }, nil } @@ -426,6 +486,8 @@ func MustParseHeredoc(src string) *Heredoc { func heredocsFromLine(line string) ([]Heredoc, error) { shlex := shell.NewLex('\\') shlex.RawQuotes = true + shlex.RawEscapes = true + shlex.SkipUnsetEnv = true words, _ := shlex.ProcessWords(line, []string{}) var docs []Heredoc diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_heredoc.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_heredoc.go deleted file mode 100644 index a053b9a2ac..0000000000 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser_heredoc.go +++ /dev/null @@ -1,13 +0,0 @@ -// +build dfheredoc - -package parser - -import "github.com/moby/buildkit/frontend/dockerfile/command" - -func init() { - heredocDirectives = map[string]bool{ - command.Add: true, - command.Copy: true, - command.Run: true, - } -} diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go index 3378167181..c0261652f8 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go @@ -13,7 +13,6 @@ func splitCommand(line string) (string, []string, string, error) { // Make sure we get the same results irrespective of leading/trailing spaces cmdline := reWhitespace.Split(strings.TrimSpace(line), 2) - cmd := strings.ToLower(cmdline[0]) if len(cmdline) == 2 { var err error @@ -23,7 +22,7 @@ } } - return cmd, flags, strings.TrimSpace(args), nil + return cmdline[0], flags, strings.TrimSpace(args), nil } func extractBuilderFlags(line string) (string, []string, error) { diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go index 36903ec58d..bf0887f236 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/equal_env_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package shell
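heredocFromMatch above detects quoting by lexing the delimiter word twice, once with quotes processed and once raw: if the two results carry different quote counts, part of the word was quoted, and the heredoc body is not expanded. A quick illustration through the exported helper (assumes the vendored parser package; the printed layout is illustrative):

package main

import (
	"fmt"

	"github.com/moby/buildkit/frontend/dockerfile/parser"
)

func main() {
	for _, src := range []string{"<<EOF", "<<-EOF", "<<'EOF'", `<<"EOF"`} {
		h := parser.MustParseHeredoc(src)
		// Chomp reflects the <<- form; Expand is false whenever the
		// delimiter word contains any quoting.
		fmt.Printf("%-9s name=%s chomp=%-5v expand=%v\n", src, h.Name, h.Chomp, h.Expand)
	}
}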
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go index d65913fffb..23ab81f25c 100644 --- a/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go +++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/shell/lex.go @@ -18,9 +18,11 @@ import ( // It doesn't support all flavors of ${xx:...} formats but new ones can // be added by adding code to the "special ${} format processing" section type Lex struct { - escapeToken rune - RawQuotes bool - SkipUnsetEnv bool + escapeToken rune + RawQuotes bool + RawEscapes bool + SkipProcessQuotes bool + SkipUnsetEnv bool } // NewLex creates a new Lex which uses escapeToken to escape quotes. @@ -54,28 +56,47 @@ func (s *Lex) ProcessWordWithMap(word string, env map[string]string) (string, er return word, err } +// ProcessWordWithMatches will use the 'env' list of environment variables, +// replace any env var references in 'word', and return the names of the env vars that were used. +func (s *Lex) ProcessWordWithMatches(word string, env map[string]string) (string, map[string]struct{}, error) { + sw := s.init(word, env) + word, _, err := sw.process(word) + return word, sw.matches, err +} + func (s *Lex) ProcessWordsWithMap(word string, env map[string]string) ([]string, error) { _, words, err := s.process(word, env) return words, err } -func (s *Lex) process(word string, env map[string]string) (string, []string, error) { +func (s *Lex) init(word string, env map[string]string) *shellWord { sw := &shellWord{ - envs: env, - escapeToken: s.escapeToken, - skipUnsetEnv: s.SkipUnsetEnv, - rawQuotes: s.RawQuotes, + envs: env, + escapeToken: s.escapeToken, + skipUnsetEnv: s.SkipUnsetEnv, + skipProcessQuotes: s.SkipProcessQuotes, + rawQuotes: s.RawQuotes, + rawEscapes: s.RawEscapes, + matches: make(map[string]struct{}), } sw.scanner.Init(strings.NewReader(word)) + return sw +} + +func (s *Lex) process(word string, env map[string]string) (string, []string, error) { + sw := s.init(word, env) return sw.process(word) } type shellWord struct { - scanner scanner.Scanner - envs map[string]string - escapeToken rune - rawQuotes bool - skipUnsetEnv bool + scanner scanner.Scanner + envs map[string]string + escapeToken rune + rawQuotes bool + rawEscapes bool + skipUnsetEnv bool + skipProcessQuotes bool + matches map[string]struct{} } func (sw *shellWord) process(source string) (string, []string, error) { @@ -138,9 +159,11 @@ func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { var words wordsStruct var charFuncMapping = map[rune]func() (string, error){ - '\'': sw.processSingleQuote, - '"': sw.processDoubleQuote, - '$': sw.processDollar, + '$': sw.processDollar, + } + if !sw.skipProcessQuotes { + charFuncMapping['\''] = sw.processSingleQuote + charFuncMapping['"'] = sw.processDoubleQuote } for sw.scanner.Peek() != scanner.EOF { @@ -168,6 +191,11 @@ func (sw *shellWord) processStopOn(stopChar rune) (string, []string, error) { ch = sw.scanner.Next() if ch == sw.escapeToken { + if sw.rawEscapes { + words.addRawChar(ch) + result.WriteRune(ch) + } + // '\' (default escape token, but ` allowed) escapes, except end of line ch = sw.scanner.Next() @@ -260,6 +288,10 @@ func (sw *shellWord) processDoubleQuote() (string, error) { default: ch := sw.scanner.Next() if ch == sw.escapeToken { + if sw.rawEscapes { + result.WriteRune(ch) + } + switch sw.scanner.Peek() { case scanner.EOF: // Ignore \ at end of word @@ -439,6 +471,7 @@ func isSpecialParam(char rune) bool { func (sw *shellWord) getEnv(name string) (string, bool) { for key, value := range sw.envs { if EqualEnvKeys(name, key) { + sw.matches[name] = struct{}{} return value, true } } diff --git a/vendor/github.com/moby/buildkit/frontend/frontend.go b/vendor/github.com/moby/buildkit/frontend/frontend.go index 091813a097..dedda54c61 100644 --- a/vendor/github.com/moby/buildkit/frontend/frontend.go +++ b/vendor/github.com/moby/buildkit/frontend/frontend.go @@ -17,8 +17,11 @@
type Frontend interface { type FrontendLLBBridge interface { Solve(ctx context.Context, req SolveRequest, sid string) (*Result, error) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error) + Warn(ctx context.Context, dgst digest.Digest, msg string, opts WarnOpts) error } type SolveRequest = gw.SolveRequest type CacheOptionsEntry = gw.CacheOptionsEntry + +type WarnOpts = gw.WarnOpts diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go index 09d86cad82..61bc018ff5 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/client/client.go @@ -3,12 +3,13 @@ package client import ( "context" "io" + "syscall" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" fstypes "github.com/tonistiigi/fsutil/types" ) @@ -18,6 +19,7 @@ type Client interface { BuildOpts() BuildOpts Inputs(ctx context.Context) (map[string]llb.State, error) NewContainer(ctx context.Context, req NewContainerRequest) (Container, error) + Warn(ctx context.Context, dgst digest.Digest, msg string, opts WarnOpts) error } // NewContainerRequest encapsulates the requirements for a client to define a @@ -25,6 +27,7 @@ type Client interface { type NewContainerRequest struct { Mounts []Mount NetMode pb.NetMode + ExtraHosts []*pb.HostIP Platform *pb.Platform Constraints *pb.WorkerConstraints } @@ -74,7 +77,7 @@ type WinSize struct { type ContainerProcess interface { Wait() error Resize(ctx context.Context, size WinSize) error - // TODO Signal(ctx context.Context, sig os.Signal) + Signal(ctx context.Context, sig syscall.Signal) error } type Reference interface { @@ -121,7 +124,7 @@ type CacheOptionsEntry struct { type WorkerInfo struct { ID string Labels map[string]string - Platforms []specs.Platform + Platforms []ocispecs.Platform } type BuildOpts struct { @@ -132,3 +135,11 @@ type BuildOpts struct { LLBCaps apicaps.CapSet Caps apicaps.CapSet } + +type WarnOpts struct { + Level int + SourceInfo *pb.SourceInfo + Range []*pb.Range + Detail [][]byte + URL string +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/container.go b/vendor/github.com/moby/buildkit/frontend/gateway/container.go index f234401f20..824a503ffb 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/container.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/container.go @@ -8,6 +8,9 @@ import ( "sort" "strings" "sync" + "syscall" + + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/executor" @@ -20,13 +23,13 @@ import ( utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/worker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/sync/errgroup" ) type NewContainerRequest struct { ContainerID string NetMode opspb.NetMode + ExtraHosts []executor.HostIP Mounts []Mount Platform *opspb.Platform Constraints *opspb.WorkerConstraints @@ -51,13 +54,14 @@ func NewContainer(ctx context.Context, w worker.Worker, sm *session.Manager, g s platform = *req.Platform } ctr := &gatewayContainer{ - id: req.ContainerID, - netMode: req.NetMode, - platform: platform, - executor: w.Executor(), - errGroup: eg, - ctx: ctx, - 
cancel: cancel, + id: req.ContainerID, + netMode: req.NetMode, + extraHosts: req.ExtraHosts, + platform: platform, + executor: w.Executor(), + errGroup: eg, + ctx: ctx, + cancel: cancel, } var ( @@ -75,14 +79,13 @@ func NewContainer(ctx context.Context, w worker.Worker, sm *session.Manager, g s } name := fmt.Sprintf("container %s", req.ContainerID) - mm := mounts.NewMountManager(name, w.CacheManager(), sm, w.MetadataStore()) + mm := mounts.NewMountManager(name, w.CacheManager(), sm) p, err := PrepareMounts(ctx, mm, w.CacheManager(), g, "", mnts, refs, func(m *opspb.Mount, ref cache.ImmutableRef) (cache.MutableRef, error) { cm := w.CacheManager() if m.Input != opspb.Empty { cm = refs[m.Input].Worker.CacheManager() } return cm.New(ctx, ref, g) - }) if err != nil { for i := len(p.Actives) - 1; i >= 0; i-- { // call in LIFO order @@ -208,7 +211,7 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage } case opspb.MountType_TMPFS: - mountable = mm.MountableTmpFS() + mountable = mm.MountableTmpFS(m) case opspb.MountType_SECRET: var err error mountable, err = mm.MountableSecret(ctx, m, g) @@ -275,22 +278,24 @@ func PrepareMounts(ctx context.Context, mm *mounts.MountManager, cm cache.Manage } type gatewayContainer struct { - id string - netMode opspb.NetMode - platform opspb.Platform - rootFS executor.Mount - mounts []executor.Mount - executor executor.Executor - started bool - errGroup *errgroup.Group - mu sync.Mutex - cleanup []func() error - ctx context.Context - cancel func() + id string + netMode opspb.NetMode + extraHosts []executor.HostIP + platform opspb.Platform + rootFS executor.Mount + mounts []executor.Mount + executor executor.Executor + started bool + errGroup *errgroup.Group + mu sync.Mutex + cleanup []func() error + ctx context.Context + cancel func() } func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartRequest) (client.ContainerProcess, error) { resize := make(chan executor.WinSize) + signal := make(chan syscall.Signal) procInfo := executor.ProcessInfo{ Meta: executor.Meta{ Args: req.Args, @@ -299,12 +304,14 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques Cwd: req.Cwd, Tty: req.Tty, NetMode: gwCtr.netMode, + ExtraHosts: gwCtr.extraHosts, SecurityMode: req.SecurityMode, }, Stdin: req.Stdin, Stdout: req.Stdout, Stderr: req.Stderr, Resize: resize, + Signal: signal, } if procInfo.Meta.Cwd == "" { procInfo.Meta.Cwd = "/" @@ -324,6 +331,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques eg, ctx := errgroup.WithContext(gwCtr.ctx) gwProc := &gatewayContainerProcess{ resize: resize, + signal: signal, errGroup: eg, groupCtx: ctx, } @@ -331,7 +339,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques if !started { startedCh := make(chan struct{}) gwProc.errGroup.Go(func() error { - logrus.Debugf("Starting new container for %s with args: %q", gwCtr.id, procInfo.Meta.Args) + bklog.G(gwCtr.ctx).Debugf("Starting new container for %s with args: %q", gwCtr.id, procInfo.Meta.Args) err := gwCtr.executor.Run(ctx, gwCtr.id, gwCtr.rootFS, gwCtr.mounts, procInfo, startedCh) return stack.Enable(err) }) @@ -341,7 +349,7 @@ func (gwCtr *gatewayContainer) Start(ctx context.Context, req client.StartReques } } else { gwProc.errGroup.Go(func() error { - logrus.Debugf("Execing into container %s with args: %q", gwCtr.id, procInfo.Meta.Args) + bklog.G(gwCtr.ctx).Debugf("Execing into container %s with args: %q", gwCtr.id, procInfo.Meta.Args) err := 
gwCtr.executor.Exec(ctx, gwCtr.id, procInfo) return stack.Enable(err) }) @@ -374,6 +382,7 @@ type gatewayContainerProcess struct { errGroup *errgroup.Group groupCtx context.Context resize chan<- executor.WinSize + signal chan<- syscall.Signal mu sync.Mutex } @@ -382,6 +391,7 @@ func (gwProc *gatewayContainerProcess) Wait() error { gwProc.mu.Lock() defer gwProc.mu.Unlock() close(gwProc.resize) + close(gwProc.signal) return err } @@ -389,7 +399,7 @@ func (gwProc *gatewayContainerProcess) Resize(ctx context.Context, size client.W gwProc.mu.Lock() defer gwProc.mu.Unlock() - // is the container done or should we proceed with sending event? + // is the container done or should we proceed with sending event? select { case <-gwProc.groupCtx.Done(): return nil @@ -410,6 +420,31 @@ func (gwProc *gatewayContainerProcess) Resize(ctx context.Context, size client.W return nil } +func (gwProc *gatewayContainerProcess) Signal(ctx context.Context, sig syscall.Signal) error { + gwProc.mu.Lock() + defer gwProc.mu.Unlock() + + // is the container done or should we proceed with sending event? + select { + case <-gwProc.groupCtx.Done(): + return nil + case <-ctx.Done(): + return nil + default: + } + + // now we select on contexts again in case p.signal blocks b/c + // container no longer reading from it. In that case when + // the errgroup finishes we want to unblock on the write + // and exit + select { + case <-gwProc.groupCtx.Done(): + case <-ctx.Done(): + case gwProc.signal <- sig: + } + return nil +} + func addDefaultEnvvar(env []string, k, v string) []string { for _, e := range env { if strings.HasPrefix(e, k+"=") { diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go index 1476ae69a8..2964ad02bd 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/forwarder/forward.go @@ -19,6 +19,7 @@ import ( opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" "github.com/moby/buildkit/worker" + digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" fstypes "github.com/tonistiigi/fsutil/types" "golang.org/x/sync/errgroup" @@ -216,6 +217,10 @@ func (c *bridgeClient) discard(err error) { } } +func (c *bridgeClient) Warn(ctx context.Context, dgst digest.Digest, msg string, opts client.WarnOpts) error { + return c.FrontendLLBBridge.Warn(ctx, dgst, msg, opts) +} + func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainerRequest) (client.Container, error) { ctrReq := gateway.NewContainerRequest{ ContainerID: identity.NewID(), @@ -272,6 +277,11 @@ func (c *bridgeClient) NewContainer(ctx context.Context, req client.NewContainer return nil, err } + ctrReq.ExtraHosts, err = gateway.ParseExtraHosts(req.ExtraHosts) + if err != nil { + return nil, err + } + w, err := c.workers.GetDefault() if err != nil { return nil, err diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go index 1e6e486906..842e3252f3 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/gateway.go @@ -7,12 +7,16 @@ import ( "io" "net" "os" + "path/filepath" "strconv" "strings" "sync" + "syscall" "time" + "github.com/containerd/containerd/mount" "github.com/docker/distribution/reference" + "github.com/docker/docker/pkg/idtools" 
"github.com/gogo/googleapis/google/rpc" gogotypes "github.com/gogo/protobuf/types" "github.com/golang/protobuf/ptypes/any" @@ -25,23 +29,25 @@ import ( "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" gwclient "github.com/moby/buildkit/frontend/gateway/client" - gwerrdefs "github.com/moby/buildkit/frontend/gateway/errdefs" pb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/errdefs" llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/buildkit/util/stack" "github.com/moby/buildkit/util/tracing" "github.com/moby/buildkit/worker" - "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/moby/sys/signal" + digest "github.com/opencontainers/go-digest" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "golang.org/x/net/http2" "golang.org/x/sync/errgroup" spb "google.golang.org/genproto/googleapis/rpc/status" @@ -84,11 +90,13 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten } _, isDevel := opts[keyDevel] - var img specs.Image + var img ocispecs.Image var mfstDigest digest.Digest var rootFS cache.MutableRef var readonly bool // TODO: try to switch to read-only by default. + var frontendDef *opspb.Definition + if isDevel { devRes, err := llbBridge.Solve(ctx, frontend.SolveRequest{ @@ -107,10 +115,12 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten if devRes.Ref == nil { return nil, errors.Errorf("development gateway didn't return default result") } + frontendDef = devRes.Ref.Definition() res, err := devRes.Ref.Result(ctx) if err != nil { return nil, err } + workerRef, ok := res.Sys().(*worker.WorkerRef) if !ok { return nil, errors.Errorf("invalid ref: %T", res.Sys()) @@ -170,8 +180,8 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten }() if res.Ref == nil { return nil, errors.Errorf("gateway source didn't return default result") - } + frontendDef = res.Ref.Definition() r, err := res.Ref.Result(ctx) if err != nil { return nil, err @@ -253,7 +263,19 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten return nil, err } - err = w.Executor().Run(ctx, "", mountWithSession(rootFS, session.NewGroup(sid)), nil, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) + mdmnt, release, err := metadataMount(frontendDef) + if err != nil { + return nil, err + } + if release != nil { + defer release() + } + var mnts []executor.Mount + if mdmnt != nil { + mnts = append(mnts, *mdmnt) + } + + err = w.Executor().Run(ctx, "", mountWithSession(rootFS, session.NewGroup(sid)), mnts, executor.ProcessInfo{Meta: meta, Stdin: lbf.Stdin, Stdout: lbf.Stdout, Stderr: os.Stderr}, nil) if err != nil { if errdefs.IsCanceled(err) && lbf.isErrServerClosed { @@ -273,6 +295,53 @@ func (gf *gatewayFrontend) Solve(ctx context.Context, llbBridge frontend.Fronten return lbf.Result() } +func metadataMount(def *opspb.Definition) (*executor.Mount, func(), error) { + dt, err := def.Marshal() + if err 
!= nil { + return nil, nil, err + } + dir, err := os.MkdirTemp("", "buildkit-metadata") + if err != nil { + return nil, nil, err + } + + if err := os.WriteFile(filepath.Join(dir, "frontend.bin"), dt, 0400); err != nil { + return nil, nil, err + } + + return &executor.Mount{ + Src: &bind{dir}, + Dest: "/run/config/buildkit/metadata", + Readonly: true, + }, func() { + os.RemoveAll(dir) + }, nil +} + +type bind struct { + dir string +} + +func (b *bind) Mount(ctx context.Context, readonly bool) (snapshot.Mountable, error) { + return &bindMount{b.dir, readonly}, nil +} + +type bindMount struct { + dir string + readonly bool +} + +func (b *bindMount) Mount() ([]mount.Mount, func() error, error) { + return []mount.Mount{{ + Type: "bind", + Source: b.dir, + Options: []string{"bind", "ro"}, + }}, func() error { return nil }, nil +} +func (b *bindMount) IdentityMapping() *idtools.IdentityMapping { + return nil +} + func (lbf *llbBridgeForwarder) Discard() { lbf.mu.Lock() defer lbf.mu.Unlock() @@ -460,9 +529,9 @@ type llbBridgeForwarder struct { func (lbf *llbBridgeForwarder) ResolveImageConfig(ctx context.Context, req *pb.ResolveImageConfigRequest) (*pb.ResolveImageConfigResponse, error) { ctx = tracing.ContextWithSpanFromContext(ctx, lbf.callCtx) - var platform *specs.Platform + var platform *ocispecs.Platform if p := req.Platform; p != nil { - platform = &specs.Platform{ + platform = &ocispecs.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, @@ -595,6 +664,16 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { + dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), ref.BuildSources()) + if err != nil { + return nil, err + } + if dtbi != nil && len(dtbi) > 0 { + if pbRes.Metadata == nil { + pbRes.Metadata = make(map[string][]byte) + } + pbRes.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi + } lbf.refs[id] = ref } ids[k] = id @@ -618,6 +697,16 @@ func (lbf *llbBridgeForwarder) Solve(ctx context.Context, req *pb.SolveRequest) if ref == nil { id = "" } else { + dtbi, err := buildinfo.Encode(ctx, pbRes.Metadata, exptypes.ExporterBuildInfo, ref.BuildSources()) + if err != nil { + return nil, err + } + if dtbi != nil && len(dtbi) > 0 { + if pbRes.Metadata == nil { + pbRes.Metadata = make(map[string][]byte) + } + pbRes.Metadata[exptypes.ExporterBuildInfo] = dtbi + } def = ref.Definition() lbf.refs[id] = ref } @@ -669,7 +758,7 @@ func (lbf *llbBridgeForwarder) getImmutableRef(ctx context.Context, id, path str return nil, errors.Errorf("no such ref: %v", id) } if ref == nil { - return nil, errors.Wrapf(os.ErrNotExist, "%s not found", path) + return nil, errors.Wrap(os.ErrNotExist, path) } r, err := ref.Result(ctx) @@ -760,7 +849,6 @@ func (lbf *llbBridgeForwarder) StatFile(ctx context.Context, req *pb.StatFileReq } func (lbf *llbBridgeForwarder) Ping(context.Context, *pb.PingRequest) (*pb.PongResponse, error) { - workers := lbf.workers.WorkerInfos() pbWorkers := make([]*apitypes.WorkerRecord, 0, len(workers)) for _, w := range workers { @@ -834,7 +922,7 @@ func (lbf *llbBridgeForwarder) Inputs(ctx context.Context, in *pb.InputsRequest) } func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewContainerRequest) (_ *pb.NewContainerResponse, err error) { - logrus.Debugf("|<--- NewContainer %s", in.ContainerID) + bklog.G(ctx).Debugf("|<--- NewContainer %s", in.ContainerID) ctrReq := NewContainerRequest{ ContainerID: in.ContainerID, 
NetMode: in.Network, @@ -863,7 +951,6 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta return nil, errors.Errorf("invalid reference %T", res.Sys()) } } - } ctrReq.Mounts = append(ctrReq.Mounts, Mount{ WorkerRef: workerRef, @@ -888,6 +975,11 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta return nil, stack.Enable(err) } + ctrReq.ExtraHosts, err = ParseExtraHosts(in.ExtraHosts) + if err != nil { + return nil, stack.Enable(err) + } + ctr, err := NewContainer(context.Background(), w, lbf.sm, group, ctrReq) if err != nil { return nil, stack.Enable(err) @@ -909,7 +1001,7 @@ func (lbf *llbBridgeForwarder) NewContainer(ctx context.Context, in *pb.NewConta } func (lbf *llbBridgeForwarder) ReleaseContainer(ctx context.Context, in *pb.ReleaseContainerRequest) (*pb.ReleaseContainerResponse, error) { - logrus.Debugf("|<--- ReleaseContainer %s", in.ContainerID) + bklog.G(ctx).Debugf("|<--- ReleaseContainer %s", in.ContainerID) lbf.ctrsMu.Lock() ctr, ok := lbf.ctrs[in.ContainerID] delete(lbf.ctrs, in.ContainerID) @@ -921,10 +1013,25 @@ func (lbf *llbBridgeForwarder) ReleaseContainer(ctx context.Context, in *pb.Rele return &pb.ReleaseContainerResponse{}, stack.Enable(err) } +func (lbf *llbBridgeForwarder) Warn(ctx context.Context, in *pb.WarnRequest) (*pb.WarnResponse, error) { + err := lbf.llbBridge.Warn(ctx, in.Digest, string(in.Short), frontend.WarnOpts{ + Level: int(in.Level), + SourceInfo: in.Info, + Range: in.Ranges, + Detail: in.Detail, + URL: in.Url, + }) + if err != nil { + return nil, err + } + return &pb.WarnResponse{}, nil +} + type processIO struct { id string mu sync.Mutex resize func(context.Context, gwclient.WinSize) error + signal func(context.Context, syscall.Signal) error done chan struct{} doneOnce sync.Once // these track the process side of the io pipe for @@ -1024,7 +1131,7 @@ type outputWriter struct { } func (w *outputWriter) Write(msg []byte) (int, error) { - logrus.Debugf("|---> File Message %s, fd=%d, %d bytes", w.processID, w.fd, len(msg)) + bklog.G(w.stream.Context()).Debugf("|---> File Message %s, fd=%d, %d bytes", w.processID, w.fd, len(msg)) err := w.stream.Send(&pb.ExecMessage{ ProcessID: w.processID, Input: &pb.ExecMessage_File{ @@ -1054,15 +1161,17 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e } switch m := execMsg.GetInput().(type) { case *pb.ExecMessage_Init: - logrus.Debugf("|<--- Init Message %s", execMsg.ProcessID) + bklog.G(ctx).Debugf("|<--- Init Message %s", execMsg.ProcessID) case *pb.ExecMessage_File: if m.File.EOF { - logrus.Debugf("|<--- File Message %s, fd=%d, EOF", execMsg.ProcessID, m.File.Fd) + bklog.G(ctx).Debugf("|<--- File Message %s, fd=%d, EOF", execMsg.ProcessID, m.File.Fd) } else { - logrus.Debugf("|<--- File Message %s, fd=%d, %d bytes", execMsg.ProcessID, m.File.Fd, len(m.File.Data)) + bklog.G(ctx).Debugf("|<--- File Message %s, fd=%d, %d bytes", execMsg.ProcessID, m.File.Fd, len(m.File.Data)) } case *pb.ExecMessage_Resize: - logrus.Debugf("|<--- Resize Message %s", execMsg.ProcessID) + bklog.G(ctx).Debugf("|<--- Resize Message %s", execMsg.ProcessID) + case *pb.ExecMessage_Signal: + bklog.G(ctx).Debugf("|<--- Signal Message %s: %s", execMsg.ProcessID, m.Signal.Name) } select { case <-ctx.Done(): @@ -1115,6 +1224,15 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e Cols: resize.Cols, Rows: resize.Rows, }) + } else if sig := execMsg.GetSignal(); sig != nil { + if !pioFound { + return 
stack.Enable(status.Errorf(codes.NotFound, "IO for process %q not found", pid)) + } + syscallSignal, ok := signal.SignalMap[sig.Name] + if !ok { + return stack.Enable(status.Errorf(codes.InvalidArgument, "Unknown signal %s", sig.Name)) + } + pio.signal(ctx, syscallSignal) + } else if init := execMsg.GetInit(); init != nil { + if pioFound { + return stack.Enable(status.Errorf(codes.AlreadyExists, "Process %s already exists", pid)) @@ -1134,23 +1252,25 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e pios[pid] = pio proc, err := ctr.Start(initCtx, gwclient.StartRequest{ - Args: init.Meta.Args, - Env: init.Meta.Env, - User: init.Meta.User, - Cwd: init.Meta.Cwd, - Tty: init.Tty, - Stdin: pio.processReaders[0], - Stdout: pio.processWriters[1], - Stderr: pio.processWriters[2], + Args: init.Meta.Args, + Env: init.Meta.Env, + User: init.Meta.User, + Cwd: init.Meta.Cwd, + Tty: init.Tty, + Stdin: pio.processReaders[0], + Stdout: pio.processWriters[1], + Stderr: pio.processWriters[2], + SecurityMode: init.Security, }) if err != nil { return stack.Enable(err) } pio.resize = proc.Resize + pio.signal = proc.Signal eg.Go(func() error { <-pio.done - logrus.Debugf("|---> Done Message %s", pid) + bklog.G(ctx).Debugf("|---> Done Message %s", pid) err := srv.Send(&pb.ExecMessage{ ProcessID: pid, Input: &pb.ExecMessage_Done{ @@ -1167,10 +1287,10 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e err := proc.Wait() var statusCode uint32 - var exitError *gwerrdefs.ExitError + var exitError *pb.ExitError var statusError *rpc.Status if err != nil { - statusCode = gwerrdefs.UnknownExitStatus + statusCode = pb.UnknownExitStatus st, _ := status.FromError(grpcerrors.ToGRPC(err)) stp := st.Proto() statusError = &rpc.Status{ @@ -1182,7 +1302,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e if errors.As(err, &exitError) { statusCode = exitError.ExitCode } - logrus.Debugf("|---> Exit Message %s, code=%d, error=%s", pid, statusCode, err) + bklog.G(ctx).Debugf("|---> Exit Message %s, code=%d, error=%s", pid, statusCode, err) sendErr := srv.Send(&pb.ExecMessage{ ProcessID: pid, Input: &pb.ExecMessage_Exit{ @@ -1207,7 +1327,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e return stack.Enable(err) }) - logrus.Debugf("|---> Started Message %s", pid) + bklog.G(ctx).Debugf("|---> Started Message %s", pid) err = srv.Send(&pb.ExecMessage{ ProcessID: pid, Input: &pb.ExecMessage_Started{ @@ -1246,7 +1366,7 @@ func (lbf *llbBridgeForwarder) ExecProcess(srv pb.LLBBridge_ExecProcessServer) e return stack.Enable(err) } // no error so must be EOF - logrus.Debugf("|---> File Message %s, fd=%d, EOF", pid, fd) + bklog.G(ctx).Debugf("|---> File Message %s, fd=%d, EOF", pid, fd) err = srv.Send(&pb.ExecMessage{ ProcessID: pid, Input: &pb.ExecMessage_File{ @@ -1288,7 +1408,7 @@ func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { <-ctx.Done() conn.Close() }() - logrus.Debugf("serving grpc connection") + bklog.G(ctx).Debugf("serving grpc connection") (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) }
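The hunks above and below form the two halves of the new signal support: the forwarder resolves the platform-independent signal name via signal.SignalMap before invoking pio.signal, and the vendored client (next file) performs the reverse lookup from syscall.Signal to a name. A minimal sketch of how a gateway client might use the new Signal method, assuming a container already created via NewContainer; stopWithGrace, the sleep command, and the 10-second grace period are illustrative, not part of this diff:

// Illustrative only: stop a gateway-exec'd process with SIGTERM, escalating
// to SIGKILL if it does not exit in time. Signal names, not numbers, cross
// the wire (see SignalMessage below), so only signals present in
// moby/sys/signal.SignalMap can be delivered.
package example

import (
	"context"
	"syscall"
	"time"

	gwclient "github.com/moby/buildkit/frontend/gateway/client"
)

func stopWithGrace(ctx context.Context, ctr gwclient.Container) error {
	proc, err := ctr.Start(ctx, gwclient.StartRequest{
		Args: []string{"sleep", "3600"},
	})
	if err != nil {
		return err
	}
	if err := proc.Signal(ctx, syscall.SIGTERM); err != nil {
		return err
	}
	done := make(chan error, 1)
	go func() { done <- proc.Wait() }()
	select {
	case err := <-done:
		return err // typically a *pb.ExitError carrying the exit code
	case <-time.After(10 * time.Second):
		return proc.Signal(ctx, syscall.SIGKILL)
	}
}

Daemons that predate CapGatewayExecSignals (registered in caps.go further down) will not understand the Signal message, so callers should gate on that capability.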
"os" "strings" "sync" + "syscall" "time" "github.com/gogo/googleapis/google/rpc" @@ -16,20 +17,21 @@ import ( "github.com/golang/protobuf/ptypes/any" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/frontend/gateway/client" - "github.com/moby/buildkit/frontend/gateway/errdefs" pb "github.com/moby/buildkit/frontend/gateway/pb" "github.com/moby/buildkit/identity" opspb "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/apicaps" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/grpcerrors" + "github.com/moby/sys/signal" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" fstypes "github.com/tonistiigi/fsutil/types" "golang.org/x/sync/errgroup" spb "google.golang.org/genproto/googleapis/rpc/status" "google.golang.org/grpc" "google.golang.org/grpc/codes" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/status" ) @@ -298,6 +300,19 @@ func (c *grpcClient) requestForRef(ref client.Reference) (*pb.SolveRequest, erro return req, nil } +func (c *grpcClient) Warn(ctx context.Context, dgst digest.Digest, msg string, opts client.WarnOpts) error { + _, err := c.client.Warn(ctx, &pb.WarnRequest{ + Digest: dgst, + Level: int64(opts.Level), + Short: []byte(msg), + Info: opts.SourceInfo, + Ranges: opts.Range, + Detail: opts.Detail, + Url: opts.URL, + }) + return err +} + func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *client.Result, err error) { if creq.Definition != nil { for _, md := range creq.Definition.Metadata { @@ -326,6 +341,24 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res * } } + // these options are added by go client in solve() + if _, ok := creq.FrontendOpt["cache-imports"]; !ok { + if v, ok := c.opts["cache-imports"]; ok { + if creq.FrontendOpt == nil { + creq.FrontendOpt = map[string]string{} + } + creq.FrontendOpt["cache-imports"] = v + } + } + if _, ok := creq.FrontendOpt["cache-from"]; !ok { + if v, ok := c.opts["cache-from"]; ok { + if creq.FrontendOpt == nil { + creq.FrontendOpt = map[string]string{} + } + creq.FrontendOpt["cache-from"] = v + } + } + req := &pb.SolveRequest{ Definition: creq.Definition, Frontend: creq.Frontend, @@ -457,6 +490,27 @@ func (c *grpcClient) BuildOpts() client.BuildOpts { } } +func (c *grpcClient) CurrentFrontend() (*llb.State, error) { + fp := "/run/config/buildkit/metadata/frontend.bin" + if _, err := os.Stat(fp); err != nil { + return nil, nil + } + dt, err := os.ReadFile(fp) + if err != nil { + return nil, err + } + var def opspb.Definition + if err := def.Unmarshal(dt); err != nil { + return nil, err + } + op, err := llb.NewDefinitionOp(&def) + if err != nil { + return nil, err + } + st := llb.NewState(op) + return &st, nil +} + func (c *grpcClient) Inputs(ctx context.Context) (map[string]llb.State, error) { err := c.caps.Supports(pb.CapFrontendInputs) if err != nil { @@ -576,7 +630,7 @@ func (m *messageForwarder) Start() (err error) { if errors.Is(err, io.EOF) || grpcerrors.Code(err) == codes.Canceled { return nil } - logrus.Debugf("|<--- %s", debugMessage(msg)) + bklog.G(m.ctx).Debugf("|<--- %s", debugMessage(msg)) if err != nil { return err @@ -587,7 +641,7 @@ func (m *messageForwarder) Start() (err error) { m.mu.Unlock() if !ok { - logrus.Debugf("Received exec message for unregistered process: %s", msg.String()) + bklog.G(m.ctx).Debugf("Received exec message for unregistered process: %s", msg.String()) continue } msgs.Send(m.ctx, msg) @@ -608,6 +662,8 @@ func 
debugMessage(msg *pb.ExecMessage) string { return fmt.Sprintf("File Message %s, fd=%d, %d bytes", msg.ProcessID, m.File.Fd, len(m.File.Data)) case *pb.ExecMessage_Resize: return fmt.Sprintf("Resize Message %s", msg.ProcessID) + case *pb.ExecMessage_Signal: + return fmt.Sprintf("Signal Message %s: %s", msg.ProcessID, m.Signal.Name) case *pb.ExecMessage_Started: return fmt.Sprintf("Started Message %s", msg.ProcessID) case *pb.ExecMessage_Exit: @@ -625,7 +681,7 @@ func (m *messageForwarder) Send(msg *pb.ExecMessage) error { if !ok { return errors.Errorf("process %s has ended, not sending message %#v", msg.ProcessID, msg.Input) } - logrus.Debugf("|---> %s", debugMessage(msg)) + bklog.G(m.ctx).Debugf("|---> %s", debugMessage(msg)) return m.stream.Send(msg) } @@ -703,12 +759,14 @@ func (c *grpcClient) NewContainer(ctx context.Context, req client.NewContainerRe }) } - logrus.Debugf("|---> NewContainer %s", id) + bklog.G(ctx).Debugf("|---> NewContainer %s", id) _, err = c.client.NewContainer(ctx, &pb.NewContainerRequest{ ContainerID: id, Mounts: mounts, Platform: req.Platform, Constraints: req.Constraints, + Network: req.NetMode, + ExtraHosts: req.ExtraHosts, }) if err != nil { return nil, err @@ -882,8 +940,8 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien Message: exit.Error.Message, Details: convertGogoAny(exit.Error.Details), })) - if exit.Code != errdefs.UnknownExitStatus { - exitError = &errdefs.ExitError{ExitCode: exit.Code, Err: exitError} + if exit.Code != pb.UnknownExitStatus { + exitError = &pb.ExitError{ExitCode: exit.Code, Err: exitError} } } else if serverDone := msg.GetDone(); serverDone != nil { return exitError @@ -897,7 +955,7 @@ func (ctr *container) Start(ctx context.Context, req client.StartRequest) (clien } func (ctr *container) Release(ctx context.Context) error { - logrus.Debugf("|---> ReleaseContainer %s", ctr.id) + bklog.G(ctx).Debugf("|---> ReleaseContainer %s", ctr.id) _, err := ctr.client.ReleaseContainer(ctx, &pb.ReleaseContainerRequest{ ContainerID: ctr.id, }) @@ -927,6 +985,29 @@ func (ctrProc *containerProcess) Resize(_ context.Context, size client.WinSize) }) } +var sigToName = map[syscall.Signal]string{} + +func init() { + for name, value := range signal.SignalMap { + sigToName[value] = name + } +} + +func (ctrProc *containerProcess) Signal(_ context.Context, sig syscall.Signal) error { + name := sigToName[sig] + if name == "" { + return errors.Errorf("unknown signal %v", sig) + } + return ctrProc.execMsgs.Send(&pb.ExecMessage{ + ProcessID: ctrProc.id, + Input: &pb.ExecMessage_Signal{ + Signal: &pb.SignalMessage{ + Name: name, + }, + }, + }) +} + type reference struct { c *grpcClient id string @@ -1006,7 +1087,7 @@ func grpcClientConn(ctx context.Context) (context.Context, *grpc.ClientConn, err return stdioConn(), nil }) - cc, err := grpc.DialContext(ctx, "localhost", dialOpt, grpc.WithInsecure(), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor)) + cc, err := grpc.DialContext(ctx, "localhost", dialOpt, grpc.WithTransportCredentials(insecure.NewCredentials()), grpc.WithUnaryInterceptor(grpcerrors.UnaryClientInterceptor), grpc.WithStreamInterceptor(grpcerrors.StreamClientInterceptor)) if err != nil { return nil, nil, errors.Wrap(err, "failed to create grpc client") }
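The Warn method above is the client half of the new warning support; the matching CapGatewayWarnings capability is registered in the caps.go hunk that follows, and the server half is the llbBridgeForwarder.Warn handler shown earlier. A hedged sketch of how a frontend might surface a deprecation warning for an LLB vertex while staying compatible with older daemons; warnDeprecated, its message, and the Level value are illustrative, and the assumption that client.Client exposes Warn and BuildOpts().Caps follows this vendored version of buildkit:

// Illustrative only: emit a build warning through the new Warn RPC. The
// fields of client.WarnOpts map one-to-one onto WarnRequest; the message
// becomes Short on the wire, Detail carries optional extra lines.
package example

import (
	"context"

	"github.com/moby/buildkit/frontend/gateway/client"
	gwpb "github.com/moby/buildkit/frontend/gateway/pb"
	digest "github.com/opencontainers/go-digest"
)

func warnDeprecated(ctx context.Context, c client.Client, vtx digest.Digest) error {
	// Older daemons have no Warn handler, so check the advertised caps first.
	if err := c.BuildOpts().Caps.Supports(gwpb.CapGatewayWarnings); err != nil {
		return nil // silently drop the warning rather than fail the build
	}
	return c.Warn(ctx, vtx, "MAINTAINER is deprecated, use LABEL instead", client.WarnOpts{
		Level: 1, // becomes WarnRequest.Level on the wire
	})
}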
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go
index efddd746d3..c4af39f3f0 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go
@@ -1,4 +1,4 @@ -package moby_buildkit_v1_frontend //nolint:golint +package moby_buildkit_v1_frontend //nolint:revive import "github.com/moby/buildkit/util/apicaps" @@ -40,6 +40,14 @@ const ( // containers directly through the gateway CapGatewayExec apicaps.CapID = "gateway.exec" + // CapGatewayExecExtraHosts is the capability to add additional hosts to + // /etc/hosts for containers created via gateway exec. + CapGatewayExecExtraHosts apicaps.CapID = "gateway.exec.extrahosts" + + // CapGatewayExecSignals is the capability to send signals to a process + // created via gateway exec. + CapGatewayExecSignals apicaps.CapID = "gateway.exec.signals" + // CapFrontendCaps can be used to check that frontends define support for certain capabilities CapFrontendCaps apicaps.CapID = "frontend.caps" @@ -47,10 +55,12 @@ const ( // results. This is generally used by the client to return and handle solve // errors. CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate" + + // CapGatewayWarnings is the capability to log warnings from frontend + CapGatewayWarnings apicaps.CapID = "gateway.warnings" ) func init() { - Caps.Init(apicaps.Cap{ ID: CapSolveBase, Enabled: true, @@ -156,6 +166,20 @@ func init() { + Caps.Init(apicaps.Cap{ + ID: CapGatewayExecExtraHosts, + Name: "gateway exec extra-hosts", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + + Caps.Init(apicaps.Cap{ + ID: CapGatewayExecSignals, + Name: "gateway exec signals", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapFrontendCaps, Name: "frontend capabilities", @@ -169,4 +193,11 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapGatewayWarnings, + Name: "logging warnings", + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) }
diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go
similarity index 63%
rename from vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go
rename to vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go
index f98a148de4..ec012f615c 100644
--- a/vendor/github.com/moby/buildkit/frontend/gateway/errdefs/exit.go
+++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/exit.go
@@ -1,6 +1,11 @@ -package errdefs +package moby_buildkit_v1_frontend //nolint:revive -import "fmt" +import ( + "fmt" + + "github.com/containerd/typeurl" + "github.com/moby/buildkit/util/grpcerrors" +) const ( // UnknownExitStatus might be returned in (*ExitError).ExitCode via @@ -12,6 +17,10 @@ const ( UnknownExitStatus = 255 ) +func init() { + typeurl.Register((*ExitMessage)(nil), "github.com/moby/buildkit", "gatewayapi.ExitMessage+json") +} + // ExitError will be returned when the container process exits with a non-zero // exit code.
type ExitError struct { @@ -19,6 +28,12 @@ type ExitError struct { Err error } +func (err *ExitError) ToProto() grpcerrors.TypedErrorProto { + return &ExitMessage{ + Code: err.ExitCode, + } +} + func (err *ExitError) Error() string { if err.Err != nil { return err.Err.Error() @@ -27,8 +42,12 @@ func (err *ExitError) Error() string { } func (err *ExitError) Unwrap() error { - if err.Err == nil { - return fmt.Errorf("exit code: %d", err.ExitCode) - } return err.Err } + +func (e *ExitMessage) WrapError(err error) error { + return &ExitError{ + Err: err, + ExitCode: e.Code, + } +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go index 06fcb98ea4..e8e797ca7e 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go @@ -1330,6 +1330,133 @@ func (m *PongResponse) GetWorkers() []*types1.WorkerRecord { return nil } +type WarnRequest struct { + Digest github_com_opencontainers_go_digest.Digest `protobuf:"bytes,1,opt,name=digest,proto3,customtype=github.com/opencontainers/go-digest.Digest" json:"digest"` + Level int64 `protobuf:"varint,2,opt,name=level,proto3" json:"level,omitempty"` + Short []byte `protobuf:"bytes,3,opt,name=short,proto3" json:"short,omitempty"` + Detail [][]byte `protobuf:"bytes,4,rep,name=detail,proto3" json:"detail,omitempty"` + Url string `protobuf:"bytes,5,opt,name=url,proto3" json:"url,omitempty"` + Info *pb.SourceInfo `protobuf:"bytes,6,opt,name=info,proto3" json:"info,omitempty"` + Ranges []*pb.Range `protobuf:"bytes,7,rep,name=ranges,proto3" json:"ranges,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *WarnRequest) Reset() { *m = WarnRequest{} } +func (m *WarnRequest) String() string { return proto.CompactTextString(m) } +func (*WarnRequest) ProtoMessage() {} +func (*WarnRequest) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{22} +} +func (m *WarnRequest) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WarnRequest) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WarnRequest.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WarnRequest) XXX_Merge(src proto.Message) { + xxx_messageInfo_WarnRequest.Merge(m, src) +} +func (m *WarnRequest) XXX_Size() int { + return m.Size() +} +func (m *WarnRequest) XXX_DiscardUnknown() { + xxx_messageInfo_WarnRequest.DiscardUnknown(m) +} + +var xxx_messageInfo_WarnRequest proto.InternalMessageInfo + +func (m *WarnRequest) GetLevel() int64 { + if m != nil { + return m.Level + } + return 0 +} + +func (m *WarnRequest) GetShort() []byte { + if m != nil { + return m.Short + } + return nil +} + +func (m *WarnRequest) GetDetail() [][]byte { + if m != nil { + return m.Detail + } + return nil +} + +func (m *WarnRequest) GetUrl() string { + if m != nil { + return m.Url + } + return "" +} + +func (m *WarnRequest) GetInfo() *pb.SourceInfo { + if m != nil { + return m.Info + } + return nil +} + +func (m *WarnRequest) GetRanges() []*pb.Range { + if m != nil { + return m.Ranges + } + return nil +} + +type WarnResponse struct { + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m 
*WarnResponse) Reset() { *m = WarnResponse{} } +func (m *WarnResponse) String() string { return proto.CompactTextString(m) } +func (*WarnResponse) ProtoMessage() {} +func (*WarnResponse) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{23} +} +func (m *WarnResponse) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *WarnResponse) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_WarnResponse.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *WarnResponse) XXX_Merge(src proto.Message) { + xxx_messageInfo_WarnResponse.Merge(m, src) +} +func (m *WarnResponse) XXX_Size() int { + return m.Size() +} +func (m *WarnResponse) XXX_DiscardUnknown() { + xxx_messageInfo_WarnResponse.DiscardUnknown(m) +} + +var xxx_messageInfo_WarnResponse proto.InternalMessageInfo + type NewContainerRequest struct { ContainerID string `protobuf:"bytes,1,opt,name=ContainerID,proto3" json:"ContainerID,omitempty"` // For mount input values we can use random identifiers passed with ref @@ -1337,6 +1464,7 @@ type NewContainerRequest struct { Network pb.NetMode `protobuf:"varint,3,opt,name=Network,proto3,enum=pb.NetMode" json:"Network,omitempty"` Platform *pb.Platform `protobuf:"bytes,4,opt,name=platform,proto3" json:"platform,omitempty"` Constraints *pb.WorkerConstraints `protobuf:"bytes,5,opt,name=constraints,proto3" json:"constraints,omitempty"` + ExtraHosts []*pb.HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` XXX_sizecache int32 `json:"-"` @@ -1346,7 +1474,7 @@ func (m *NewContainerRequest) Reset() { *m = NewContainerRequest{} } func (m *NewContainerRequest) String() string { return proto.CompactTextString(m) } func (*NewContainerRequest) ProtoMessage() {} func (*NewContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{22} + return fileDescriptor_f1a937782ebbded5, []int{24} } func (m *NewContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1410,6 +1538,13 @@ func (m *NewContainerRequest) GetConstraints() *pb.WorkerConstraints { return nil } +func (m *NewContainerRequest) GetExtraHosts() []*pb.HostIP { + if m != nil { + return m.ExtraHosts + } + return nil +} + type NewContainerResponse struct { XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1420,7 +1555,7 @@ func (m *NewContainerResponse) Reset() { *m = NewContainerResponse{} } func (m *NewContainerResponse) String() string { return proto.CompactTextString(m) } func (*NewContainerResponse) ProtoMessage() {} func (*NewContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{23} + return fileDescriptor_f1a937782ebbded5, []int{25} } func (m *NewContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1460,7 +1595,7 @@ func (m *ReleaseContainerRequest) Reset() { *m = ReleaseContainerRequest func (m *ReleaseContainerRequest) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerRequest) ProtoMessage() {} func (*ReleaseContainerRequest) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{24} + return fileDescriptor_f1a937782ebbded5, []int{26} } func (m *ReleaseContainerRequest) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-1506,7 +1641,7 @@ func (m *ReleaseContainerResponse) Reset() { *m = ReleaseContainerRespon func (m *ReleaseContainerResponse) String() string { return proto.CompactTextString(m) } func (*ReleaseContainerResponse) ProtoMessage() {} func (*ReleaseContainerResponse) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{25} + return fileDescriptor_f1a937782ebbded5, []int{27} } func (m *ReleaseContainerResponse) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1544,6 +1679,7 @@ type ExecMessage struct { // *ExecMessage_Started // *ExecMessage_Exit // *ExecMessage_Done + // *ExecMessage_Signal Input isExecMessage_Input `protobuf_oneof:"Input"` XXX_NoUnkeyedLiteral struct{} `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -1554,7 +1690,7 @@ func (m *ExecMessage) Reset() { *m = ExecMessage{} } func (m *ExecMessage) String() string { return proto.CompactTextString(m) } func (*ExecMessage) ProtoMessage() {} func (*ExecMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{26} + return fileDescriptor_f1a937782ebbded5, []int{28} } func (m *ExecMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1607,6 +1743,9 @@ type ExecMessage_Exit struct { type ExecMessage_Done struct { Done *DoneMessage `protobuf:"bytes,7,opt,name=Done,proto3,oneof" json:"Done,omitempty"` } +type ExecMessage_Signal struct { + Signal *SignalMessage `protobuf:"bytes,8,opt,name=Signal,proto3,oneof" json:"Signal,omitempty"` +} func (*ExecMessage_Init) isExecMessage_Input() {} func (*ExecMessage_File) isExecMessage_Input() {} @@ -1614,6 +1753,7 @@ func (*ExecMessage_Resize) isExecMessage_Input() {} func (*ExecMessage_Started) isExecMessage_Input() {} func (*ExecMessage_Exit) isExecMessage_Input() {} func (*ExecMessage_Done) isExecMessage_Input() {} +func (*ExecMessage_Signal) isExecMessage_Input() {} func (m *ExecMessage) GetInput() isExecMessage_Input { if m != nil { @@ -1671,6 +1811,13 @@ func (m *ExecMessage) GetDone() *DoneMessage { return nil } +func (m *ExecMessage) GetSignal() *SignalMessage { + if x, ok := m.GetInput().(*ExecMessage_Signal); ok { + return x.Signal + } + return nil +} + // XXX_OneofWrappers is for the internal use of the proto package. 
func (*ExecMessage) XXX_OneofWrappers() []interface{} { return []interface{}{ @@ -1680,6 +1827,7 @@ func (*ExecMessage) XXX_OneofWrappers() []interface{} { (*ExecMessage_Started)(nil), (*ExecMessage_Exit)(nil), (*ExecMessage_Done)(nil), + (*ExecMessage_Signal)(nil), } } @@ -1698,7 +1846,7 @@ func (m *InitMessage) Reset() { *m = InitMessage{} } func (m *InitMessage) String() string { return proto.CompactTextString(m) } func (*InitMessage) ProtoMessage() {} func (*InitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{27} + return fileDescriptor_f1a937782ebbded5, []int{29} } func (m *InitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1774,7 +1922,7 @@ func (m *ExitMessage) Reset() { *m = ExitMessage{} } func (m *ExitMessage) String() string { return proto.CompactTextString(m) } func (*ExitMessage) ProtoMessage() {} func (*ExitMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{28} + return fileDescriptor_f1a937782ebbded5, []int{30} } func (m *ExitMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1827,7 +1975,7 @@ func (m *StartedMessage) Reset() { *m = StartedMessage{} } func (m *StartedMessage) String() string { return proto.CompactTextString(m) } func (*StartedMessage) ProtoMessage() {} func (*StartedMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{29} + return fileDescriptor_f1a937782ebbded5, []int{31} } func (m *StartedMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1866,7 +2014,7 @@ func (m *DoneMessage) Reset() { *m = DoneMessage{} } func (m *DoneMessage) String() string { return proto.CompactTextString(m) } func (*DoneMessage) ProtoMessage() {} func (*DoneMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{30} + return fileDescriptor_f1a937782ebbded5, []int{32} } func (m *DoneMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1908,7 +2056,7 @@ func (m *FdMessage) Reset() { *m = FdMessage{} } func (m *FdMessage) String() string { return proto.CompactTextString(m) } func (*FdMessage) ProtoMessage() {} func (*FdMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{31} + return fileDescriptor_f1a937782ebbded5, []int{33} } func (m *FdMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1970,7 +2118,7 @@ func (m *ResizeMessage) Reset() { *m = ResizeMessage{} } func (m *ResizeMessage) String() string { return proto.CompactTextString(m) } func (*ResizeMessage) ProtoMessage() {} func (*ResizeMessage) Descriptor() ([]byte, []int) { - return fileDescriptor_f1a937782ebbded5, []int{32} + return fileDescriptor_f1a937782ebbded5, []int{34} } func (m *ResizeMessage) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2013,6 +2161,55 @@ func (m *ResizeMessage) GetCols() uint32 { return 0 } +type SignalMessage struct { + // we only send name (ie HUP, INT) because the int values + // are platform dependent. 
+ Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + XXX_NoUnkeyedLiteral struct{} `json:"-"` + XXX_unrecognized []byte `json:"-"` + XXX_sizecache int32 `json:"-"` +} + +func (m *SignalMessage) Reset() { *m = SignalMessage{} } +func (m *SignalMessage) String() string { return proto.CompactTextString(m) } +func (*SignalMessage) ProtoMessage() {} +func (*SignalMessage) Descriptor() ([]byte, []int) { + return fileDescriptor_f1a937782ebbded5, []int{35} +} +func (m *SignalMessage) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SignalMessage) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + if deterministic { + return xxx_messageInfo_SignalMessage.Marshal(b, m, deterministic) + } else { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil + } +} +func (m *SignalMessage) XXX_Merge(src proto.Message) { + xxx_messageInfo_SignalMessage.Merge(m, src) +} +func (m *SignalMessage) XXX_Size() int { + return m.Size() +} +func (m *SignalMessage) XXX_DiscardUnknown() { + xxx_messageInfo_SignalMessage.DiscardUnknown(m) +} + +var xxx_messageInfo_SignalMessage proto.InternalMessageInfo + +func (m *SignalMessage) GetName() string { + if m != nil { + return m.Name + } + return "" +} + func init() { proto.RegisterType((*Result)(nil), "moby.buildkit.v1.frontend.Result") proto.RegisterMapType((map[string][]byte)(nil), "moby.buildkit.v1.frontend.Result.MetadataEntry") @@ -2043,6 +2240,8 @@ func init() { proto.RegisterType((*StatFileResponse)(nil), "moby.buildkit.v1.frontend.StatFileResponse") proto.RegisterType((*PingRequest)(nil), "moby.buildkit.v1.frontend.PingRequest") proto.RegisterType((*PongResponse)(nil), "moby.buildkit.v1.frontend.PongResponse") + proto.RegisterType((*WarnRequest)(nil), "moby.buildkit.v1.frontend.WarnRequest") + proto.RegisterType((*WarnResponse)(nil), "moby.buildkit.v1.frontend.WarnResponse") proto.RegisterType((*NewContainerRequest)(nil), "moby.buildkit.v1.frontend.NewContainerRequest") proto.RegisterType((*NewContainerResponse)(nil), "moby.buildkit.v1.frontend.NewContainerResponse") proto.RegisterType((*ReleaseContainerRequest)(nil), "moby.buildkit.v1.frontend.ReleaseContainerRequest") @@ -2054,132 +2253,143 @@ func init() { proto.RegisterType((*DoneMessage)(nil), "moby.buildkit.v1.frontend.DoneMessage") proto.RegisterType((*FdMessage)(nil), "moby.buildkit.v1.frontend.FdMessage") proto.RegisterType((*ResizeMessage)(nil), "moby.buildkit.v1.frontend.ResizeMessage") + proto.RegisterType((*SignalMessage)(nil), "moby.buildkit.v1.frontend.SignalMessage") } func init() { proto.RegisterFile("gateway.proto", fileDescriptor_f1a937782ebbded5) } var fileDescriptor_f1a937782ebbded5 = []byte{ - // 1909 bytes of a gzipped FileDescriptorProto + // 2078 bytes of a gzipped FileDescriptorProto 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x58, 0x4f, 0x6f, 0x1b, 0xc7, - 0x15, 0xd7, 0x8a, 0xa4, 0x48, 0x3e, 0xfe, 0x31, 0x33, 0x4e, 0x53, 0x7a, 0x11, 0x38, 0xcc, 0x22, - 0x55, 0x69, 0x47, 0x59, 0xa6, 0x74, 0x02, 0xb9, 0x72, 0x90, 0xd4, 0x94, 0x28, 0x58, 0x8d, 0x24, - 0xab, 0xe3, 0x14, 0x06, 0x82, 0x14, 0xe8, 0x8a, 0x3b, 0xa4, 0x17, 0xa6, 0x76, 0xb7, 0xb3, 0x43, - 0xcb, 0x4c, 0x2e, 0xed, 0xad, 0xf7, 0x02, 0xbd, 0x16, 0xe8, 0x27, 0xe8, 0xa5, 0xd7, 0x9e, 0x73, - 0xec, 0xb9, 0x07, 0xa3, 0x10, 0xfa, 0x11, 0x7a, 0x6f, 0xf1, 0x66, 0x67, 0xc8, 0x25, 0x45, 0x2d, - 0x49, 0xe4, 0xc4, 0x99, 0xb7, 0xef, 0xf7, 0xe6, 0xfd, 0x9b, 0xf7, 0xde, 0x10, 0x2a, 0x03, 0x47, - 0xb0, 0x4b, 0x67, 
0x6c, 0x87, 0x3c, 0x10, 0x01, 0xb9, 0x73, 0x11, 0x9c, 0x8f, 0xed, 0xf3, 0x91, - 0x37, 0x74, 0x5f, 0x7a, 0xc2, 0x7e, 0xf5, 0x33, 0xbb, 0xcf, 0x03, 0x5f, 0x30, 0xdf, 0x35, 0x3f, - 0x1a, 0x78, 0xe2, 0xc5, 0xe8, 0xdc, 0xee, 0x05, 0x17, 0xad, 0x41, 0x30, 0x08, 0x5a, 0x12, 0x71, - 0x3e, 0xea, 0xcb, 0x9d, 0xdc, 0xc8, 0x55, 0x2c, 0xc9, 0x6c, 0xcf, 0xb3, 0x0f, 0x82, 0x60, 0x30, - 0x64, 0x4e, 0xe8, 0x45, 0x6a, 0xd9, 0xe2, 0x61, 0xaf, 0x15, 0x09, 0x47, 0x8c, 0x22, 0x85, 0xd9, - 0x49, 0x60, 0x50, 0x91, 0x96, 0x56, 0xa4, 0x15, 0x05, 0xc3, 0x57, 0x8c, 0xb7, 0xc2, 0xf3, 0x56, - 0x10, 0x6a, 0xee, 0xd6, 0x8d, 0xdc, 0x4e, 0xe8, 0xb5, 0xc4, 0x38, 0x64, 0x51, 0xeb, 0x32, 0xe0, - 0x2f, 0x19, 0x57, 0x80, 0x07, 0x37, 0x02, 0x46, 0xc2, 0x1b, 0x22, 0xaa, 0xe7, 0x84, 0x11, 0x1e, - 0x82, 0xbf, 0x0a, 0x94, 0x34, 0x5b, 0x04, 0xbe, 0x17, 0x09, 0xcf, 0x1b, 0x78, 0xad, 0x7e, 0x24, - 0x31, 0xf1, 0x29, 0x68, 0x44, 0xcc, 0x6e, 0xfd, 0x31, 0x03, 0x5b, 0x94, 0x45, 0xa3, 0xa1, 0x20, - 0xdb, 0x50, 0xe1, 0xac, 0x7f, 0xc0, 0x42, 0xce, 0x7a, 0x8e, 0x60, 0x6e, 0xdd, 0x68, 0x18, 0xcd, - 0xe2, 0x93, 0x0d, 0x3a, 0x4b, 0x26, 0xbf, 0x86, 0x2a, 0x67, 0xfd, 0x28, 0xc1, 0xb8, 0xd9, 0x30, - 0x9a, 0xa5, 0xf6, 0x87, 0xf6, 0x8d, 0xc1, 0xb0, 0x29, 0xeb, 0x9f, 0x38, 0xe1, 0x14, 0xf2, 0x64, - 0x83, 0xce, 0x09, 0x21, 0x6d, 0xc8, 0x70, 0xd6, 0xaf, 0x67, 0xa4, 0xac, 0xbb, 0xe9, 0xb2, 0x9e, - 0x6c, 0x50, 0x64, 0x26, 0xbb, 0x90, 0x45, 0x29, 0xf5, 0xac, 0x04, 0xbd, 0xbf, 0x54, 0x81, 0x27, - 0x1b, 0x54, 0x02, 0xc8, 0x97, 0x50, 0xb8, 0x60, 0xc2, 0x71, 0x1d, 0xe1, 0xd4, 0xa1, 0x91, 0x69, - 0x96, 0xda, 0xad, 0x54, 0x30, 0x3a, 0xc8, 0x3e, 0x51, 0x88, 0xae, 0x2f, 0xf8, 0x98, 0x4e, 0x04, - 0x98, 0x8f, 0xa0, 0x32, 0xf3, 0x89, 0xd4, 0x20, 0xf3, 0x92, 0x8d, 0x63, 0xff, 0x51, 0x5c, 0x92, - 0xb7, 0x21, 0xf7, 0xca, 0x19, 0x8e, 0x98, 0x74, 0x55, 0x99, 0xc6, 0x9b, 0xbd, 0xcd, 0x87, 0x46, - 0xa7, 0x00, 0x5b, 0x5c, 0x8a, 0xb7, 0xfe, 0x6c, 0x40, 0x6d, 0xde, 0x4f, 0xe4, 0x48, 0x59, 0x68, - 0x48, 0x25, 0x3f, 0x5d, 0xc3, 0xc5, 0x48, 0x88, 0x62, 0x55, 0xa5, 0x08, 0x73, 0x17, 0x8a, 0x13, - 0xd2, 0x32, 0x15, 0x8b, 0x09, 0x15, 0xad, 0x5d, 0xc8, 0x50, 0xd6, 0x27, 0x55, 0xd8, 0xf4, 0x54, - 0x52, 0xd0, 0x4d, 0xcf, 0x25, 0x0d, 0xc8, 0xb8, 0xac, 0xaf, 0x82, 0x5f, 0xb5, 0xc3, 0x73, 0xfb, - 0x80, 0xf5, 0x3d, 0xdf, 0x13, 0x5e, 0xe0, 0x53, 0xfc, 0x64, 0xfd, 0xd5, 0xc0, 0xe4, 0x42, 0xb5, - 0xc8, 0x17, 0x33, 0x76, 0x2c, 0x4f, 0x95, 0x6b, 0xda, 0x3f, 0x4f, 0xd7, 0xfe, 0x93, 0xa4, 0xf6, - 0x4b, 0xf3, 0x27, 0x69, 0x9d, 0x80, 0x0a, 0x65, 0x62, 0xc4, 0x7d, 0xca, 0x7e, 0x37, 0x62, 0x91, - 0x20, 0x3f, 0xd7, 0x11, 0x91, 0xf2, 0x97, 0xa5, 0x15, 0x32, 0x52, 0x05, 0x20, 0x4d, 0xc8, 0x31, - 0xce, 0x03, 0xae, 0xb4, 0x20, 0x76, 0x5c, 0x39, 0x6c, 0x1e, 0xf6, 0xec, 0x67, 0xb2, 0x72, 0xd0, - 0x98, 0xc1, 0xaa, 0x41, 0x55, 0x9f, 0x1a, 0x85, 0x81, 0x1f, 0x31, 0xeb, 0x16, 0x54, 0x8e, 0xfc, - 0x70, 0x24, 0x22, 0xa5, 0x87, 0xf5, 0x0f, 0x03, 0xaa, 0x9a, 0x12, 0xf3, 0x90, 0x6f, 0xa0, 0x34, - 0xf5, 0xb1, 0x76, 0xe6, 0x5e, 0x8a, 0x7e, 0xb3, 0xf8, 0x44, 0x80, 0x94, 0x6f, 0x93, 0xe2, 0xcc, - 0x53, 0xa8, 0xcd, 0x33, 0x2c, 0xf0, 0xf4, 0x07, 0xb3, 0x9e, 0x9e, 0x0f, 0x7c, 0xc2, 0xb3, 0x7f, - 0x32, 0xe0, 0x0e, 0x65, 0xb2, 0x14, 0x1e, 0x5d, 0x38, 0x03, 0xb6, 0x1f, 0xf8, 0x7d, 0x6f, 0xa0, - 0xdd, 0x5c, 0x93, 0x59, 0xa5, 0x25, 0x63, 0x82, 0x35, 0xa1, 0x70, 0x36, 0x74, 0x44, 0x3f, 0xe0, - 0x17, 0x4a, 0x78, 0x19, 0x85, 0x6b, 0x1a, 0x9d, 0x7c, 0x25, 0x0d, 0x28, 0x29, 0xc1, 0x27, 0x81, - 0xcb, 0x64, 0xcd, 0x28, 0xd2, 0x24, 0x89, 0xd4, 0x21, 0x7f, 0x1c, 0x0c, 0x4e, 0x9d, 0x0b, 0x26, - 0x8b, 0x43, 0x91, 0xea, 0xad, 0xf5, 0x7b, 
0x03, 0xcc, 0x45, 0x5a, 0x29, 0x17, 0xff, 0x12, 0xb6, - 0x0e, 0xbc, 0x01, 0x8b, 0xe2, 0xe8, 0x17, 0x3b, 0xed, 0xef, 0xdf, 0xbc, 0xb7, 0xf1, 0xaf, 0x37, - 0xef, 0xdd, 0x4f, 0xd4, 0xd5, 0x20, 0x64, 0x7e, 0x2f, 0xf0, 0x85, 0xe3, 0xf9, 0x8c, 0x63, 0x7b, - 0xf8, 0xc8, 0x95, 0x10, 0x3b, 0x46, 0x52, 0x25, 0x81, 0xbc, 0x03, 0x5b, 0xb1, 0x74, 0x75, 0xed, - 0xd5, 0xce, 0xfa, 0x6f, 0x0e, 0xca, 0xcf, 0x50, 0x01, 0xed, 0x0b, 0x1b, 0x60, 0xea, 0x42, 0x95, - 0x76, 0xf3, 0x8e, 0x4d, 0x70, 0x10, 0x13, 0x0a, 0x87, 0x2a, 0xc4, 0xea, 0xba, 0x4e, 0xf6, 0xe4, - 0x6b, 0x28, 0xe9, 0xf5, 0xd3, 0x50, 0xd4, 0x33, 0x32, 0x47, 0x1e, 0xa6, 0xe4, 0x48, 0x52, 0x13, - 0x3b, 0x01, 0x55, 0x19, 0x92, 0xa0, 0x90, 0xcf, 0xe0, 0xce, 0xd1, 0x45, 0x18, 0x70, 0xb1, 0xef, - 0xf4, 0x5e, 0x30, 0x3a, 0xdb, 0x05, 0xb2, 0x8d, 0x4c, 0xb3, 0x48, 0x6f, 0x66, 0x20, 0x3b, 0xf0, - 0x96, 0x33, 0x1c, 0x06, 0x97, 0xea, 0xd2, 0xc8, 0xf4, 0xaf, 0xe7, 0x1a, 0x46, 0xb3, 0x40, 0xaf, - 0x7f, 0x20, 0x1f, 0xc3, 0xed, 0x04, 0xf1, 0x31, 0xe7, 0xce, 0x18, 0xf3, 0x65, 0x4b, 0xf2, 0x2f, - 0xfa, 0x84, 0x15, 0xec, 0xd0, 0xf3, 0x9d, 0x61, 0x1d, 0x24, 0x4f, 0xbc, 0x21, 0x16, 0x94, 0xbb, - 0xaf, 0x51, 0x25, 0xc6, 0x1f, 0x0b, 0xc1, 0xeb, 0x25, 0x19, 0x8a, 0x19, 0x1a, 0x39, 0x83, 0xb2, - 0x54, 0x38, 0xd6, 0x3d, 0xaa, 0x97, 0xa5, 0xd3, 0x76, 0x52, 0x9c, 0x26, 0xd9, 0x9f, 0x86, 0x89, - 0xab, 0x34, 0x23, 0x81, 0xf4, 0xa0, 0xaa, 0x1d, 0x17, 0xdf, 0xc1, 0x7a, 0x45, 0xca, 0x7c, 0xb4, - 0x6e, 0x20, 0x62, 0x74, 0x7c, 0xc4, 0x9c, 0x48, 0x4c, 0x83, 0x2e, 0x5e, 0x37, 0x47, 0xb0, 0x7a, - 0x55, 0xda, 0x3c, 0xd9, 0x9b, 0x9f, 0x43, 0x6d, 0x3e, 0x96, 0xeb, 0x14, 0x7d, 0xf3, 0x57, 0x70, - 0x7b, 0x81, 0x0a, 0x3f, 0xa8, 0x1e, 0xfc, 0xcd, 0x80, 0xb7, 0xae, 0xf9, 0x8d, 0x10, 0xc8, 0x7e, - 0x35, 0x0e, 0x99, 0x12, 0x29, 0xd7, 0xe4, 0x04, 0x72, 0x18, 0x97, 0xa8, 0xbe, 0x29, 0x9d, 0xb6, - 0xbb, 0x4e, 0x20, 0x6c, 0x89, 0x8c, 0x1d, 0x16, 0x4b, 0x31, 0x1f, 0x02, 0x4c, 0x89, 0x6b, 0xb5, - 0xbe, 0x6f, 0xa0, 0xa2, 0xa2, 0xa2, 0xca, 0x43, 0x2d, 0x9e, 0x52, 0x14, 0x18, 0x67, 0x90, 0x69, - 0xbb, 0xc8, 0xac, 0xd9, 0x2e, 0xac, 0xef, 0xe0, 0x16, 0x65, 0x8e, 0x7b, 0xe8, 0x0d, 0xd9, 0xcd, - 0x55, 0x11, 0xef, 0xba, 0x37, 0x64, 0x67, 0x8e, 0x78, 0x31, 0xb9, 0xeb, 0x6a, 0x4f, 0xf6, 0x20, - 0x47, 0x1d, 0x7f, 0xc0, 0xd4, 0xd1, 0x1f, 0xa4, 0x1c, 0x2d, 0x0f, 0x41, 0x5e, 0x1a, 0x43, 0xac, - 0x47, 0x50, 0x9c, 0xd0, 0xb0, 0x52, 0x3d, 0xed, 0xf7, 0x23, 0x16, 0x57, 0xbd, 0x0c, 0x55, 0x3b, - 0xa4, 0x1f, 0x33, 0x7f, 0xa0, 0x8e, 0xce, 0x50, 0xb5, 0xb3, 0xb6, 0x71, 0x54, 0xd1, 0x9a, 0x2b, - 0xd7, 0x10, 0xc8, 0x1e, 0xe0, 0x3c, 0x65, 0xc8, 0x0b, 0x26, 0xd7, 0x96, 0x8b, 0x6d, 0xce, 0x71, - 0x0f, 0x3c, 0x7e, 0xb3, 0x81, 0x75, 0xc8, 0x1f, 0x78, 0x3c, 0x61, 0x9f, 0xde, 0x92, 0x6d, 0x6c, - 0x80, 0xbd, 0xe1, 0xc8, 0x45, 0x6b, 0x05, 0xe3, 0xbe, 0xaa, 0xf4, 0x73, 0x54, 0xeb, 0x8b, 0xd8, - 0x8f, 0xf2, 0x14, 0xa5, 0xcc, 0x0e, 0xe4, 0x99, 0x2f, 0xb8, 0xc7, 0x74, 0x97, 0x24, 0x76, 0x3c, - 0x02, 0xdb, 0x72, 0x04, 0x96, 0xdd, 0x98, 0x6a, 0x16, 0x6b, 0x17, 0x6e, 0x21, 0x21, 0x3d, 0x10, - 0x04, 0xb2, 0x09, 0x25, 0xe5, 0xda, 0xda, 0x83, 0xda, 0x14, 0xa8, 0x8e, 0xde, 0x86, 0x2c, 0x0e, - 0xd8, 0xaa, 0x8c, 0x2f, 0x3a, 0x57, 0x7e, 0xb7, 0x2a, 0x50, 0x3a, 0xf3, 0x7c, 0xdd, 0x0f, 0xad, - 0x2b, 0x03, 0xca, 0x67, 0x81, 0x3f, 0xed, 0x44, 0x67, 0x70, 0x4b, 0xdf, 0xc0, 0xc7, 0x67, 0x47, - 0xfb, 0x4e, 0xa8, 0x4d, 0x69, 0x5c, 0x0f, 0xb3, 0x7a, 0x0b, 0xd8, 0x31, 0x63, 0x27, 0x8b, 0x4d, - 0x8b, 0xce, 0xc3, 0xc9, 0x2f, 0x20, 0x7f, 0x7c, 0xdc, 0x91, 0x92, 0x36, 0xd7, 0x92, 0xa4, 0x61, - 0xe4, 0x73, 0xc8, 0x3f, 0x97, 0x4f, 0x94, 0x48, 0x35, 0x96, 0x05, 
0x29, 0x17, 0x1b, 0x1a, 0xb3, - 0x51, 0xd6, 0x0b, 0xb8, 0x4b, 0x35, 0xc8, 0xfa, 0x8f, 0x01, 0xb7, 0x4f, 0xd9, 0xe5, 0xbe, 0x6e, - 0x9e, 0xda, 0xdb, 0x0d, 0x28, 0x4d, 0x68, 0x47, 0x07, 0xca, 0xeb, 0x49, 0x12, 0x79, 0x1f, 0xb6, - 0x4e, 0x82, 0x91, 0x2f, 0xb4, 0xea, 0x45, 0xac, 0x33, 0x92, 0x42, 0xd5, 0x07, 0xf2, 0x13, 0xc8, - 0x9f, 0x32, 0x81, 0x4f, 0x28, 0x99, 0x27, 0xd5, 0x76, 0x09, 0x79, 0x4e, 0x99, 0xc0, 0x89, 0x80, - 0xea, 0x6f, 0x38, 0x66, 0x84, 0x7a, 0xcc, 0xc8, 0x2e, 0x1a, 0x33, 0xf4, 0x57, 0xb2, 0x0b, 0xa5, - 0x5e, 0xe0, 0x47, 0x82, 0x3b, 0x1e, 0x1e, 0x9c, 0x93, 0xcc, 0x3f, 0x42, 0xe6, 0xd8, 0x9e, 0xfd, - 0xe9, 0x47, 0x9a, 0xe4, 0xb4, 0xde, 0x81, 0xb7, 0x67, 0xad, 0x54, 0x33, 0xde, 0x23, 0xf8, 0x31, - 0x65, 0x43, 0xe6, 0x44, 0x6c, 0x7d, 0x0f, 0x58, 0x26, 0xd4, 0xaf, 0x83, 0x95, 0xe0, 0xbf, 0x67, - 0xa0, 0xd4, 0x7d, 0xcd, 0x7a, 0x27, 0x2c, 0x8a, 0x9c, 0x01, 0x23, 0xef, 0x42, 0xf1, 0x8c, 0x07, - 0x3d, 0x16, 0x45, 0x13, 0x59, 0x53, 0x02, 0xf9, 0x0c, 0xb2, 0x47, 0xbe, 0x27, 0x54, 0xc5, 0xde, - 0x4e, 0x9d, 0x1f, 0x3d, 0xa1, 0x64, 0xe2, 0xdb, 0x09, 0xb7, 0x64, 0x0f, 0xb2, 0x98, 0xef, 0xab, - 0xd4, 0x1c, 0x37, 0x81, 0x45, 0x0c, 0xe9, 0xc8, 0xd7, 0xa6, 0xf7, 0x2d, 0x53, 0x9e, 0x6f, 0xa6, - 0x17, 0x4b, 0xef, 0x5b, 0x36, 0x95, 0xa0, 0x90, 0xa4, 0x0b, 0xf9, 0x67, 0xc2, 0xe1, 0x38, 0x72, - 0xc4, 0x11, 0xb9, 0x97, 0xd6, 0x53, 0x63, 0xce, 0xa9, 0x14, 0x8d, 0x45, 0x27, 0x74, 0x5f, 0x7b, - 0x42, 0x0e, 0x14, 0xe9, 0x4e, 0x40, 0xb6, 0x84, 0x21, 0xb8, 0x45, 0xf4, 0x41, 0xe0, 0xb3, 0x7a, - 0x7e, 0x29, 0x1a, 0xd9, 0x12, 0x68, 0xdc, 0x76, 0xf2, 0x90, 0x93, 0x4d, 0xd5, 0xfa, 0x8b, 0x01, - 0xa5, 0x84, 0x8f, 0x57, 0xb8, 0x07, 0xef, 0x42, 0x16, 0x1f, 0x9b, 0x2a, 0x76, 0x05, 0x79, 0x0b, - 0x98, 0x70, 0xa8, 0xa4, 0x62, 0xd5, 0x3a, 0x74, 0xe3, 0xbb, 0x59, 0xa1, 0xb8, 0x44, 0xca, 0x57, - 0x62, 0x2c, 0xdd, 0x5d, 0xa0, 0xb8, 0x24, 0x3b, 0x50, 0x78, 0xc6, 0x7a, 0x23, 0xee, 0x89, 0xb1, - 0x74, 0x60, 0xb5, 0x5d, 0x43, 0x29, 0x9a, 0x26, 0x2f, 0xcb, 0x84, 0xc3, 0xfa, 0x12, 0x13, 0x6b, - 0xaa, 0x20, 0x81, 0xec, 0x3e, 0x8e, 0xdc, 0xa8, 0x59, 0x85, 0xca, 0x35, 0xbe, 0x7a, 0xba, 0xcb, - 0x5e, 0x3d, 0x5d, 0xfd, 0xea, 0x99, 0x0d, 0x08, 0x16, 0xc1, 0x84, 0x83, 0xac, 0xc7, 0x50, 0x9c, - 0x24, 0x0d, 0x3e, 0x38, 0x0f, 0x5d, 0x75, 0xd2, 0xe6, 0xa1, 0x8b, 0xa6, 0x74, 0x9f, 0x1e, 0xca, - 0x53, 0x0a, 0x14, 0x97, 0x93, 0x96, 0x93, 0x49, 0xb4, 0x9c, 0x5d, 0x7c, 0xcf, 0x25, 0x32, 0x07, - 0x99, 0x68, 0x70, 0x19, 0x69, 0x95, 0x71, 0x1d, 0x9b, 0x31, 0x8c, 0xa4, 0x2c, 0x69, 0xc6, 0x30, - 0x6a, 0xff, 0xaf, 0x00, 0xc5, 0xe3, 0xe3, 0x4e, 0x87, 0x7b, 0xee, 0x80, 0x91, 0x3f, 0x18, 0x40, - 0xae, 0x3f, 0x13, 0xc8, 0x27, 0xe9, 0x09, 0xbb, 0xf8, 0xad, 0x63, 0x7e, 0xba, 0x26, 0x4a, 0x75, - 0x80, 0xaf, 0x21, 0x27, 0xa7, 0x0f, 0xf2, 0xd3, 0x15, 0xa7, 0x46, 0xb3, 0xb9, 0x9c, 0x51, 0xc9, - 0xee, 0x41, 0x41, 0x77, 0x70, 0x72, 0x3f, 0x55, 0xbd, 0x99, 0x01, 0xc5, 0xfc, 0x70, 0x25, 0x5e, - 0x75, 0xc8, 0x6f, 0x21, 0xaf, 0x1a, 0x33, 0xb9, 0xb7, 0x04, 0x37, 0x1d, 0x11, 0xcc, 0xfb, 0xab, - 0xb0, 0x4e, 0xcd, 0xd0, 0x0d, 0x38, 0xd5, 0x8c, 0xb9, 0xf6, 0x9e, 0x6a, 0xc6, 0xb5, 0x8e, 0xfe, - 0x1c, 0xb2, 0xd8, 0xa9, 0x49, 0xda, 0x35, 0x4f, 0xb4, 0x72, 0x33, 0x2d, 0x5c, 0x33, 0x2d, 0xfe, - 0x37, 0x58, 0x0e, 0xe5, 0x6b, 0x27, 0xbd, 0x10, 0x26, 0xfe, 0x9e, 0x30, 0xef, 0xad, 0xc0, 0x39, - 0x15, 0xaf, 0x5e, 0x0a, 0xcd, 0x15, 0xfe, 0x23, 0x58, 0x2e, 0x7e, 0xee, 0xdf, 0x88, 0x00, 0xca, - 0xc9, 0x2e, 0x47, 0xec, 0x14, 0xe8, 0x82, 0xa6, 0x6f, 0xb6, 0x56, 0xe6, 0x57, 0x07, 0x7e, 0x87, - 0x53, 0xe7, 0x6c, 0x07, 0x24, 0xed, 0x54, 0x77, 0x2c, 0xec, 0xb5, 0xe6, 0x83, 0xb5, 0x30, 
0xea, - 0x70, 0x27, 0xee, 0xb0, 0xaa, 0x8b, 0x92, 0xf4, 0x86, 0x31, 0xe9, 0xc4, 0xe6, 0x8a, 0x7c, 0x4d, - 0xe3, 0x63, 0xa3, 0x53, 0xfe, 0xfe, 0xea, 0xae, 0xf1, 0xcf, 0xab, 0xbb, 0xc6, 0xbf, 0xaf, 0xee, - 0x1a, 0xe7, 0x5b, 0xf2, 0x1f, 0xda, 0x07, 0xff, 0x0f, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x9d, 0x1a, - 0x7c, 0xf3, 0x16, 0x00, 0x00, + 0x15, 0xd7, 0x8a, 0x94, 0x48, 0x3e, 0xfe, 0xb1, 0x32, 0x4e, 0x53, 0x7a, 0x11, 0x38, 0xca, 0x36, + 0x55, 0x69, 0x47, 0x59, 0xa6, 0x72, 0x02, 0xb9, 0x72, 0x90, 0xd4, 0xfa, 0x07, 0x29, 0x91, 0x64, + 0x75, 0x94, 0xc2, 0x40, 0x90, 0x02, 0x5d, 0x71, 0x87, 0xf4, 0xc2, 0xab, 0x9d, 0xed, 0xec, 0xd0, + 0xb2, 0x92, 0x4b, 0x7b, 0xeb, 0xb1, 0x40, 0x81, 0x5e, 0x0b, 0xf4, 0x13, 0xf4, 0x13, 0xf4, 0x9c, + 0x63, 0x8f, 0x45, 0x0f, 0x41, 0xe1, 0xcf, 0x50, 0x14, 0xe8, 0x2d, 0x78, 0x33, 0xb3, 0xe4, 0x92, + 0xa2, 0x96, 0x24, 0x7c, 0xe2, 0xcc, 0xdb, 0xf7, 0x7b, 0xf3, 0xfe, 0xcd, 0x7b, 0x6f, 0x08, 0xf5, + 0x9e, 0x27, 0xd9, 0xa5, 0x77, 0xe5, 0xc6, 0x82, 0x4b, 0x4e, 0xee, 0x5c, 0xf0, 0xf3, 0x2b, 0xf7, + 0xbc, 0x1f, 0x84, 0xfe, 0xf3, 0x40, 0xba, 0x2f, 0x7e, 0xee, 0x76, 0x05, 0x8f, 0x24, 0x8b, 0x7c, + 0xfb, 0x83, 0x5e, 0x20, 0x9f, 0xf5, 0xcf, 0xdd, 0x0e, 0xbf, 0x68, 0xf7, 0x78, 0x8f, 0xb7, 0x15, + 0xe2, 0xbc, 0xdf, 0x55, 0x3b, 0xb5, 0x51, 0x2b, 0x2d, 0xc9, 0xde, 0x18, 0x67, 0xef, 0x71, 0xde, + 0x0b, 0x99, 0x17, 0x07, 0x89, 0x59, 0xb6, 0x45, 0xdc, 0x69, 0x27, 0xd2, 0x93, 0xfd, 0xc4, 0x60, + 0xd6, 0x33, 0x18, 0x54, 0xa4, 0x9d, 0x2a, 0xd2, 0x4e, 0x78, 0xf8, 0x82, 0x89, 0x76, 0x7c, 0xde, + 0xe6, 0x71, 0xca, 0xdd, 0xbe, 0x91, 0xdb, 0x8b, 0x83, 0xb6, 0xbc, 0x8a, 0x59, 0xd2, 0xbe, 0xe4, + 0xe2, 0x39, 0x13, 0x06, 0xf0, 0xe0, 0x46, 0x40, 0x5f, 0x06, 0x21, 0xa2, 0x3a, 0x5e, 0x9c, 0xe0, + 0x21, 0xf8, 0x6b, 0x40, 0x59, 0xb3, 0x25, 0x8f, 0x82, 0x44, 0x06, 0x41, 0x2f, 0x68, 0x77, 0x13, + 0x85, 0xd1, 0xa7, 0xa0, 0x11, 0x9a, 0xdd, 0xf9, 0x63, 0x01, 0x96, 0x29, 0x4b, 0xfa, 0xa1, 0x24, + 0x6b, 0x50, 0x17, 0xac, 0xbb, 0xcb, 0x62, 0xc1, 0x3a, 0x9e, 0x64, 0x7e, 0xd3, 0x5a, 0xb5, 0x5a, + 0x95, 0x83, 0x05, 0x3a, 0x4a, 0x26, 0xbf, 0x86, 0x86, 0x60, 0xdd, 0x24, 0xc3, 0xb8, 0xb8, 0x6a, + 0xb5, 0xaa, 0x1b, 0xef, 0xbb, 0x37, 0x06, 0xc3, 0xa5, 0xac, 0x7b, 0xec, 0xc5, 0x43, 0xc8, 0xc1, + 0x02, 0x1d, 0x13, 0x42, 0x36, 0xa0, 0x20, 0x58, 0xb7, 0x59, 0x50, 0xb2, 0xee, 0xe6, 0xcb, 0x3a, + 0x58, 0xa0, 0xc8, 0x4c, 0x36, 0xa1, 0x88, 0x52, 0x9a, 0x45, 0x05, 0x7a, 0x77, 0xaa, 0x02, 0x07, + 0x0b, 0x54, 0x01, 0xc8, 0x17, 0x50, 0xbe, 0x60, 0xd2, 0xf3, 0x3d, 0xe9, 0x35, 0x61, 0xb5, 0xd0, + 0xaa, 0x6e, 0xb4, 0x73, 0xc1, 0xe8, 0x20, 0xf7, 0xd8, 0x20, 0xf6, 0x22, 0x29, 0xae, 0xe8, 0x40, + 0x80, 0xfd, 0x08, 0xea, 0x23, 0x9f, 0xc8, 0x0a, 0x14, 0x9e, 0xb3, 0x2b, 0xed, 0x3f, 0x8a, 0x4b, + 0xf2, 0x26, 0x2c, 0xbd, 0xf0, 0xc2, 0x3e, 0x53, 0xae, 0xaa, 0x51, 0xbd, 0xd9, 0x5a, 0x7c, 0x68, + 0x6d, 0x97, 0x61, 0x59, 0x28, 0xf1, 0xce, 0x5f, 0x2c, 0x58, 0x19, 0xf7, 0x13, 0x39, 0x34, 0x16, + 0x5a, 0x4a, 0xc9, 0x8f, 0xe7, 0x70, 0x31, 0x12, 0x12, 0xad, 0xaa, 0x12, 0x61, 0x6f, 0x42, 0x65, + 0x40, 0x9a, 0xa6, 0x62, 0x25, 0xa3, 0xa2, 0xb3, 0x09, 0x05, 0xca, 0xba, 0xa4, 0x01, 0x8b, 0x81, + 0x49, 0x0a, 0xba, 0x18, 0xf8, 0x64, 0x15, 0x0a, 0x3e, 0xeb, 0x9a, 0xe0, 0x37, 0xdc, 0xf8, 0xdc, + 0xdd, 0x65, 0xdd, 0x20, 0x0a, 0x64, 0xc0, 0x23, 0x8a, 0x9f, 0x9c, 0xbf, 0x59, 0x98, 0x5c, 0xa8, + 0x16, 0xf9, 0x6c, 0xc4, 0x8e, 0xe9, 0xa9, 0x72, 0x4d, 0xfb, 0xa7, 0xf9, 0xda, 0x7f, 0x94, 0xd5, + 0x7e, 0x6a, 0xfe, 0x64, 0xad, 0x93, 0x50, 0xa7, 0x4c, 0xf6, 0x45, 0x44, 0xd9, 0xef, 0xfa, 0x2c, + 0x91, 0xe4, 0x17, 0x69, 0x44, 0x94, 0xfc, 0x69, 0x69, 0x85, 0x8c, 0xd4, 0x00, 0x48, 
0x0b, 0x96, + 0x98, 0x10, 0x5c, 0x18, 0x2d, 0x88, 0xab, 0x2b, 0x87, 0x2b, 0xe2, 0x8e, 0x7b, 0xa6, 0x2a, 0x07, + 0xd5, 0x0c, 0xce, 0x0a, 0x34, 0xd2, 0x53, 0x93, 0x98, 0x47, 0x09, 0x73, 0x6e, 0x41, 0xfd, 0x30, + 0x8a, 0xfb, 0x32, 0x31, 0x7a, 0x38, 0xff, 0xb0, 0xa0, 0x91, 0x52, 0x34, 0x0f, 0xf9, 0x1a, 0xaa, + 0x43, 0x1f, 0xa7, 0xce, 0xdc, 0xca, 0xd1, 0x6f, 0x14, 0x9f, 0x09, 0x90, 0xf1, 0x6d, 0x56, 0x9c, + 0x7d, 0x02, 0x2b, 0xe3, 0x0c, 0x13, 0x3c, 0xfd, 0xde, 0xa8, 0xa7, 0xc7, 0x03, 0x9f, 0xf1, 0xec, + 0x9f, 0x2d, 0xb8, 0x43, 0x99, 0x2a, 0x85, 0x87, 0x17, 0x5e, 0x8f, 0xed, 0xf0, 0xa8, 0x1b, 0xf4, + 0x52, 0x37, 0xaf, 0xa8, 0xac, 0x4a, 0x25, 0x63, 0x82, 0xb5, 0xa0, 0x7c, 0x1a, 0x7a, 0xb2, 0xcb, + 0xc5, 0x85, 0x11, 0x5e, 0x43, 0xe1, 0x29, 0x8d, 0x0e, 0xbe, 0x92, 0x55, 0xa8, 0x1a, 0xc1, 0xc7, + 0xdc, 0x67, 0xaa, 0x66, 0x54, 0x68, 0x96, 0x44, 0x9a, 0x50, 0x3a, 0xe2, 0xbd, 0x13, 0xef, 0x82, + 0xa9, 0xe2, 0x50, 0xa1, 0xe9, 0xd6, 0xf9, 0xbd, 0x05, 0xf6, 0x24, 0xad, 0x8c, 0x8b, 0x3f, 0x87, + 0xe5, 0xdd, 0xa0, 0xc7, 0x12, 0x1d, 0xfd, 0xca, 0xf6, 0xc6, 0x77, 0xdf, 0xbf, 0xb3, 0xf0, 0xef, + 0xef, 0xdf, 0xb9, 0x9f, 0xa9, 0xab, 0x3c, 0x66, 0x51, 0x87, 0x47, 0xd2, 0x0b, 0x22, 0x26, 0xb0, + 0x3d, 0x7c, 0xe0, 0x2b, 0x88, 0xab, 0x91, 0xd4, 0x48, 0x20, 0x6f, 0xc1, 0xb2, 0x96, 0x6e, 0xae, + 0xbd, 0xd9, 0x39, 0xff, 0x5d, 0x82, 0xda, 0x19, 0x2a, 0x90, 0xfa, 0xc2, 0x05, 0x18, 0xba, 0xd0, + 0xa4, 0xdd, 0xb8, 0x63, 0x33, 0x1c, 0xc4, 0x86, 0xf2, 0xbe, 0x09, 0xb1, 0xb9, 0xae, 0x83, 0x3d, + 0xf9, 0x0a, 0xaa, 0xe9, 0xfa, 0x49, 0x2c, 0x9b, 0x05, 0x95, 0x23, 0x0f, 0x73, 0x72, 0x24, 0xab, + 0x89, 0x9b, 0x81, 0x9a, 0x0c, 0xc9, 0x50, 0xc8, 0x27, 0x70, 0xe7, 0xf0, 0x22, 0xe6, 0x42, 0xee, + 0x78, 0x9d, 0x67, 0x8c, 0x8e, 0x76, 0x81, 0xe2, 0x6a, 0xa1, 0x55, 0xa1, 0x37, 0x33, 0x90, 0x75, + 0x78, 0xc3, 0x0b, 0x43, 0x7e, 0x69, 0x2e, 0x8d, 0x4a, 0xff, 0xe6, 0xd2, 0xaa, 0xd5, 0x2a, 0xd3, + 0xeb, 0x1f, 0xc8, 0x87, 0x70, 0x3b, 0x43, 0x7c, 0x2c, 0x84, 0x77, 0x85, 0xf9, 0xb2, 0xac, 0xf8, + 0x27, 0x7d, 0xc2, 0x0a, 0xb6, 0x1f, 0x44, 0x5e, 0xd8, 0x04, 0xc5, 0xa3, 0x37, 0xc4, 0x81, 0xda, + 0xde, 0x4b, 0x54, 0x89, 0x89, 0xc7, 0x52, 0x8a, 0x66, 0x55, 0x85, 0x62, 0x84, 0x46, 0x4e, 0xa1, + 0xa6, 0x14, 0xd6, 0xba, 0x27, 0xcd, 0x9a, 0x72, 0xda, 0x7a, 0x8e, 0xd3, 0x14, 0xfb, 0x93, 0x38, + 0x73, 0x95, 0x46, 0x24, 0x90, 0x0e, 0x34, 0x52, 0xc7, 0xe9, 0x3b, 0xd8, 0xac, 0x2b, 0x99, 0x8f, + 0xe6, 0x0d, 0x84, 0x46, 0xeb, 0x23, 0xc6, 0x44, 0x62, 0x1a, 0xec, 0xe1, 0x75, 0xf3, 0x24, 0x6b, + 0x36, 0x94, 0xcd, 0x83, 0xbd, 0xfd, 0x29, 0xac, 0x8c, 0xc7, 0x72, 0x9e, 0xa2, 0x6f, 0xff, 0x0a, + 0x6e, 0x4f, 0x50, 0xe1, 0xb5, 0xea, 0xc1, 0xdf, 0x2d, 0x78, 0xe3, 0x9a, 0xdf, 0x08, 0x81, 0xe2, + 0x97, 0x57, 0x31, 0x33, 0x22, 0xd5, 0x9a, 0x1c, 0xc3, 0x12, 0xc6, 0x25, 0x69, 0x2e, 0x2a, 0xa7, + 0x6d, 0xce, 0x13, 0x08, 0x57, 0x21, 0xb5, 0xc3, 0xb4, 0x14, 0xfb, 0x21, 0xc0, 0x90, 0x38, 0x57, + 0xeb, 0xfb, 0x1a, 0xea, 0x26, 0x2a, 0xa6, 0x3c, 0xac, 0xe8, 0x29, 0xc5, 0x80, 0x71, 0x06, 0x19, + 0xb6, 0x8b, 0xc2, 0x9c, 0xed, 0xc2, 0xf9, 0x16, 0x6e, 0x51, 0xe6, 0xf9, 0xfb, 0x41, 0xc8, 0x6e, + 0xae, 0x8a, 0x78, 0xd7, 0x83, 0x90, 0x9d, 0x7a, 0xf2, 0xd9, 0xe0, 0xae, 0x9b, 0x3d, 0xd9, 0x82, + 0x25, 0xea, 0x45, 0x3d, 0x66, 0x8e, 0x7e, 0x2f, 0xe7, 0x68, 0x75, 0x08, 0xf2, 0x52, 0x0d, 0x71, + 0x1e, 0x41, 0x65, 0x40, 0xc3, 0x4a, 0xf5, 0xa4, 0xdb, 0x4d, 0x98, 0xae, 0x7a, 0x05, 0x6a, 0x76, + 0x48, 0x3f, 0x62, 0x51, 0xcf, 0x1c, 0x5d, 0xa0, 0x66, 0xe7, 0xac, 0xe1, 0xa8, 0x92, 0x6a, 0x6e, + 0x5c, 0x43, 0xa0, 0xb8, 0x8b, 0xf3, 0x94, 0xa5, 0x2e, 0x98, 0x5a, 0x3b, 0x3e, 0xb6, 0x39, 0xcf, + 0xdf, 0x0d, 
0xc4, 0xcd, 0x06, 0x36, 0xa1, 0xb4, 0x1b, 0x88, 0x8c, 0x7d, 0xe9, 0x96, 0xac, 0x61, + 0x03, 0xec, 0x84, 0x7d, 0x1f, 0xad, 0x95, 0x4c, 0x44, 0xa6, 0xd2, 0x8f, 0x51, 0x9d, 0xcf, 0xb4, + 0x1f, 0xd5, 0x29, 0x46, 0x99, 0x75, 0x28, 0xb1, 0x48, 0x8a, 0x80, 0xa5, 0x5d, 0x92, 0xb8, 0x7a, + 0x04, 0x76, 0xd5, 0x08, 0xac, 0xba, 0x31, 0x4d, 0x59, 0x9c, 0x4d, 0xb8, 0x85, 0x84, 0xfc, 0x40, + 0x10, 0x28, 0x66, 0x94, 0x54, 0x6b, 0x67, 0x0b, 0x56, 0x86, 0x40, 0x73, 0xf4, 0x1a, 0x14, 0x71, + 0xc0, 0x36, 0x65, 0x7c, 0xd2, 0xb9, 0xea, 0xbb, 0x53, 0x87, 0xea, 0x69, 0x10, 0xa5, 0xfd, 0xd0, + 0x79, 0x65, 0x41, 0xed, 0x94, 0x47, 0xc3, 0x4e, 0x74, 0x0a, 0xb7, 0xd2, 0x1b, 0xf8, 0xf8, 0xf4, + 0x70, 0xc7, 0x8b, 0x53, 0x53, 0x56, 0xaf, 0x87, 0xd9, 0xbc, 0x05, 0x5c, 0xcd, 0xb8, 0x5d, 0xc4, + 0xa6, 0x45, 0xc7, 0xe1, 0xe4, 0x97, 0x50, 0x3a, 0x3a, 0xda, 0x56, 0x92, 0x16, 0xe7, 0x92, 0x94, + 0xc2, 0xc8, 0xa7, 0x50, 0x7a, 0xaa, 0x9e, 0x28, 0x89, 0x69, 0x2c, 0x13, 0x52, 0x4e, 0x1b, 0xaa, + 0xd9, 0x28, 0xeb, 0x70, 0xe1, 0xd3, 0x14, 0xe4, 0xfc, 0xcf, 0x82, 0xea, 0x53, 0x6f, 0x38, 0x6b, + 0x7d, 0x0e, 0xcb, 0xfe, 0x6b, 0x77, 0x5b, 0xbd, 0xc5, 0x5b, 0x1c, 0xb2, 0x17, 0x2c, 0x34, 0xa9, + 0xaa, 0x37, 0x48, 0x4d, 0x9e, 0x71, 0xa1, 0x6f, 0x67, 0x8d, 0xea, 0x0d, 0xe6, 0xb5, 0xcf, 0xa4, + 0x17, 0x84, 0xaa, 0x6b, 0xd5, 0xa8, 0xd9, 0x61, 0xd4, 0xfb, 0x22, 0x54, 0x4d, 0xa9, 0x42, 0x71, + 0x49, 0x1c, 0x28, 0x06, 0x51, 0x97, 0xab, 0xbe, 0x63, 0xaa, 0xdb, 0x19, 0xef, 0x8b, 0x0e, 0x3b, + 0x8c, 0xba, 0x9c, 0xaa, 0x6f, 0xe4, 0x5d, 0x58, 0x16, 0x78, 0x8d, 0x92, 0x66, 0x49, 0x39, 0xa5, + 0x82, 0x5c, 0xfa, 0xb2, 0x99, 0x0f, 0x4e, 0x03, 0x6a, 0xda, 0x6e, 0x33, 0xed, 0xfd, 0x69, 0x11, + 0x6e, 0x9f, 0xb0, 0xcb, 0x9d, 0xd4, 0xae, 0xd4, 0x21, 0xab, 0x50, 0x1d, 0xd0, 0x0e, 0x77, 0x4d, + 0xfa, 0x65, 0x49, 0x78, 0xd8, 0x31, 0xef, 0x47, 0x32, 0x8d, 0xa1, 0x3a, 0x4c, 0x51, 0xa8, 0xf9, + 0x40, 0x7e, 0x0a, 0xa5, 0x13, 0x26, 0xf1, 0x2d, 0xa9, 0xac, 0x6e, 0x6c, 0x54, 0x91, 0xe7, 0x84, + 0x49, 0x1c, 0x8d, 0x68, 0xfa, 0x0d, 0xe7, 0xad, 0x38, 0x9d, 0xb7, 0x8a, 0x93, 0xe6, 0xad, 0xf4, + 0x2b, 0xd9, 0x84, 0x6a, 0x87, 0x47, 0x89, 0x14, 0x5e, 0x80, 0x07, 0x2f, 0x29, 0xe6, 0x1f, 0x21, + 0xb3, 0x0e, 0xec, 0xce, 0xf0, 0x23, 0xcd, 0x72, 0x92, 0xfb, 0x00, 0xec, 0xa5, 0x14, 0xde, 0x01, + 0x4f, 0x64, 0xd2, 0x5c, 0x56, 0x0a, 0x03, 0xe2, 0x90, 0x70, 0x78, 0x4a, 0x33, 0x5f, 0x9d, 0xb7, + 0xe0, 0xcd, 0x51, 0x8f, 0x18, 0x57, 0x3d, 0x82, 0x1f, 0x53, 0x16, 0x32, 0x2f, 0x61, 0xf3, 0x7b, + 0xcb, 0xb1, 0xa1, 0x79, 0x1d, 0x6c, 0x04, 0xff, 0xbf, 0x00, 0xd5, 0xbd, 0x97, 0xac, 0x73, 0xcc, + 0x92, 0xc4, 0xeb, 0x31, 0xf2, 0x36, 0x54, 0x4e, 0x05, 0xef, 0xb0, 0x24, 0x19, 0xc8, 0x1a, 0x12, + 0xc8, 0x27, 0x50, 0x3c, 0x8c, 0x02, 0x69, 0xda, 0xdc, 0x5a, 0xee, 0xd0, 0x1d, 0x48, 0x23, 0x13, + 0x1f, 0x9c, 0xb8, 0x25, 0x5b, 0x50, 0xc4, 0x22, 0x31, 0x4b, 0xa1, 0xf6, 0x33, 0x58, 0xc4, 0x90, + 0x6d, 0xf5, 0x44, 0x0f, 0xbe, 0x61, 0x26, 0x4a, 0xad, 0xfc, 0x0e, 0x13, 0x7c, 0xc3, 0x86, 0x12, + 0x0c, 0x92, 0xec, 0x41, 0xe9, 0x4c, 0x7a, 0x02, 0xe7, 0x34, 0x1d, 0xbd, 0x7b, 0x79, 0x83, 0x88, + 0xe6, 0x1c, 0x4a, 0x49, 0xb1, 0xe8, 0x84, 0xbd, 0x97, 0x81, 0x34, 0xb7, 0x21, 0xcf, 0x09, 0xc8, + 0x96, 0x31, 0x04, 0xb7, 0x88, 0xde, 0xe5, 0x11, 0x6b, 0x96, 0xa6, 0xa2, 0x91, 0x2d, 0x83, 0xc6, + 0x2d, 0xba, 0xe1, 0x2c, 0xe8, 0xe1, 0x7c, 0x57, 0x9e, 0xea, 0x06, 0xcd, 0x98, 0x71, 0x83, 0x26, + 0x6c, 0x97, 0x60, 0x49, 0x4d, 0x33, 0xce, 0x5f, 0x2d, 0xa8, 0x66, 0xe2, 0x34, 0xc3, 0xbd, 0x7b, + 0x1b, 0x8a, 0xf8, 0xca, 0x37, 0xf1, 0x2f, 0xab, 0x5b, 0xc7, 0xa4, 0x47, 0x15, 0x15, 0x0b, 0xc7, + 0xbe, 0xaf, 0x8b, 0x62, 0x9d, 0xe2, 
0x12, 0x29, 0x5f, 0xca, 0x2b, 0x15, 0xb2, 0x32, 0xc5, 0x25, + 0x59, 0x87, 0xf2, 0x19, 0xeb, 0xf4, 0x45, 0x20, 0xaf, 0x54, 0x10, 0x1a, 0x1b, 0x2b, 0xaa, 0x9c, + 0x18, 0x9a, 0xba, 0x9c, 0x03, 0x0e, 0xe7, 0x0b, 0x4c, 0xce, 0xa1, 0x82, 0x04, 0x8a, 0x3b, 0xf8, + 0xd6, 0x41, 0xcd, 0xea, 0x54, 0xad, 0xf1, 0xb9, 0xb9, 0x37, 0xed, 0xb9, 0xb9, 0x97, 0x3e, 0x37, + 0x47, 0x83, 0x8a, 0xdd, 0x27, 0xe3, 0x64, 0xe7, 0x31, 0x54, 0x06, 0x89, 0x87, 0x2f, 0xfd, 0x7d, + 0xdf, 0x9c, 0xb4, 0xb8, 0xef, 0xa3, 0x29, 0x7b, 0x4f, 0xf6, 0xd5, 0x29, 0x65, 0x8a, 0xcb, 0x41, + 0xaf, 0x2f, 0x64, 0x7a, 0xfd, 0x26, 0x3e, 0xa4, 0x33, 0xd9, 0x87, 0x4c, 0x94, 0x5f, 0x26, 0xa9, + 0xca, 0xb8, 0xd6, 0x66, 0x84, 0x89, 0x92, 0xa5, 0xcc, 0x08, 0x13, 0xe7, 0x27, 0x50, 0x1f, 0x89, + 0x17, 0x32, 0xa9, 0x97, 0x9b, 0x19, 0x09, 0x71, 0xbd, 0xf1, 0xaf, 0x0a, 0x54, 0x8e, 0x8e, 0xb6, + 0xb7, 0x45, 0xe0, 0xf7, 0x18, 0xf9, 0x83, 0x05, 0xe4, 0xfa, 0x23, 0x8e, 0x7c, 0x94, 0x7f, 0x33, + 0x26, 0xbf, 0x44, 0xed, 0x8f, 0xe7, 0x44, 0x99, 0xfe, 0xfc, 0x15, 0x2c, 0xa9, 0xd9, 0x90, 0xfc, + 0x6c, 0xc6, 0x99, 0xde, 0x6e, 0x4d, 0x67, 0x34, 0xb2, 0x3b, 0x50, 0x4e, 0xe7, 0x2b, 0x72, 0x3f, + 0x57, 0xbd, 0x91, 0xf1, 0xd1, 0x7e, 0x7f, 0x26, 0x5e, 0x73, 0xc8, 0x6f, 0xa1, 0x64, 0xc6, 0x26, + 0x72, 0x6f, 0x0a, 0x6e, 0x38, 0xc0, 0xd9, 0xf7, 0x67, 0x61, 0x1d, 0x9a, 0x91, 0x8e, 0x47, 0xb9, + 0x66, 0x8c, 0x0d, 0x5f, 0xb9, 0x66, 0x5c, 0x9b, 0xb7, 0x9e, 0x42, 0x11, 0xe7, 0x28, 0x92, 0x57, + 0x4f, 0x32, 0x83, 0x96, 0x9d, 0x17, 0xae, 0x91, 0x01, 0xec, 0x37, 0x58, 0x77, 0xd5, 0x5b, 0x34, + 0xbf, 0xe2, 0x66, 0xfe, 0x3c, 0xb2, 0xef, 0xcd, 0xc0, 0x39, 0x14, 0x6f, 0xde, 0x71, 0xad, 0x19, + 0xfe, 0xc1, 0x99, 0x2e, 0x7e, 0xec, 0xbf, 0x22, 0x0e, 0xb5, 0x6c, 0x3b, 0x25, 0x6e, 0x0e, 0x74, + 0xc2, 0x24, 0x62, 0xb7, 0x67, 0xe6, 0x37, 0x07, 0x7e, 0x8b, 0x6f, 0x82, 0xd1, 0x56, 0x4b, 0x36, + 0x72, 0xdd, 0x31, 0xb1, 0xa9, 0xdb, 0x0f, 0xe6, 0xc2, 0x98, 0xc3, 0x3d, 0xdd, 0xca, 0x4d, 0xbb, + 0x26, 0xf9, 0x9d, 0x69, 0xd0, 0xf2, 0xed, 0x19, 0xf9, 0x5a, 0xd6, 0x87, 0x16, 0xe6, 0x19, 0x8e, + 0x70, 0xb9, 0xb2, 0x33, 0xb3, 0x6d, 0x6e, 0x9e, 0x65, 0x67, 0xc1, 0xed, 0xda, 0x77, 0xaf, 0xee, + 0x5a, 0xff, 0x7c, 0x75, 0xd7, 0xfa, 0xcf, 0xab, 0xbb, 0xd6, 0xf9, 0xb2, 0xfa, 0x63, 0xfe, 0xc1, + 0x0f, 0x01, 0x00, 0x00, 0xff, 0xff, 0x92, 0x5d, 0x25, 0xb8, 0xea, 0x18, 0x00, 0x00, } // Reference imports to suppress errors if they are not otherwise used. @@ -2211,6 +2421,8 @@ type LLBBridgeClient interface { NewContainer(ctx context.Context, in *NewContainerRequest, opts ...grpc.CallOption) (*NewContainerResponse, error) ReleaseContainer(ctx context.Context, in *ReleaseContainerRequest, opts ...grpc.CallOption) (*ReleaseContainerResponse, error) ExecProcess(ctx context.Context, opts ...grpc.CallOption) (LLBBridge_ExecProcessClient, error) + // apicaps:CapGatewayWarnings + Warn(ctx context.Context, in *WarnRequest, opts ...grpc.CallOption) (*WarnResponse, error) } type lLBBridgeClient struct { @@ -2342,6 +2554,15 @@ func (x *lLBBridgeExecProcessClient) Recv() (*ExecMessage, error) { return m, nil } +func (c *lLBBridgeClient) Warn(ctx context.Context, in *WarnRequest, opts ...grpc.CallOption) (*WarnResponse, error) { + out := new(WarnResponse) + err := c.cc.Invoke(ctx, "/moby.buildkit.v1.frontend.LLBBridge/Warn", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + // LLBBridgeServer is the server API for LLBBridge service. 
type LLBBridgeServer interface { // apicaps:CapResolveImage @@ -2361,6 +2582,8 @@ type LLBBridgeServer interface { NewContainer(context.Context, *NewContainerRequest) (*NewContainerResponse, error) ReleaseContainer(context.Context, *ReleaseContainerRequest) (*ReleaseContainerResponse, error) ExecProcess(LLBBridge_ExecProcessServer) error + // apicaps:CapGatewayWarnings + Warn(context.Context, *WarnRequest) (*WarnResponse, error) } // UnimplementedLLBBridgeServer can be embedded to have forward compatible implementations. @@ -2400,6 +2623,9 @@ func (*UnimplementedLLBBridgeServer) ReleaseContainer(ctx context.Context, req * func (*UnimplementedLLBBridgeServer) ExecProcess(srv LLBBridge_ExecProcessServer) error { return status.Errorf(codes.Unimplemented, "method ExecProcess not implemented") } +func (*UnimplementedLLBBridgeServer) Warn(ctx context.Context, req *WarnRequest) (*WarnResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Warn not implemented") +} func RegisterLLBBridgeServer(s *grpc.Server, srv LLBBridgeServer) { s.RegisterService(&_LLBBridge_serviceDesc, srv) @@ -2611,6 +2837,24 @@ func (x *lLBBridgeExecProcessServer) Recv() (*ExecMessage, error) { return m, nil } +func _LLBBridge_Warn_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(WarnRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(LLBBridgeServer).Warn(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/moby.buildkit.v1.frontend.LLBBridge/Warn", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(LLBBridgeServer).Warn(ctx, req.(*WarnRequest)) + } + return interceptor(ctx, in, info, handler) +} + var _LLBBridge_serviceDesc = grpc.ServiceDesc{ ServiceName: "moby.buildkit.v1.frontend.LLBBridge", HandlerType: (*LLBBridgeServer)(nil), @@ -2655,6 +2899,10 @@ var _LLBBridge_serviceDesc = grpc.ServiceDesc{ MethodName: "ReleaseContainer", Handler: _LLBBridge_ReleaseContainer_Handler, }, + { + MethodName: "Warn", + Handler: _LLBBridge_Warn_Handler, + }, }, Streams: []grpc.StreamDesc{ { @@ -3854,6 +4102,121 @@ func (m *PongResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *WarnRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WarnRequest) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WarnRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Ranges) > 0 { + for iNdEx := len(m.Ranges) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ranges[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + } + if m.Info != nil { + { + size, err := m.Info.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + if len(m.Url) > 0 { + i -= len(m.Url) + copy(dAtA[i:], m.Url) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Url))) + i-- + dAtA[i] = 0x2a 
+ } + if len(m.Detail) > 0 { + for iNdEx := len(m.Detail) - 1; iNdEx >= 0; iNdEx-- { + i -= len(m.Detail[iNdEx]) + copy(dAtA[i:], m.Detail[iNdEx]) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Detail[iNdEx]))) + i-- + dAtA[i] = 0x22 + } + } + if len(m.Short) > 0 { + i -= len(m.Short) + copy(dAtA[i:], m.Short) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Short))) + i-- + dAtA[i] = 0x1a + } + if m.Level != 0 { + i = encodeVarintGateway(dAtA, i, uint64(m.Level)) + i-- + dAtA[i] = 0x10 + } + if len(m.Digest) > 0 { + i -= len(m.Digest) + copy(dAtA[i:], m.Digest) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Digest))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *WarnResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *WarnResponse) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *WarnResponse) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + return len(dAtA) - i, nil +} + func (m *NewContainerRequest) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3878,6 +4241,20 @@ func (m *NewContainerRequest) MarshalToSizedBuffer(dAtA []byte) (int, error) { i -= len(m.XXX_unrecognized) copy(dAtA[i:], m.XXX_unrecognized) } + if len(m.ExtraHosts) > 0 { + for iNdEx := len(m.ExtraHosts) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.ExtraHosts[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + } if m.Constraints != nil { { size, err := m.Constraints.MarshalToSizedBuffer(dAtA[:i]) @@ -4188,6 +4565,27 @@ func (m *ExecMessage_Done) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *ExecMessage_Signal) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ExecMessage_Signal) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Signal != nil { + { + size, err := m.Signal.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintGateway(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x42 + } + return len(dAtA) - i, nil +} func (m *InitMessage) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4228,20 +4626,20 @@ func (m *InitMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { dAtA[i] = 0x20 } if len(m.Fds) > 0 { - dAtA24 := make([]byte, len(m.Fds)*10) - var j23 int + dAtA26 := make([]byte, len(m.Fds)*10) + var j25 int for _, num := range m.Fds { for num >= 1<<7 { - dAtA24[j23] = uint8(uint64(num)&0x7f | 0x80) + dAtA26[j25] = uint8(uint64(num)&0x7f | 0x80) num >>= 7 - j23++ + j25++ } - dAtA24[j23] = uint8(num) - j23++ + dAtA26[j25] = uint8(num) + j25++ } - i -= j23 - copy(dAtA[i:], dAtA24[:j23]) - i = encodeVarintGateway(dAtA, i, uint64(j23)) + i -= j25 + copy(dAtA[i:], dAtA26[:j25]) + i = encodeVarintGateway(dAtA, i, uint64(j25)) i-- dAtA[i] = 0x1a } @@ -4451,6 +4849,40 @@ func (m *ResizeMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *SignalMessage) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA 
= make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SignalMessage) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SignalMessage) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.XXX_unrecognized != nil { + i -= len(m.XXX_unrecognized) + copy(dAtA[i:], m.XXX_unrecognized) + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintGateway(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintGateway(dAtA []byte, offset int, v uint64) int { offset -= sovGateway(v) base := offset @@ -5008,6 +5440,61 @@ func (m *PongResponse) Size() (n int) { return n } +func (m *WarnRequest) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Digest) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Level != 0 { + n += 1 + sovGateway(uint64(m.Level)) + } + l = len(m.Short) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Detail) > 0 { + for _, b := range m.Detail { + l = len(b) + n += 1 + l + sovGateway(uint64(l)) + } + } + l = len(m.Url) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.Info != nil { + l = m.Info.Size() + n += 1 + l + sovGateway(uint64(l)) + } + if len(m.Ranges) > 0 { + for _, e := range m.Ranges { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + +func (m *WarnResponse) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func (m *NewContainerRequest) Size() (n int) { if m == nil { return 0 @@ -5035,6 +5522,12 @@ func (m *NewContainerRequest) Size() (n int) { l = m.Constraints.Size() n += 1 + l + sovGateway(uint64(l)) } + if len(m.ExtraHosts) > 0 { + for _, e := range m.ExtraHosts { + l = e.Size() + n += 1 + l + sovGateway(uint64(l)) + } + } if m.XXX_unrecognized != nil { n += len(m.XXX_unrecognized) } @@ -5172,6 +5665,18 @@ func (m *ExecMessage_Done) Size() (n int) { } return n } +func (m *ExecMessage_Signal) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Signal != nil { + l = m.Signal.Size() + n += 1 + l + sovGateway(uint64(l)) + } + return n +} func (m *InitMessage) Size() (n int) { if m == nil { return 0 @@ -5288,6 +5793,22 @@ func (m *ResizeMessage) Size() (n int) { return n } +func (m *SignalMessage) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovGateway(uint64(l)) + } + if m.XXX_unrecognized != nil { + n += len(m.XXX_unrecognized) + } + return n +} + func sovGateway(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -8639,6 +9160,327 @@ func (m *PongResponse) Unmarshal(dAtA []byte) error { } return nil } +func (m *WarnRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WarnRequest: wiretype end group for 
non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WarnRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Digest", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Digest = github_com_opencontainers_go_digest.Digest(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Level", wireType) + } + m.Level = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Level |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Short", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Short = append(m.Short[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Short == nil { + m.Short = []byte{} + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Detail", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + byteLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Detail = append(m.Detail, make([]byte, postIndex-iNdEx)) + copy(m.Detail[len(m.Detail)-1], dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Url", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Url = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Info", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Info == nil { + m.Info = &pb.SourceInfo{} + } + if err := m.Info.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ranges", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ranges = append(m.Ranges, &pb.Range{}) + if err := m.Ranges[len(m.Ranges)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) 
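// [editor's note] The default case above is what keeps the protocol
// forward compatible: field numbers this decoder does not recognize are
// measured with skipGateway and their raw bytes are parked in
// XXX_unrecognized, and MarshalToSizedBuffer copies those bytes back out, so
// unknown fields such as the ones added in this patch survive an
// unmarshal/marshal round trip through an older peer instead of being
// silently dropped.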
+ iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *WarnResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: WarnResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: WarnResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *NewContainerRequest) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8825,6 +9667,40 @@ func (m *NewContainerRequest) Unmarshal(dAtA []byte) error { return err } iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ExtraHosts", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ExtraHosts = append(m.ExtraHosts, &pb.HostIP{}) + if err := m.ExtraHosts[len(m.ExtraHosts)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -9303,6 +10179,41 @@ func (m *ExecMessage) Unmarshal(dAtA []byte) error { } m.Input = &ExecMessage_Done{v} iNdEx = postIndex + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Signal", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &SignalMessage{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Input = &ExecMessage_Signal{v} + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipGateway(dAtA[iNdEx:]) @@ -9980,6 +10891,89 @@ func (m *ResizeMessage) Unmarshal(dAtA []byte) error { } return nil } +func (m *SignalMessage) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= 
uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SignalMessage: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SignalMessage: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowGateway + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthGateway + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthGateway + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipGateway(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthGateway + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func skipGateway(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto index 5fc8a021f7..31aaf3b20d 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto @@ -9,6 +9,7 @@ import "github.com/moby/buildkit/api/types/worker.proto"; import "github.com/moby/buildkit/util/apicaps/pb/caps.proto"; import "github.com/tonistiigi/fsutil/types/stat.proto"; + option (gogoproto.sizer_all) = true; option (gogoproto.marshaler_all) = true; option (gogoproto.unmarshaler_all) = true; @@ -31,7 +32,10 @@ service LLBBridge { rpc NewContainer(NewContainerRequest) returns (NewContainerResponse); rpc ReleaseContainer(ReleaseContainerRequest) returns (ReleaseContainerResponse); - rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage); + rpc ExecProcess(stream ExecMessage) returns (stream ExecMessage); + + // apicaps:CapGatewayWarnings + rpc Warn(WarnRequest) returns (WarnResponse); } message Result { @@ -169,6 +173,18 @@ message PongResponse{ repeated moby.buildkit.v1.types.WorkerRecord Workers = 3; } +message WarnRequest { + string digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false]; + int64 level = 2; + bytes short = 3; + repeated bytes detail = 4; + string url = 5; + pb.SourceInfo info = 6; + repeated pb.Range ranges = 7; +} + +message WarnResponse{} + message NewContainerRequest { string ContainerID = 1; // For mount input values we can use random identifiers passed with ref @@ -176,6 +192,7 @@ message NewContainerRequest { pb.NetMode Network = 3; pb.Platform platform = 4; pb.WorkerConstraints constraints = 5; + repeated pb.HostIP extraHosts = 6; } message NewContainerResponse{} @@ -206,6 +223,8 @@ message ExecMessage { // DoneMessage from server to client will be the last message for any // process. Note that FdMessage might be sent after ExitMessage. 
DoneMessage Done = 7; + // SignalMessage is used from client to server to send signal events + SignalMessage Signal = 8; } } @@ -236,3 +255,9 @@ message ResizeMessage{ uint32 Rows = 1; uint32 Cols = 2; } + +message SignalMessage { + // we only send name (ie HUP, INT) because the int values + // are platform dependent. + string Name = 1; +} diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go index e17b9daf6b..2e55abb1b0 100644 --- a/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go +++ b/vendor/github.com/moby/buildkit/frontend/gateway/pb/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1_frontend //nolint:golint +package moby_buildkit_v1_frontend //nolint:revive //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. gateway.proto diff --git a/vendor/github.com/moby/buildkit/frontend/gateway/util.go b/vendor/github.com/moby/buildkit/frontend/gateway/util.go new file mode 100644 index 0000000000..0de8353402 --- /dev/null +++ b/vendor/github.com/moby/buildkit/frontend/gateway/util.go @@ -0,0 +1,24 @@ +package gateway + +import ( + "net" + + "github.com/moby/buildkit/executor" + "github.com/moby/buildkit/solver/pb" + "github.com/pkg/errors" +) + +func ParseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) { + out := make([]executor.HostIP, len(ips)) + for i, hip := range ips { + ip := net.ParseIP(hip.IP) + if ip == nil { + return nil, errors.Errorf("failed to parse IP %s", hip.IP) + } + out[i] = executor.HostIP{ + IP: ip, + Host: hip.Host, + } + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/session/content/attachable.go b/vendor/github.com/moby/buildkit/session/content/attachable.go index 253b37a23e..4656908fe7 100644 --- a/vendor/github.com/moby/buildkit/session/content/attachable.go +++ b/vendor/github.com/moby/buildkit/session/content/attachable.go @@ -9,7 +9,7 @@ import ( "github.com/containerd/containerd/services/content/contentserver" "github.com/moby/buildkit/session" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/metadata" @@ -104,7 +104,7 @@ func (cs *attachableContentStore) Writer(ctx context.Context, opts ...content.Wr return store.Writer(ctx, opts...) 
} -func (cs *attachableContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { +func (cs *attachableContentStore) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { store, err := cs.choose(ctx) if err != nil { return nil, err diff --git a/vendor/github.com/moby/buildkit/session/content/caller.go b/vendor/github.com/moby/buildkit/session/content/caller.go index 70e82130d7..b712bf9174 100644 --- a/vendor/github.com/moby/buildkit/session/content/caller.go +++ b/vendor/github.com/moby/buildkit/session/content/caller.go @@ -8,7 +8,7 @@ import ( "github.com/containerd/containerd/content/proxy" "github.com/moby/buildkit/session" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "google.golang.org/grpc/metadata" ) @@ -75,7 +75,7 @@ func (cs *callerContentStore) Writer(ctx context.Context, opts ...content.Writer return w, errors.WithStack(err) } -func (cs *callerContentStore) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { +func (cs *callerContentStore) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { ctx = cs.choose(ctx) ra, err := cs.store.ReaderAt(ctx, desc) return ra, errors.WithStack(err) diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go index a03326f6d5..27bc5d5414 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go +++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go @@ -7,8 +7,9 @@ import ( "os" "time" + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" "google.golang.org/grpc" @@ -70,10 +71,10 @@ func (wc *streamWriterCloser) Close() error { return nil } -func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, filter func(string, *fstypes.Stat) bool) (err error) { +func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress progressCb, differ fsutil.DiffType, filter func(string, *fstypes.Stat) bool) (err error) { st := time.Now() defer func() { - logrus.Debugf("diffcopy took: %v", time.Since(st)) + bklog.G(ds.Context()).Debugf("diffcopy took: %v", time.Since(st)) }() var cf fsutil.ChangeFunc var ch fsutil.ContentHasher @@ -93,6 +94,7 @@ func recvDiffCopy(ds grpc.ClientStream, dest string, cu CacheUpdater, progress p ContentHasher: ch, ProgressCb: progress, Filter: fsutil.FilterFunc(filter), + Differ: differ, })) } diff --git a/vendor/github.com/moby/buildkit/session/filesync/filesync.go b/vendor/github.com/moby/buildkit/session/filesync/filesync.go index af62d1c2c6..ae3f29f86c 100644 --- a/vendor/github.com/moby/buildkit/session/filesync/filesync.go +++ b/vendor/github.com/moby/buildkit/session/filesync/filesync.go @@ -64,13 +64,13 @@ func (sp *fsSyncProvider) TarStream(stream FileSync_TarStreamServer) error { func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retErr error) { var pr *protocol for _, p := range supportedProtocols { - if method == p.name && isProtoSupported(p.name) { + if method == p.name { pr = &p break } } if pr == nil { - return errors.New("failed to negotiate protocol") + return InvalidSessionError{errors.New("failed to negotiate protocol")} } opts, _ := 
metadata.FromIncomingContext(stream.Context()) // if no metadata continue with empty object @@ -83,7 +83,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr dir, ok := sp.dirs[dirName] if !ok { - return status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName) + return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)} } excludes := opts[keyExcludePatterns] @@ -130,15 +130,7 @@ type progressCb func(int, bool) type protocol struct { name string sendFn func(stream Stream, fs fsutil.FS, progress progressCb) error - recvFn func(stream grpc.ClientStream, destDir string, cu CacheUpdater, progress progressCb, mapFunc func(string, *fstypes.Stat) bool) error -} - -func isProtoSupported(p string) bool { - // TODO: this should be removed after testing if stability is confirmed - if override := os.Getenv("BUILD_STREAM_PROTOCOL"); override != "" { - return strings.EqualFold(p, override) - } - return true + recvFn func(stream grpc.ClientStream, destDir string, cu CacheUpdater, progress progressCb, differ fsutil.DiffType, mapFunc func(string, *fstypes.Stat) bool) error } var supportedProtocols = []protocol{ @@ -160,6 +152,7 @@ type FSSendRequestOpt struct { CacheUpdater CacheUpdater ProgressCb func(int, bool) Filter func(string, *fstypes.Stat) bool + Differ fsutil.DiffType } // CacheUpdater is an object capable of sending notifications for the cache hash changes @@ -173,7 +166,7 @@ type CacheUpdater interface { func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { var pr *protocol for _, p := range supportedProtocols { - if isProtoSupported(p.name) && c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { + if c.Supports(session.MethodURL(_FileSync_serviceDesc.ServiceName, p.name)) { pr = &p break } @@ -227,7 +220,7 @@ func FSSync(ctx context.Context, c session.Caller, opt FSSendRequestOpt) error { panic(fmt.Sprintf("invalid protocol: %q", pr.name)) } - return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Filter) + return pr.recvFn(stream, opt.DestDir, opt.CacheUpdater, opt.ProgressCb, opt.Differ, opt.Filter) } // NewFSSyncTargetDir allows writing into a directory @@ -324,3 +317,15 @@ func CopyFileWriter(ctx context.Context, md map[string]string, c session.Caller) return newStreamWriter(cc), nil } + +type InvalidSessionError struct { + err error +} + +func (e InvalidSessionError) Error() string { + return e.err.Error() +} + +func (e InvalidSessionError) Unwrap() error { + return e.err +} diff --git a/vendor/github.com/moby/buildkit/session/grpc.go b/vendor/github.com/moby/buildkit/session/grpc.go index 728ea2e17c..a7237ac350 100644 --- a/vendor/github.com/moby/buildkit/session/grpc.go +++ b/vendor/github.com/moby/buildkit/session/grpc.go @@ -7,13 +7,14 @@ import ( "time" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/grpcerrors" - opentracing "github.com/opentracing/opentracing-go" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel/trace" "golang.org/x/net/http2" "google.golang.org/grpc" + "google.golang.org/grpc/credentials/insecure" "google.golang.org/grpc/health/grpc_health_v1" ) @@ -22,7 +23,7 @@ func serve(ctx context.Context, grpcServer *grpc.Server, conn net.Conn) { <-ctx.Done() conn.Close() 
}() - logrus.Debugf("serving grpc connection") + bklog.G(ctx).Debugf("serving grpc connection") (&http2.Server{}).ServeConn(conn, &http2.ServeConnOpts{Handler: grpcServer}) } @@ -40,13 +41,12 @@ func grpcClientConn(ctx context.Context, conn net.Conn) (context.Context, *grpc. dialOpts := []grpc.DialOption{ dialer, - grpc.WithInsecure(), + grpc.WithTransportCredentials(insecure.NewCredentials()), } - if span := opentracing.SpanFromContext(ctx); span != nil { - tracer := span.Tracer() - unary = append(unary, otgrpc.OpenTracingClientInterceptor(tracer, traceFilter())) - stream = append(stream, otgrpc.OpenTracingStreamClientInterceptor(tracer, traceFilter())) + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + unary = append(unary, filterClient(otelgrpc.UnaryClientInterceptor(otelgrpc.WithTracerProvider(span.TracerProvider()), otelgrpc.WithPropagators(propagators)))) + stream = append(stream, otelgrpc.StreamClientInterceptor(otelgrpc.WithTracerProvider(span.TracerProvider()), otelgrpc.WithPropagators(propagators))) } unary = append(unary, grpcerrors.UnaryClientInterceptor) diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go index 5f0cf3d77a..25e3ddee99 100644 --- a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go +++ b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go @@ -16,11 +16,8 @@ import ( func Dialer(api controlapi.ControlClient) session.Dialer { return func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) { - meta = lowerHeaders(meta) - md := metadata.MD(meta) - ctx = metadata.NewOutgoingContext(ctx, md) stream, err := api.Session(ctx) @@ -126,7 +123,6 @@ func (c *conn) Close() (err error) { c.lastBuf = append(c.lastBuf, c.buf...) 
} c.readMu.Unlock() - }) return nil } diff --git a/vendor/github.com/moby/buildkit/session/secrets/secrets.go b/vendor/github.com/moby/buildkit/session/secrets/secrets.go index 604199df8e..ac251ae15e 100644 --- a/vendor/github.com/moby/buildkit/session/secrets/secrets.go +++ b/vendor/github.com/moby/buildkit/session/secrets/secrets.go @@ -22,7 +22,7 @@ func GetSecret(ctx context.Context, c session.Caller, id string) ([]byte, error) }) if err != nil { if code := grpcerrors.Code(err); code == codes.Unimplemented || code == codes.NotFound { - return nil, errors.Wrapf(ErrNotFound, "secret %s not found", id) + return nil, errors.Wrapf(ErrNotFound, "secret %s", id) } return nil, err } diff --git a/vendor/github.com/moby/buildkit/session/session.go b/vendor/github.com/moby/buildkit/session/session.go index 02c7420a8e..50cb3b4486 100644 --- a/vendor/github.com/moby/buildkit/session/session.go +++ b/vendor/github.com/moby/buildkit/session/session.go @@ -6,11 +6,12 @@ import ( "strings" grpc_middleware "github.com/grpc-ecosystem/go-grpc-middleware" - "github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/grpcerrors" - opentracing "github.com/opentracing/opentracing-go" "github.com/pkg/errors" + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" "google.golang.org/grpc" "google.golang.org/grpc/health" "google.golang.org/grpc/health/grpc_health_v1" @@ -23,6 +24,8 @@ const ( headerSessionMethod = "X-Docker-Expose-Session-Grpc-Method" ) +var propagators = propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{}) + // Dialer returns a connection that can be used by the session type Dialer func(ctx context.Context, proto string, meta map[string][]string) (net.Conn, error) @@ -51,10 +54,10 @@ func NewSession(ctx context.Context, name, sharedKey string) (*Session, error) { var stream []grpc.StreamServerInterceptor serverOpts := []grpc.ServerOption{} - if span := opentracing.SpanFromContext(ctx); span != nil { - tracer := span.Tracer() - unary = append(unary, otgrpc.OpenTracingServerInterceptor(tracer, traceFilter())) - stream = append(stream, otgrpc.OpenTracingStreamServerInterceptor(span.Tracer(), traceFilter())) + + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + unary = append(unary, filterServer(otelgrpc.UnaryServerInterceptor(otelgrpc.WithTracerProvider(span.TracerProvider()), otelgrpc.WithPropagators(propagators)))) + stream = append(stream, otelgrpc.StreamServerInterceptor(otelgrpc.WithTracerProvider(span.TracerProvider()), otelgrpc.WithPropagators(propagators))) } unary = append(unary, grpcerrors.UnaryServerInterceptor) @@ -152,10 +155,21 @@ func MethodURL(s, m string) string { return "/" + s + "/" + m } -func traceFilter() otgrpc.Option { - return otgrpc.IncludingSpans(func(parentSpanCtx opentracing.SpanContext, - method string, - req, resp interface{}) bool { - return !strings.HasSuffix(method, "Health/Check") - }) +// updates needed in opentelemetry-contrib to avoid this +func filterServer(intercept grpc.UnaryServerInterceptor) grpc.UnaryServerInterceptor { + return func(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { + if strings.HasSuffix(info.FullMethod, "Health/Check") { + return handler(ctx, req) + } + return intercept(ctx, req, info, handler) + } +} + +func filterClient(intercept 
grpc.UnaryClientInterceptor) grpc.UnaryClientInterceptor { + return func(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { + if strings.HasSuffix(method, "Health/Check") { + return invoker(ctx, method, req, reply, cc, opts...) + } + return intercept(ctx, method, req, reply, cc, invoker, opts...) + } } diff --git a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go index 1be21e9004..3c730523a7 100644 --- a/vendor/github.com/moby/buildkit/snapshot/containerd/content.go +++ b/vendor/github.com/moby/buildkit/snapshot/containerd/content.go @@ -6,7 +6,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/namespaces" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -53,7 +53,7 @@ func (c *nsContent) Abort(ctx context.Context, ref string) error { return c.Store.Abort(ctx, ref) } -func (c *nsContent) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { +func (c *nsContent) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { ctx = namespaces.WithNamespace(ctx, c.ns) return c.Store.ReaderAt(ctx, desc) } diff --git a/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go new file mode 100644 index 0000000000..5010519365 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/diffapply_unix.go @@ -0,0 +1,852 @@ +//go:build !windows +// +build !windows + +package snapshot + +import ( + "context" + gofs "io/fs" + "os" + "path/filepath" + "strings" + "syscall" + + "github.com/containerd/containerd/leases" + "github.com/containerd/containerd/mount" + "github.com/containerd/containerd/snapshots" + "github.com/containerd/continuity/fs" + "github.com/containerd/continuity/sysx" + "github.com/containerd/stargz-snapshotter/snapshot/overlayutils" + "github.com/hashicorp/go-multierror" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/overlay" + "github.com/pkg/errors" + "golang.org/x/sys/unix" +) + +// diffApply applies the provided diffs to the dest Mountable and returns the correctly calculated disk usage +// that accounts for any hardlinks made from existing snapshots. ctx is expected to have a temporary lease +// associated with it. 
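// [editor's note] Each Diff pairs a lower and an upper snapshot key; a
// differ walks that pair and streams per-path changes into an applier that
// recreates them under dest. A hedged usage sketch (snapshot keys are
// placeholders):
//
//	usage, err := sn.diffApply(ctx, destMountable,
//		Diff{Lower: "", Upper: "base"},        // everything in "base"
//		Diff{Lower: "base", Upper: "derived"}, // only what "derived" adds on top
//	)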
+func (sn *mergeSnapshotter) diffApply(ctx context.Context, dest Mountable, diffs ...Diff) (_ snapshots.Usage, rerr error) { + a, err := applierFor(dest, sn.tryCrossSnapshotLink, sn.userxattr) + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to create applier") + } + defer func() { + releaseErr := a.Release() + if releaseErr != nil { + rerr = multierror.Append(rerr, errors.Wrapf(releaseErr, "failed to release applier")).ErrorOrNil() + } + }() + + // TODO:(sipsma) optimization: parallelize differ and applier in separate goroutines, connected with a buffered channel + + for _, diff := range diffs { + var lowerMntable Mountable + if diff.Lower != "" { + if info, err := sn.Stat(ctx, diff.Lower); err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to stat lower snapshot %s", diff.Lower) + } else if info.Kind == snapshots.KindCommitted { + lowerMntable, err = sn.View(ctx, identity.NewID(), diff.Lower) + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to mount lower snapshot view %s", diff.Lower) + } + } else { + lowerMntable, err = sn.Mounts(ctx, diff.Lower) + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to mount lower snapshot %s", diff.Lower) + } + } + } + var upperMntable Mountable + if diff.Upper != "" { + if info, err := sn.Stat(ctx, diff.Upper); err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to stat upper snapshot %s", diff.Upper) + } else if info.Kind == snapshots.KindCommitted { + upperMntable, err = sn.View(ctx, identity.NewID(), diff.Upper) + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to mount upper snapshot view %s", diff.Upper) + } + } else { + upperMntable, err = sn.Mounts(ctx, diff.Upper) + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to mount upper snapshot %s", diff.Upper) + } + } + } else { + // create an empty view + upperMntable, err = sn.View(ctx, identity.NewID(), "") + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to mount empty upper snapshot view %s", diff.Upper) + } + } + d, err := differFor(lowerMntable, upperMntable) + if err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to create differ") + } + defer func() { + rerr = multierror.Append(rerr, d.Release()).ErrorOrNil() + }() + if err := d.HandleChanges(ctx, a.Apply); err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to handle changes") + } + } + + if err := a.Flush(); err != nil { + return snapshots.Usage{}, errors.Wrapf(err, "failed to flush changes") + } + return a.Usage() +} + +type change struct { + kind fs.ChangeKind + subPath string + srcPath string + srcStat *syscall.Stat_t + // linkSubPath is set to a subPath of a previous change from the same + // differ instance that is a hardlink to this one, if any. 
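// [editor's note] Hardlink groups are detected by the differ via stat: any
// non-directory with Nlink > 1 is recorded in an inode -> subPath map the
// first time it is seen, and subsequent changes resolving to the same inode
// carry that first subPath here, letting applyHardlink recreate the link
// with os.Link instead of copying the content again.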
+ linkSubPath string + } + + type changeApply struct { + *change + dstPath string + dstStat *syscall.Stat_t + setOpaque bool + } + + type inode struct { + ino uint64 + dev uint64 + } + + func statInode(stat *syscall.Stat_t) inode { + if stat == nil { + return inode{} + } + return inode{ + ino: stat.Ino, + dev: stat.Dev, + } + } + + type applier struct { + root string + release func() error + lowerdirs []string // ordered highest -> lowest, the order we want to check them in + crossSnapshotLinks map[inode]struct{} + createWhiteoutDelete bool + userxattr bool + dirModTimes map[string]unix.Timespec // map of dstPath -> mtime that should be set on that subPath + } + + func applierFor(dest Mountable, tryCrossSnapshotLink, userxattr bool) (_ *applier, rerr error) { + a := &applier{ + dirModTimes: make(map[string]unix.Timespec), + userxattr: userxattr, + } + defer func() { + if rerr != nil { + rerr = multierror.Append(rerr, a.Release()).ErrorOrNil() + } + }() + if tryCrossSnapshotLink { + a.crossSnapshotLinks = make(map[inode]struct{}) + } + + mnts, release, err := dest.Mount() + if err != nil { + return nil, err + } + a.release = release + + if len(mnts) != 1 { + return nil, errors.Errorf("expected exactly one mount, got %d", len(mnts)) + } + mnt := mnts[0] + + switch mnt.Type { + case "overlay": + for _, opt := range mnt.Options { + if strings.HasPrefix(opt, "upperdir=") { + a.root = strings.TrimPrefix(opt, "upperdir=") + } else if strings.HasPrefix(opt, "lowerdir=") { + a.lowerdirs = strings.Split(strings.TrimPrefix(opt, "lowerdir="), ":") + } + } + if a.root == "" { + return nil, errors.Errorf("could not find upperdir in mount options %v", mnt.Options) + } + if len(a.lowerdirs) == 0 { + return nil, errors.Errorf("could not find lowerdir in mount options %v", mnt.Options) + } + a.createWhiteoutDelete = true + case "bind", "rbind": + a.root = mnt.Source + default: + mnter := LocalMounter(dest) + root, err := mnter.Mount() + if err != nil { + return nil, err + } + a.root = root + prevRelease := a.release + a.release = func() error { + err := mnter.Unmount() + return multierror.Append(err, prevRelease()).ErrorOrNil() + } + } + + a.root, err = filepath.EvalSymlinks(a.root) + if err != nil { + return nil, errors.Wrapf(err, "failed to resolve symlinks in %s", a.root) + } + return a, nil + } + + func (a *applier) Apply(ctx context.Context, c *change) error { + if c == nil { + return errors.New("nil change") + } + + if c.kind == fs.ChangeKindUnmodified { + return nil + } + + dstPath, err := safeJoin(a.root, c.subPath) + if err != nil { + return errors.Wrapf(err, "failed to join paths %q and %q", a.root, c.subPath) + } + var dstStat *syscall.Stat_t + if dstfi, err := os.Lstat(dstPath); err == nil { + stat, ok := dstfi.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("failed to get stat_t for %T", dstfi.Sys()) + } + dstStat = stat + } else if !os.IsNotExist(err) { + return errors.Wrap(err, "failed to stat during copy apply") + } + + ca := &changeApply{ + change: c, + dstPath: dstPath, + dstStat: dstStat, + } + + if done, err := a.applyDelete(ctx, ca); err != nil { + return errors.Wrap(err, "failed to delete during apply") + } else if done { + return nil + } + + if done, err := a.applyHardlink(ctx, ca); err != nil { + return errors.Wrapf(err, "failed to hardlink during apply") + } else if done { + return nil + } + + if err := a.applyCopy(ctx, ca); err != nil { + return errors.Wrapf(err, "failed to copy during apply") + } + return nil + } + + func (a *applier) applyDelete(ctx context.Context, ca *changeApply) 
(bool, error) { + // Even when not deleting, we may be overwriting a file, in which case we should + // delete the existing file at the path, if any. Don't delete when both are dirs + // in this case though because they should get merged, not overwritten. + deleteOnly := ca.kind == fs.ChangeKindDelete + overwrite := !deleteOnly && ca.dstStat != nil && ca.srcStat.Mode&ca.dstStat.Mode&unix.S_IFMT != unix.S_IFDIR + + if !deleteOnly && !overwrite { + // nothing to delete, continue on + return false, nil + } + + if err := os.RemoveAll(ca.dstPath); err != nil { + return false, errors.Wrap(err, "failed to remove during apply") + } + ca.dstStat = nil + + if overwrite && a.createWhiteoutDelete && ca.srcStat.Mode&unix.S_IFMT == unix.S_IFDIR { + // If we are using an overlay snapshotter and overwriting an existing non-directory + // with a directory, we need this new dir to be opaque so that any files from lowerdirs + // under it are not visible. + ca.setOpaque = true + } + + if deleteOnly && a.createWhiteoutDelete { + // only create a whiteout device if there is something to delete + var foundLower bool + for _, lowerdir := range a.lowerdirs { + lowerPath, err := safeJoin(lowerdir, ca.subPath) + if err != nil { + return false, errors.Wrapf(err, "failed to join lowerdir %q and subPath %q", lowerdir, ca.subPath) + } + if _, err := os.Lstat(lowerPath); err == nil { + foundLower = true + break + } else if !errors.Is(err, unix.ENOENT) && !errors.Is(err, unix.ENOTDIR) { + return false, errors.Wrapf(err, "failed to stat lowerPath %q", lowerPath) + } + } + if foundLower { + ca.kind = fs.ChangeKindAdd + if ca.srcStat == nil { + ca.srcStat = &syscall.Stat_t{ + Mode: syscall.S_IFCHR, + Rdev: unix.Mkdev(0, 0), + } + ca.srcPath = "" + } + return false, nil + } + } + + return deleteOnly, nil +} + +func (a *applier) applyHardlink(ctx context.Context, ca *changeApply) (bool, error) { + switch ca.srcStat.Mode & unix.S_IFMT { + case unix.S_IFDIR, unix.S_IFIFO, unix.S_IFSOCK: + // Directories can't be hard-linked, so they just have to be recreated. + // Named pipes and sockets can be hard-linked but is best to avoid as it could enable IPC in weird cases. + return false, nil + + default: + var linkSrcPath string + if ca.linkSubPath != "" { + // there's an already applied path that we should link from + path, err := safeJoin(a.root, ca.linkSubPath) + if err != nil { + return false, errors.Errorf("failed to get hardlink source path: %v", err) + } + linkSrcPath = path + } else if a.crossSnapshotLinks != nil { + // we can try to link across snapshots from the source file + linkSrcPath = ca.srcPath + a.crossSnapshotLinks[statInode(ca.srcStat)] = struct{}{} + } + if linkSrcPath == "" { + // nothing to hardlink from, will have to copy the file + return false, nil + } + + if err := os.Link(linkSrcPath, ca.dstPath); errors.Is(err, unix.EXDEV) || errors.Is(err, unix.EMLINK) { + // These errors are expected when the hardlink would cross devices or would exceed the maximum number of links for the inode. + // Just fallback to a copy. 
+ bklog.G(ctx).WithError(err).WithField("srcPath", linkSrcPath).WithField("dstPath", ca.dstPath).Debug("hardlink failed") + if a.crossSnapshotLinks != nil { + delete(a.crossSnapshotLinks, statInode(ca.srcStat)) + } + return false, nil + } else if err != nil { + return false, errors.Wrap(err, "failed to hardlink during apply") + } + + return true, nil + } +} + +func (a *applier) applyCopy(ctx context.Context, ca *changeApply) error { + switch ca.srcStat.Mode & unix.S_IFMT { + case unix.S_IFREG: + if err := fs.CopyFile(ca.dstPath, ca.srcPath); err != nil { + return errors.Wrapf(err, "failed to copy from %s to %s during apply", ca.srcPath, ca.dstPath) + } + case unix.S_IFDIR: + if ca.dstStat == nil { + // dstPath doesn't exist, make it a dir + if err := unix.Mkdir(ca.dstPath, ca.srcStat.Mode); err != nil { + return errors.Wrapf(err, "failed to create applied dir at %q from %q", ca.dstPath, ca.srcPath) + } + } + case unix.S_IFLNK: + if target, err := os.Readlink(ca.srcPath); err != nil { + return errors.Wrap(err, "failed to read symlink during apply") + } else if err := os.Symlink(target, ca.dstPath); err != nil { + return errors.Wrap(err, "failed to create symlink during apply") + } + case unix.S_IFBLK, unix.S_IFCHR, unix.S_IFIFO, unix.S_IFSOCK: + if err := unix.Mknod(ca.dstPath, ca.srcStat.Mode, int(ca.srcStat.Rdev)); err != nil { + return errors.Wrap(err, "failed to mknod during apply") + } + default: + // should never be here, all types should be handled + return errors.Errorf("unhandled file type %d during merge at path %q", ca.srcStat.Mode&unix.S_IFMT, ca.srcPath) + } + + if ca.srcPath != "" { + xattrs, err := sysx.LListxattr(ca.srcPath) + if err != nil { + return errors.Wrapf(err, "failed to list xattrs of src path %s", ca.srcPath) + } + for _, xattr := range xattrs { + if isOpaqueXattr(xattr) { + // Don't recreate opaque xattrs during merge based on the source file. The differs take care of converting + // source path from the "opaque whiteout" format to the "explicit whiteout" format. The only time we set + // opaque xattrs is handled after this loop below. 
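// [editor's note] In overlayfs an "opaque" directory (xattr
// trusted.overlay.opaque, or user.overlay.opaque under the userxattr mount
// option, set to "y") hides all entries at the same path in lower layers.
// Copying that marker verbatim from the source snapshot could hide the
// wrong lowerdirs in the merged mount, hence the skip here.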
+ continue + } + xattrVal, err := sysx.LGetxattr(ca.srcPath, xattr) + if err != nil { + return errors.Wrapf(err, "failed to get xattr %s of src path %s", xattr, ca.srcPath) + } + if err := sysx.LSetxattr(ca.dstPath, xattr, xattrVal, 0); err != nil { + // This can often fail, so just log it: https://github.com/moby/buildkit/issues/1189 + bklog.G(ctx).Debugf("failed to set xattr %s of path %s during apply", xattr, ca.dstPath) + } + } + } + + if ca.setOpaque { + // This is set in the case where we are creating a directory that is replacing a whiteout device + xattr := opaqueXattr(a.userxattr) + if err := sysx.LSetxattr(ca.dstPath, xattr, []byte{'y'}, 0); err != nil { + return errors.Wrapf(err, "failed to set opaque xattr %q of path %s", xattr, ca.dstPath) + } + } + + if err := os.Lchown(ca.dstPath, int(ca.srcStat.Uid), int(ca.srcStat.Gid)); err != nil { + return errors.Wrap(err, "failed to chown during apply") + } + + if ca.srcStat.Mode&unix.S_IFMT != unix.S_IFLNK { + if err := unix.Chmod(ca.dstPath, ca.srcStat.Mode); err != nil { + return errors.Wrapf(err, "failed to chmod path %q during apply", ca.dstPath) + } + } + + atimeSpec := unix.Timespec{Sec: ca.srcStat.Atim.Sec, Nsec: ca.srcStat.Atim.Nsec} + mtimeSpec := unix.Timespec{Sec: ca.srcStat.Mtim.Sec, Nsec: ca.srcStat.Mtim.Nsec} + if ca.srcStat.Mode&unix.S_IFMT != unix.S_IFDIR { + // apply times immediately for non-dirs + if err := unix.UtimesNanoAt(unix.AT_FDCWD, ca.dstPath, []unix.Timespec{atimeSpec, mtimeSpec}, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return err + } + } else { + // save the times we should set on this dir, to be applied after subfiles have been set + a.dirModTimes[ca.dstPath] = mtimeSpec + } + + return nil +} + +func (a *applier) Flush() error { + // Set dir times now that everything has been modified. Walk the filesystem tree to ensure + // that we never try to apply to a path that has been deleted or modified since times for it + // were stored. This is needed for corner cases such as where a parent dir is removed and + // replaced with a symlink. + return filepath.WalkDir(a.root, func(path string, d gofs.DirEntry, prevErr error) error { + if prevErr != nil { + return prevErr + } + if !d.IsDir() { + return nil + } + if mtime, ok := a.dirModTimes[path]; ok { + if err := unix.UtimesNanoAt(unix.AT_FDCWD, path, []unix.Timespec{{Nsec: unix.UTIME_OMIT}, mtime}, unix.AT_SYMLINK_NOFOLLOW); err != nil { + return err + } + } + return nil + }) +} + +func (a *applier) Release() error { + if a.release != nil { + err := a.release() + if err != nil { + return err + } + } + a.release = nil + return nil +} + +func (a *applier) Usage() (snapshots.Usage, error) { + // Calculate the disk space used under the apply root, similar to the normal containerd snapshotter disk usage + // calculations but with the extra ability to take into account hardlinks that were created between snapshots, ensuring that + // they don't get double counted. 
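// [editor's note] Size is accumulated below as stat.Blocks * 512 rather
// than stat.Size: st_blocks is defined in 512-byte units independent of the
// filesystem block size, and it reflects actual allocation, so sparse files
// are not overcounted. Deduplicating by (ino, dev) counts each hardlink
// group once. For example, a 3-byte file on a 4KiB-block filesystem has
// stat.Blocks == 8, contributing 8 * 512 == 4096 bytes.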
+ inodes := make(map[inode]struct{}) + var usage snapshots.Usage + if err := filepath.WalkDir(a.root, func(path string, dirent gofs.DirEntry, err error) error { + if err != nil { + return err + } + info, err := dirent.Info() + if err != nil { + return err + } + stat := info.Sys().(*syscall.Stat_t) + inode := statInode(stat) + if _, ok := inodes[inode]; ok { + return nil + } + inodes[inode] = struct{}{} + if a.crossSnapshotLinks != nil { + if _, ok := a.crossSnapshotLinks[statInode(stat)]; ok { + // don't count cross-snapshot hardlinks + return nil + } + } + usage.Inodes++ + usage.Size += stat.Blocks * 512 // 512 is always block size, see "man 2 stat" + return nil + }); err != nil { + return snapshots.Usage{}, err + } + return usage, nil +} + +type differ struct { + lowerRoot string + releaseLower func() error + + upperRoot string + releaseUpper func() error + + upperBindSource string + upperOverlayDirs []string // ordered lowest -> highest + + upperdir string + + visited map[string]struct{} // set of parent subPaths that have been visited + inodes map[inode]string // map of inode -> subPath +} + +func differFor(lowerMntable, upperMntable Mountable) (_ *differ, rerr error) { + d := &differ{ + visited: make(map[string]struct{}), + inodes: make(map[inode]string), + } + defer func() { + if rerr != nil { + rerr = multierror.Append(rerr, d.Release()).ErrorOrNil() + } + }() + + var lowerMnts []mount.Mount + if lowerMntable != nil { + mnts, release, err := lowerMntable.Mount() + if err != nil { + return nil, err + } + mounter := LocalMounterWithMounts(mnts) + root, err := mounter.Mount() + if err != nil { + return nil, err + } + d.lowerRoot = root + lowerMnts = mnts + d.releaseLower = func() error { + err := mounter.Unmount() + return multierror.Append(err, release()).ErrorOrNil() + } + } + + var upperMnts []mount.Mount + if upperMntable != nil { + mnts, release, err := upperMntable.Mount() + if err != nil { + return nil, err + } + mounter := LocalMounterWithMounts(mnts) + root, err := mounter.Mount() + if err != nil { + return nil, err + } + d.upperRoot = root + upperMnts = mnts + d.releaseUpper = func() error { + err := mounter.Unmount() + return multierror.Append(err, release()).ErrorOrNil() + } + } + + if len(upperMnts) == 1 { + switch upperMnts[0].Type { + case "bind", "rbind": + d.upperBindSource = upperMnts[0].Source + case "overlay": + overlayDirs, err := overlay.GetOverlayLayers(upperMnts[0]) + if err != nil { + return nil, errors.Wrapf(err, "failed to get overlay layers from mount %+v", upperMnts[0]) + } + d.upperOverlayDirs = overlayDirs + } + } + if len(lowerMnts) > 0 { + if upperdir, err := overlay.GetUpperdir(lowerMnts, upperMnts); err == nil { + d.upperdir = upperdir + } + } + + return d, nil +} + +func (d *differ) HandleChanges(ctx context.Context, handle func(context.Context, *change) error) error { + if d.upperdir != "" { + return d.overlayChanges(ctx, handle) + } + return d.doubleWalkingChanges(ctx, handle) +} + +func (d *differ) doubleWalkingChanges(ctx context.Context, handle func(context.Context, *change) error) error { + return fs.Changes(ctx, d.lowerRoot, d.upperRoot, func(kind fs.ChangeKind, subPath string, srcfi os.FileInfo, prevErr error) error { + if prevErr != nil { + return prevErr + } + if ctx.Err() != nil { + return ctx.Err() + } + + if kind == fs.ChangeKindUnmodified { + return nil + } + + // NOTE: it's tempting to skip creating parent dirs when change kind is Delete, but + // that would make us incompatible with the image exporter code: + // 
https://github.com/containerd/containerd/pull/2095 + if err := d.checkParent(ctx, subPath, handle); err != nil { + return errors.Wrapf(err, "failed to check parent for %s", subPath) + } + + c := &change{ + kind: kind, + subPath: subPath, + } + + if srcfi != nil { + // Try to ensure that srcPath and srcStat are set to a file from the underlying filesystem + // rather than the actual mount when possible. This allows hardlinking without getting EXDEV. + switch { + case !srcfi.IsDir() && d.upperBindSource != "": + srcPath, err := safeJoin(d.upperBindSource, c.subPath) + if err != nil { + return errors.Wrapf(err, "failed to join %s and %s", d.upperBindSource, c.subPath) + } + c.srcPath = srcPath + if fi, err := os.Lstat(c.srcPath); err == nil { + srcfi = fi + } else { + return errors.Wrap(err, "failed to stat underlying file from bind mount") + } + case !srcfi.IsDir() && len(d.upperOverlayDirs) > 0: + for i := range d.upperOverlayDirs { + dir := d.upperOverlayDirs[len(d.upperOverlayDirs)-1-i] + path, err := safeJoin(dir, c.subPath) + if err != nil { + return errors.Wrapf(err, "failed to join %s and %s", dir, c.subPath) + } + if stat, err := os.Lstat(path); err == nil { + c.srcPath = path + srcfi = stat + break + } else if errors.Is(err, unix.ENOENT) { + continue + } else { + return errors.Wrap(err, "failed to lstat when finding direct path of overlay file") + } + } + default: + srcPath, err := safeJoin(d.upperRoot, subPath) + if err != nil { + return errors.Wrapf(err, "failed to join %s and %s", d.upperRoot, subPath) + } + c.srcPath = srcPath + if fi, err := os.Lstat(c.srcPath); err == nil { + srcfi = fi + } else { + return errors.Wrap(err, "failed to stat srcPath from differ") + } + } + + var ok bool + c.srcStat, ok = srcfi.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("unhandled stat type for %+v", srcfi) + } + + if !srcfi.IsDir() && c.srcStat.Nlink > 1 { + if linkSubPath, ok := d.inodes[statInode(c.srcStat)]; ok { + c.linkSubPath = linkSubPath + } else { + d.inodes[statInode(c.srcStat)] = c.subPath + } + } + } + + return handle(ctx, c) + }) +} + +func (d *differ) overlayChanges(ctx context.Context, handle func(context.Context, *change) error) error { + return overlay.Changes(ctx, func(kind fs.ChangeKind, subPath string, srcfi os.FileInfo, prevErr error) error { + if prevErr != nil { + return prevErr + } + if ctx.Err() != nil { + return ctx.Err() + } + + if kind == fs.ChangeKindUnmodified { + return nil + } + + if err := d.checkParent(ctx, subPath, handle); err != nil { + return errors.Wrapf(err, "failed to check parent for %s", subPath) + } + + srcPath, err := safeJoin(d.upperdir, subPath) + if err != nil { + return errors.Wrapf(err, "failed to join %s and %s", d.upperdir, subPath) + } + + c := &change{ + kind: kind, + subPath: subPath, + srcPath: srcPath, + } + + if srcfi != nil { + var ok bool + c.srcStat, ok = srcfi.Sys().(*syscall.Stat_t) + if !ok { + return errors.Errorf("unhandled stat type for %+v", srcfi) + } + + if !srcfi.IsDir() && c.srcStat.Nlink > 1 { + if linkSubPath, ok := d.inodes[statInode(c.srcStat)]; ok { + c.linkSubPath = linkSubPath + } else { + d.inodes[statInode(c.srcStat)] = c.subPath + } + } + } + + return handle(ctx, c) + }, d.upperdir, d.upperRoot, d.lowerRoot) +} + +func (d *differ) checkParent(ctx context.Context, subPath string, handle func(context.Context, *change) error) error { + parentSubPath := filepath.Dir(subPath) + if parentSubPath == "/" { + return nil + } + if _, ok := d.visited[parentSubPath]; ok { + return nil + } + 
+	d.visited[parentSubPath] = struct{}{}
+
+	if err := d.checkParent(ctx, parentSubPath, handle); err != nil {
+		return err
+	}
+	parentSrcPath, err := safeJoin(d.upperRoot, parentSubPath)
+	if err != nil {
+		return err
+	}
+	srcfi, err := os.Lstat(parentSrcPath)
+	if err != nil {
+		return err
+	}
+	parentSrcStat, ok := srcfi.Sys().(*syscall.Stat_t)
+	if !ok {
+		return errors.Errorf("unexpected type %T", srcfi)
+	}
+	return handle(ctx, &change{
+		kind:    fs.ChangeKindModify,
+		subPath: parentSubPath,
+		srcPath: parentSrcPath,
+		srcStat: parentSrcStat,
+	})
+}
+
+func (d *differ) Release() error {
+	var err error
+	if d.releaseLower != nil {
+		err = d.releaseLower()
+		if err == nil {
+			d.releaseLower = nil
+		}
+	}
+	if d.releaseUpper != nil {
+		err = multierror.Append(err, d.releaseUpper()).ErrorOrNil()
+		if err == nil {
+			d.releaseUpper = nil
+		}
+	}
+	return err
+}
+
+func safeJoin(root, path string) (string, error) {
+	dir, base := filepath.Split(path)
+	parent, err := fs.RootPath(root, dir)
+	if err != nil {
+		return "", err
+	}
+	return filepath.Join(parent, base), nil
+}
+
+const (
+	trustedOpaqueXattr = "trusted.overlay.opaque"
+	userOpaqueXattr    = "user.overlay.opaque"
+)
+
+func isOpaqueXattr(s string) bool {
+	for _, k := range []string{trustedOpaqueXattr, userOpaqueXattr} {
+		if s == k {
+			return true
+		}
+	}
+	return false
+}
+
+func opaqueXattr(userxattr bool) string {
+	if userxattr {
+		return userOpaqueXattr
+	}
+	return trustedOpaqueXattr
+}
+
+// needsUserXAttr checks whether overlay mounts should be provided the userxattr option. We can't use
+// NeedsUserXAttr from the overlayutils package directly because we don't always have direct knowledge
+// of the root of the snapshotter state (such as when using a remote snapshotter). Instead, we create
+// a temporary new snapshot and test using its root, which works because single-layer snapshots will
+// use bind mounts even when created by an overlay-based snapshotter.
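+// Background: in a user namespace, overlayfs cannot use "trusted.overlay.*" xattrs; kernels that
+// support unprivileged overlayfs (5.11+) accept the userxattr mount option and store overlay
+// metadata under "user.overlay.*" instead.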
+func needsUserXAttr(ctx context.Context, sn Snapshotter, lm leases.Manager) (bool, error) {
+	key := identity.NewID()
+
+	ctx, done, err := leaseutil.WithLease(ctx, lm, leaseutil.MakeTemporary)
+	if err != nil {
+		return false, errors.Wrap(err, "failed to create lease for checking user xattr")
+	}
+	defer done(context.TODO())
+
+	err = sn.Prepare(ctx, key, "")
+	if err != nil {
+		return false, err
+	}
+	mntable, err := sn.Mounts(ctx, key)
+	if err != nil {
+		return false, err
+	}
+	mnts, unmount, err := mntable.Mount()
+	if err != nil {
+		return false, err
+	}
+	defer unmount()
+
+	var userxattr bool
+	if err := mount.WithTempMount(ctx, mnts, func(root string) error {
+		var err error
+		userxattr, err = overlayutils.NeedsUserXAttr(root)
+		return err
+	}); err != nil {
+		return false, err
+	}
+	return userxattr, nil
+}
diff --git a/vendor/github.com/moby/buildkit/snapshot/diffapply_windows.go b/vendor/github.com/moby/buildkit/snapshot/diffapply_windows.go
new file mode 100644
index 0000000000..2ec9fc3d78
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/snapshot/diffapply_windows.go
@@ -0,0 +1,20 @@
+//go:build windows
+// +build windows
+
+package snapshot
+
+import (
+	"context"
+
+	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/pkg/errors"
+)
+
+func (sn *mergeSnapshotter) diffApply(ctx context.Context, dest Mountable, diffs ...Diff) (_ snapshots.Usage, rerr error) {
+	return snapshots.Usage{}, errors.New("diffApply not yet supported on windows")
+}
+
+func needsUserXAttr(ctx context.Context, sn Snapshotter, lm leases.Manager) (bool, error) {
+	return false, errors.New("needs userxattr not supported on windows")
+}
diff --git a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go
index 78d395b125..ef73e263fc 100644
--- a/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go
+++ b/vendor/github.com/moby/buildkit/snapshot/localmounter_unix.go
@@ -1,3 +1,4 @@
+//go:build !windows
 // +build !windows
 
 package snapshot
@@ -15,7 +16,7 @@ func (lm *localMounter) Mount() (string, error) {
 	lm.mu.Lock()
 	defer lm.mu.Unlock()
 
-	if lm.mounts == nil {
+	if lm.mounts == nil && lm.mountable != nil {
 		mounts, release, err := lm.mountable.Mount()
 		if err != nil {
 			return "", err
diff --git a/vendor/github.com/moby/buildkit/snapshot/merge.go b/vendor/github.com/moby/buildkit/snapshot/merge.go
new file mode 100644
index 0000000000..35cc0b71e3
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/snapshot/merge.go
@@ -0,0 +1,203 @@
+package snapshot
+
+import (
+	"context"
+	"strconv"
+
+	"github.com/containerd/containerd/leases"
+	"github.com/containerd/containerd/pkg/userns"
+	"github.com/containerd/containerd/snapshots"
+	"github.com/moby/buildkit/identity"
+	"github.com/moby/buildkit/util/bklog"
+	"github.com/moby/buildkit/util/leaseutil"
+	"github.com/pkg/errors"
+)
+
+// hardlinkMergeSnapshotters are the names of snapshotters that support merges implemented by
+// creating "hardlink farms" where non-directory objects are hard-linked into the merged tree
+// from their parent snapshots.
+var hardlinkMergeSnapshotters = map[string]struct{}{
+	"native":    {},
+	"overlayfs": {},
+}
+
+// overlayBasedSnapshotters are the names of snapshotters that use overlay mounts, which
+// enables optimizations such as skipping the base layer when doing a hardlink merge.
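+// With overlay mounts the base snapshot can remain a read-only parent of the merge rather than
+// being copied or linked into it (see the skipBaseLayers handling in Merge below).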
+var overlayBasedSnapshotters = map[string]struct{}{
+	"overlayfs": {},
+	"stargz":    {},
+}
+
+type Diff struct {
+	Lower string
+	Upper string
+}
+
+type MergeSnapshotter interface {
+	Snapshotter
+	// Merge creates a snapshot whose contents are the provided diffs applied onto one
+	// another in the provided order, starting from scratch. The diffs are calculated
+	// the same way that diffs are calculated during exports, which ensures that the
+	// result of merging these diffs looks the same as exporting the diffs as layer
+	// blobs and unpacking them as an image.
+	//
+	// Each key in the provided diffs is expected to be a committed snapshot. The
+	// snapshot created by Merge is also committed.
+	//
+	// The size of a merged snapshot (as returned by the Usage method) depends on the merge
+	// implementation. Implementations using hardlinks to create merged views will take up
+	// less space than those that use copies, for example.
+	Merge(ctx context.Context, key string, diffs []Diff, opts ...snapshots.Opt) error
+}
+
+type mergeSnapshotter struct {
+	Snapshotter
+	lm leases.Manager
+
+	// Whether we should try to implement merges by hardlinking between underlying directories
+	tryCrossSnapshotLink bool
+
+	// Whether the optimization of preparing on top of base layers is supported (see Merge method).
+	skipBaseLayers bool
+
+	// Whether we should use the "user.*" namespace when writing overlay xattrs. If false,
+	// "trusted.*" is used instead.
+	userxattr bool
+}
+
+func NewMergeSnapshotter(ctx context.Context, sn Snapshotter, lm leases.Manager) MergeSnapshotter {
+	name := sn.Name()
+	_, tryCrossSnapshotLink := hardlinkMergeSnapshotters[name]
+	_, overlayBased := overlayBasedSnapshotters[name]
+
+	skipBaseLayers := overlayBased // default to skipping base layer for overlay-based snapshotters
+	var userxattr bool
+	if overlayBased && userns.RunningInUserNS() {
+		// When using an overlay-based snapshotter, if we are running rootless on a pre-5.11
+		// kernel, we will not have userxattr. This results in opaque xattrs not being visible
+		// to us and thus breaking the overlay-optimized differ.
+		var err error
+		userxattr, err = needsUserXAttr(ctx, sn, lm)
+		if err != nil {
+			bklog.G(ctx).Debugf("failed to check user xattr: %v", err)
+			tryCrossSnapshotLink = false
+			skipBaseLayers = false
+		} else {
+			tryCrossSnapshotLink = tryCrossSnapshotLink && userxattr
+			// Disable skipping base layers when in pre-5.11 rootless mode. Skipping the base layers
+			// necessitates the ability to set opaque xattrs sometimes, which only works in 5.11+
+			// kernels that support userxattr.
+			skipBaseLayers = userxattr
+		}
+	}
+
+	return &mergeSnapshotter{
+		Snapshotter:          sn,
+		lm:                   lm,
+		tryCrossSnapshotLink: tryCrossSnapshotLink,
+		skipBaseLayers:       skipBaseLayers,
+		userxattr:            userxattr,
+	}
+}
+
+func (sn *mergeSnapshotter) Merge(ctx context.Context, key string, diffs []Diff, opts ...snapshots.Opt) error {
+	var baseKey string
+	if sn.skipBaseLayers {
+		// Overlay-based snapshotters can skip the base snapshot of the merge (if one exists) and just use it as the
+		// parent of the merge snapshot. Other snapshotters will start empty (with baseKey set to "").
+		// Find the baseKey by following the chain of diffs for as long as it follows the pattern of the current lower
+		// being the parent of the current upper and equal to the previous upper, i.e.:
+		// Diff("", A) -> Diff(A, B) -> Diff(B, C), etc.
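+		// As soon as a diff's Lower is not the previous Upper the chain is broken, and that diff
+		// plus everything after it must be applied on top of the base.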
+		var baseIndex int
+		for i, diff := range diffs {
+			var parentKey string
+			if diff.Upper != "" {
+				info, err := sn.Stat(ctx, diff.Upper)
+				if err != nil {
+					return err
+				}
+				parentKey = info.Parent
+			}
+			if parentKey != diff.Lower {
+				break
+			}
+			if diff.Lower != baseKey {
+				break
+			}
+			baseKey = diff.Upper
+			baseIndex = i + 1
+		}
+		diffs = diffs[baseIndex:]
+	}
+
+	tempLeaseCtx, done, err := leaseutil.WithLease(ctx, sn.lm, leaseutil.MakeTemporary)
+	if err != nil {
+		return errors.Wrap(err, "failed to create temporary lease for view mounts during merge")
+	}
+	defer done(context.TODO())
+
+	// Make the snapshot that will be merged into
+	prepareKey := identity.NewID()
+	if err := sn.Prepare(tempLeaseCtx, prepareKey, baseKey); err != nil {
+		return errors.Wrapf(err, "failed to prepare %q", key)
+	}
+	applyMounts, err := sn.Mounts(ctx, prepareKey)
+	if err != nil {
+		return errors.Wrapf(err, "failed to get mounts of %q", key)
+	}
+
+	usage, err := sn.diffApply(ctx, applyMounts, diffs...)
+	if err != nil {
+		return errors.Wrap(err, "failed to apply diffs")
+	}
+	if err := sn.Commit(ctx, key, prepareKey, withMergeUsage(usage)); err != nil {
+		return errors.Wrapf(err, "failed to commit %q", key)
+	}
+	return nil
+}
+
+func (sn *mergeSnapshotter) Usage(ctx context.Context, key string) (snapshots.Usage, error) {
+	// If key was created by Merge, we may need to use the annotated mergeUsage key as
+	// the snapshotter's usage method is wrong when hardlinks are used to create the merge.
+	if info, err := sn.Stat(ctx, key); err != nil {
+		return snapshots.Usage{}, err
+	} else if usage, ok, err := mergeUsageOf(info); err != nil {
+		return snapshots.Usage{}, err
+	} else if ok {
+		return usage, nil
+	}
+	return sn.Snapshotter.Usage(ctx, key)
+}
+
+// mergeUsage{Size,Inodes}Label hold the correct usage calculations for diffApplyMerges, for which the builtin usage
+// is wrong because it can't account for hardlinks made across immutable snapshots
+const mergeUsageSizeLabel = "buildkit.mergeUsageSize"
+const mergeUsageInodesLabel = "buildkit.mergeUsageInodes"
+
+func withMergeUsage(usage snapshots.Usage) snapshots.Opt {
+	return snapshots.WithLabels(map[string]string{
+		mergeUsageSizeLabel:   strconv.Itoa(int(usage.Size)),
+		mergeUsageInodesLabel: strconv.Itoa(int(usage.Inodes)),
+	})
+}
+
+func mergeUsageOf(info snapshots.Info) (usage snapshots.Usage, ok bool, rerr error) {
+	if info.Labels == nil {
+		return snapshots.Usage{}, false, nil
+	}
+	if str, ok := info.Labels[mergeUsageSizeLabel]; ok {
+		i, err := strconv.Atoi(str)
+		if err != nil {
+			return snapshots.Usage{}, false, err
+		}
+		usage.Size = int64(i)
+	}
+	if str, ok := info.Labels[mergeUsageInodesLabel]; ok {
+		i, err := strconv.Atoi(str)
+		if err != nil {
+			return snapshots.Usage{}, false, err
+		}
+		usage.Inodes = int64(i)
+	}
+	return usage, true, nil
+}
diff --git a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go
index a4bf1c4f77..edf95cee70 100644
--- a/vendor/github.com/moby/buildkit/snapshot/snapshotter.go
+++ b/vendor/github.com/moby/buildkit/snapshot/snapshotter.go
@@ -3,12 +3,14 @@ package snapshot
 import (
 	"context"
 	"os"
+	"strings"
 	"sync"
-	"sync/atomic"
 
 	"github.com/containerd/containerd/mount"
+	"github.com/containerd/containerd/pkg/userns"
 	"github.com/containerd/containerd/snapshots"
 	"github.com/docker/docker/pkg/idtools"
+	"github.com/pkg/errors"
 )
 
 type Mountable interface {
@@ -55,6 +57,7 @@ func (s *fromContainerd) Mounts(ctx context.Context, key string) (Mountable, err
 	}
 	return &staticMountable{mounts: mounts, idmap: s.idmap, id: key}, nil
 }
+
 func (s *fromContainerd) Prepare(ctx context.Context, key, parent string, opts ...snapshots.Opt) error {
 	_, err := s.Snapshotter.Prepare(ctx, key, parent, opts...)
 	return err
@@ -70,27 +73,13 @@ func (s *fromContainerd) IdentityMapping() *idtools.IdentityMapping {
 	return s.idmap
 }
 
-type staticMountable struct {
-	count  int32
-	id     string
-	mounts []mount.Mount
-	idmap  *idtools.IdentityMapping
-}
-
-func (cm *staticMountable) Mount() ([]mount.Mount, func() error, error) {
-	atomic.AddInt32(&cm.count, 1)
-	return cm.mounts, func() error {
-		if atomic.AddInt32(&cm.count, -1) < 0 {
-			if v := os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR"); v == "1" {
-				panic("release of released mount " + cm.id)
-			}
-		}
-		return nil
-	}, nil
-}
-
-func (cm *staticMountable) IdentityMapping() *idtools.IdentityMapping {
-	return cm.idmap
+func (s *fromContainerd) Commit(ctx context.Context, name, key string, opts ...snapshots.Opt) error {
+	info, err := s.Stat(ctx, key)
+	if err != nil {
+		return errors.Wrap(err, "failed to stat active key during commit")
+	}
+	opts = append(opts, snapshots.WithLabels(snapshots.FilterInheritedLabels(info.Labels)))
+	return s.Snapshotter.Commit(ctx, name, key, opts...)
 }
 
 // NewContainerdSnapshotter converts snapshotter to containerd snapshotter
@@ -125,6 +114,10 @@ func (cs *containerdSnapshotter) returnMounts(mf Mountable) ([]mount.Mount, erro
 	cs.mu.Lock()
 	cs.releasers = append(cs.releasers, release)
 	cs.mu.Unlock()
+	redirectDirOption := getRedirectDirOption()
+	if redirectDirOption != "" {
+		mounts = setRedirectDir(mounts, redirectDirOption)
+	}
 	return mounts, nil
 }
 
@@ -149,3 +142,42 @@ func (cs *containerdSnapshotter) View(ctx context.Context, key, parent string, o
 	}
 	return cs.returnMounts(mf)
 }
+
+var redirectDirOption string
+var redirectDirOptionOnce sync.Once
+
+func getRedirectDirOption() string {
+	redirectDirOptionOnce.Do(func() {
+		if _, err := os.Stat("/sys/module/overlay/parameters/redirect_dir"); err != nil {
+			redirectDirOption = "" // redirect_dir unsupported
+			return
+		}
+		if userns.RunningInUserNS() {
+			// userxattr (kernel >= 5.11) disables redirect_dir and doesn't allow specifying "off".
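+			// Assumption: passing any redirect_dir value alongside userxattr would be rejected by
+			// the kernel, so in that case the option is left unset entirely.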
+ redirectDirOption = "" + return + } + redirectDirOption = "off" // disable redirect_dir to avoid broken diff + }) + return redirectDirOption +} + +func setRedirectDir(mounts []mount.Mount, redirectDirOption string) (ret []mount.Mount) { + if redirectDirOption == "" { + return mounts + } + for _, m := range mounts { + if m.Type == "overlay" { + var opts []string + for _, o := range m.Options { + if strings.HasPrefix(o, "redirect_dir=") { + continue + } + opts = append(opts, o) + } + m.Options = append(opts, "redirect_dir="+redirectDirOption) + } + ret = append(ret, m) + } + return ret +} diff --git a/vendor/github.com/moby/buildkit/snapshot/staticmountable.go b/vendor/github.com/moby/buildkit/snapshot/staticmountable.go new file mode 100644 index 0000000000..62533ba318 --- /dev/null +++ b/vendor/github.com/moby/buildkit/snapshot/staticmountable.go @@ -0,0 +1,41 @@ +package snapshot + +import ( + "os" + "sync/atomic" + + "github.com/containerd/containerd/mount" + "github.com/docker/docker/pkg/idtools" +) + +type staticMountable struct { + count int32 + id string + mounts []mount.Mount + idmap *idtools.IdentityMapping +} + +func (cm *staticMountable) Mount() ([]mount.Mount, func() error, error) { + // return a copy to prevent changes to mount.Mounts in the slice from affecting cm + mounts := make([]mount.Mount, len(cm.mounts)) + copy(mounts, cm.mounts) + + redirectDirOption := getRedirectDirOption() + if redirectDirOption != "" { + mounts = setRedirectDir(mounts, redirectDirOption) + } + + atomic.AddInt32(&cm.count, 1) + return mounts, func() error { + if atomic.AddInt32(&cm.count, -1) < 0 { + if v := os.Getenv("BUILDKIT_DEBUG_PANIC_ON_ERROR"); v == "1" { + panic("release of released mount " + cm.id) + } + } + return nil + }, nil +} + +func (cm *staticMountable) IdentityMapping() *idtools.IdentityMapping { + return cm.idmap +} diff --git a/vendor/github.com/moby/buildkit/solver/cachemanager.go b/vendor/github.com/moby/buildkit/solver/cachemanager.go index b628b55a19..f8bfbd23dd 100644 --- a/vendor/github.com/moby/buildkit/solver/cachemanager.go +++ b/vendor/github.com/moby/buildkit/solver/cachemanager.go @@ -8,17 +8,17 @@ import ( "time" "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/util/bklog" digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) // NewInMemoryCacheManager creates a new in-memory cache manager func NewInMemoryCacheManager() CacheManager { - return NewCacheManager(identity.NewID(), NewInMemoryCacheStorage(), NewInMemoryResultStorage()) + return NewCacheManager(context.TODO(), identity.NewID(), NewInMemoryCacheStorage(), NewInMemoryResultStorage()) } // NewCacheManager creates a new cache manager with specific storage backend -func NewCacheManager(id string, storage CacheKeyStorage, results CacheResultStorage) CacheManager { +func NewCacheManager(ctx context.Context, id string, storage CacheKeyStorage, results CacheResultStorage) CacheManager { cm := &cacheManager{ id: id, backend: storage, @@ -26,7 +26,7 @@ func NewCacheManager(id string, storage CacheKeyStorage, results CacheResultStor } if err := cm.ReleaseUnreferenced(); err != nil { - logrus.Errorf("failed to release unreferenced cache metadata: %+v", err) + bklog.G(ctx).Errorf("failed to release unreferenced cache metadata: %+v", err) } return cm diff --git a/vendor/github.com/moby/buildkit/solver/cacheopts.go b/vendor/github.com/moby/buildkit/solver/cacheopts.go index 7c58d82112..d5821b4e91 100644 --- a/vendor/github.com/moby/buildkit/solver/cacheopts.go +++ 
b/vendor/github.com/moby/buildkit/solver/cacheopts.go @@ -3,31 +3,36 @@ package solver import ( "context" + "github.com/moby/buildkit/util/bklog" + digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) type CacheOpts map[interface{}]interface{} type cacheOptGetterKey struct{} -func CacheOptGetterOf(ctx context.Context) func(keys ...interface{}) map[interface{}]interface{} { +func CacheOptGetterOf(ctx context.Context) func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} { if v := ctx.Value(cacheOptGetterKey{}); v != nil { - if getter, ok := v.(func(keys ...interface{}) map[interface{}]interface{}); ok { + if getter, ok := v.(func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{}); ok { return getter } } return nil } +func WithCacheOptGetter(ctx context.Context, getter func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{}) context.Context { + return context.WithValue(ctx, cacheOptGetterKey{}, getter) +} + func withAncestorCacheOpts(ctx context.Context, start *state) context.Context { - return context.WithValue(ctx, cacheOptGetterKey{}, func(keys ...interface{}) map[interface{}]interface{} { + return WithCacheOptGetter(ctx, func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} { keySet := make(map[interface{}]struct{}) for _, k := range keys { keySet[k] = struct{}{} } values := make(map[interface{}]interface{}) - walkAncestors(start, func(st *state) bool { + walkAncestors(ctx, start, func(st *state) bool { if st.clientVertex.Error != "" { // don't use values from cancelled or otherwise error'd vertexes return false @@ -46,13 +51,13 @@ func withAncestorCacheOpts(ctx context.Context, start *state) context.Context { } } } - return false + return !includeAncestors // stop after the first state unless includeAncestors is true }) return values }) } -func walkAncestors(start *state, f func(*state) bool) { +func walkAncestors(ctx context.Context, start *state, f func(*state) bool) { stack := [][]*state{{start}} cache := make(map[digest.Digest]struct{}) for len(stack) > 0 { @@ -79,7 +84,7 @@ func walkAncestors(start *state, f func(*state) bool) { parent := st.solver.actives[parentDgst] st.solver.mu.RUnlock() if parent == nil { - logrus.Warnf("parent %q not found in active job list during cache opt search", parentDgst) + bklog.G(ctx).Warnf("parent %q not found in active job list during cache opt search", parentDgst) continue } stack[len(stack)-1] = append(stack[len(stack)-1], parent) diff --git a/vendor/github.com/moby/buildkit/solver/cachestorage.go b/vendor/github.com/moby/buildkit/solver/cachestorage.go index 12797223c6..77724ac4c4 100644 --- a/vendor/github.com/moby/buildkit/solver/cachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/cachestorage.go @@ -5,6 +5,7 @@ import ( "time" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" ) @@ -47,6 +48,6 @@ type CacheInfoLink struct { type CacheResultStorage interface { Save(Result, time.Time) (CacheResult, error) Load(ctx context.Context, res CacheResult) (Result, error) - LoadRemote(ctx context.Context, res CacheResult, s session.Group) (*Remote, error) + LoadRemotes(ctx context.Context, res CacheResult, compression *compression.Config, s session.Group) ([]*Remote, error) Exists(id string) bool } diff --git a/vendor/github.com/moby/buildkit/solver/edge.go b/vendor/github.com/moby/buildkit/solver/edge.go index 56a091c36c..8504d9f657 
100644 --- a/vendor/github.com/moby/buildkit/solver/edge.go +++ b/vendor/github.com/moby/buildkit/solver/edge.go @@ -6,9 +6,9 @@ import ( "time" "github.com/moby/buildkit/solver/internal/pipe" + "github.com/moby/buildkit/util/bklog" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) type edgeStatusType int @@ -68,6 +68,8 @@ type edge struct { index *edgeIndex secondaryExporters []expDep + + failedOnce sync.Once } // dep holds state for a dependant edge @@ -171,7 +173,7 @@ func (e *edge) finishIncoming(req pipe.Sender) { err = context.Canceled } if debugScheduler { - logrus.Debugf("finishIncoming %s %v %#v desired=%s", e.edge.Vertex.Name(), err, e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) + bklog.G(context.TODO()).Debugf("finishIncoming %s %v %#v desired=%s", e.edge.Vertex.Name(), err, e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) } req.Finalize(&e.edgeState, err) } @@ -179,7 +181,7 @@ func (e *edge) finishIncoming(req pipe.Sender) { // updateIncoming updates the current value of incoming pipe request func (e *edge) updateIncoming(req pipe.Sender) { if debugScheduler { - logrus.Debugf("updateIncoming %s %#v desired=%s", e.edge.Vertex.Name(), e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) + bklog.G(context.TODO()).Debugf("updateIncoming %s %#v desired=%s", e.edge.Vertex.Name(), e.edgeState, req.Request().Payload.(*edgeRequest).desiredState) } req.Update(&e.edgeState) } @@ -358,12 +360,11 @@ func (e *edge) unpark(incoming []pipe.Sender, updates, allPipes []pipe.Receiver, if e.execReq == nil { if added := e.createInputRequests(desiredState, f, false); !added && !e.hasActiveOutgoing && !cacheMapReq { - logrus.Errorf("buildkit scheluding error: leaving incoming open. forcing solve. Please report this with BUILDKIT_SCHEDULER_DEBUG=1") + bklog.G(context.TODO()).Errorf("buildkit scheluding error: leaving incoming open. forcing solve. 
Please report this with BUILDKIT_SCHEDULER_DEBUG=1") debugSchedulerPreUnpark(e, incoming, updates, allPipes) e.createInputRequests(desiredState, f, true) } } - } func (e *edge) makeExportable(k *CacheKey, records []*CacheRecord) ExportableCacheKey { @@ -375,7 +376,9 @@ func (e *edge) makeExportable(k *CacheKey, records []*CacheRecord) ExportableCac func (e *edge) markFailed(f *pipeFactory, err error) { e.err = err - e.postpone(f) + e.failedOnce.Do(func() { + e.postpone(f) + }) } // processUpdate is called by unpark for every updated pipe request @@ -397,12 +400,12 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { if !e.op.IgnoreCache() { keys, err := e.op.Cache().Query(nil, 0, e.cacheMap.Digest, e.edge.Index) if err != nil { - logrus.Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error + bklog.G(context.TODO()).Error(errors.Wrap(err, "invalid query response")) // make the build fail for this error } else { for _, k := range keys { records, err := e.op.Cache().Records(k) if err != nil { - logrus.Errorf("error receiving cache records: %v", err) + bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) continue } @@ -458,7 +461,14 @@ func (e *edge) processUpdate(upt pipe.Receiver) (depChanged bool) { dep.err = err } - state := upt.Status().Value.(*edgeState) + if upt.Status().Value == nil { + return + } + state, isEdgeState := upt.Status().Value.(*edgeState) + if !isEdgeState { + bklog.G(context.TODO()).Warnf("invalid edgeState value for update: %T", state) + return + } if len(dep.keys) < len(state.keys) { newKeys := state.keys[len(dep.keys):] @@ -573,7 +583,7 @@ func (e *edge) recalcCurrentState() { records, err := e.op.Cache().Records(mergedKey) if err != nil { - logrus.Errorf("error receiving cache records: %v", err) + bklog.G(context.TODO()).Errorf("error receiving cache records: %v", err) continue } @@ -665,7 +675,7 @@ func (e *edge) recalcCurrentState() { if len(openKeys) == 0 { e.state = edgeStatusCacheSlow if debugScheduler { - logrus.Debugf("upgrade to cache-slow because no open keys") + bklog.G(context.TODO()).Debugf("upgrade to cache-slow because no open keys") } } } @@ -704,7 +714,7 @@ func (e *edge) respondToIncoming(incoming []pipe.Sender, allPipes []pipe.Receive } if debugScheduler { - logrus.Debugf("status state=%s cancomplete=%v hasouts=%v noPossibleCache=%v depsCacheFast=%v keys=%d cacheRecords=%d", e.state, allIncomingCanComplete, e.hasActiveOutgoing, e.noCacheMatchPossible, e.allDepsCompletedCacheFast, len(e.keys), len(e.cacheRecords)) + bklog.G(context.TODO()).Debugf("status state=%s cancomplete=%v hasouts=%v noPossibleCache=%v depsCacheFast=%v keys=%d cacheRecords=%d", e.state, allIncomingCanComplete, e.hasActiveOutgoing, e.noCacheMatchPossible, e.allDepsCompletedCacheFast, len(e.keys), len(e.cacheRecords)) } if allIncomingCanComplete && e.hasActiveOutgoing { @@ -876,10 +886,10 @@ func (e *edge) loadCache(ctx context.Context) (interface{}, error) { rec := getBestResult(recs) e.cacheRecordsLoaded[rec.ID] = struct{}{} - logrus.Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID) + bklog.G(ctx).Debugf("load cache for %s with %s", e.edge.Vertex.Name(), rec.ID) res, err := e.op.LoadCache(ctx, rec) if err != nil { - logrus.Debugf("load cache for %s err: %v", e.edge.Vertex.Name(), err) + bklog.G(ctx).Debugf("load cache for %s err: %v", e.edge.Vertex.Name(), err) return nil, errors.Wrap(err, "failed to load cache") } diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/solve.go 
b/vendor/github.com/moby/buildkit/solver/errdefs/solve.go index 97ce5a3f92..3cbf8097ee 100644 --- a/vendor/github.com/moby/buildkit/solver/errdefs/solve.go +++ b/vendor/github.com/moby/buildkit/solver/errdefs/solve.go @@ -5,7 +5,7 @@ import ( "errors" "github.com/containerd/typeurl" - "github.com/golang/protobuf/jsonpb" + "github.com/golang/protobuf/jsonpb" //nolint:staticcheck "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/grpcerrors" ) @@ -14,7 +14,7 @@ func init() { typeurl.Register((*Solve)(nil), "github.com/moby/buildkit", "errdefs.Solve+json") } -//nolint:golint +//nolint:revive type IsSolve_Subject isSolve_Subject // SolveError will be returned when an error is encountered during a solve that diff --git a/vendor/github.com/moby/buildkit/solver/exporter.go b/vendor/github.com/moby/buildkit/solver/exporter.go index 26ca2fb929..67ede42223 100644 --- a/vendor/github.com/moby/buildkit/solver/exporter.go +++ b/vendor/github.com/moby/buildkit/solver/exporter.go @@ -11,7 +11,6 @@ type exporter struct { records []*CacheRecord record *CacheRecord - res []CacheExporterRecord edge *edge // for secondaryExporters override *bool } @@ -52,9 +51,10 @@ func addBacklinks(t CacheExporterTarget, rec CacheExporterRecord, cm *cacheManag return rec, nil } -type backlinkT struct{} +type contextT string -var backlinkKey = backlinkT{} +var backlinkKey = contextT("solver/exporter/backlinks") +var resKey = contextT("solver/exporter/res") func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt CacheExportOpt) ([]CacheExporterRecord, error) { var bkm map[string]CacheExporterRecord @@ -66,8 +66,16 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach bkm = bk.(map[string]CacheExporterRecord) } + var res map[*exporter][]CacheExporterRecord + if r := ctx.Value(resKey); r == nil { + res = map[*exporter][]CacheExporterRecord{} + ctx = context.WithValue(ctx, resKey, res) + } else { + res = r.(map[*exporter][]CacheExporterRecord) + } + if t.Visited(e) { - return e.res, nil + return res[e], nil } t.Visit(e) @@ -78,7 +86,8 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach selector digest.Digest } - rec := t.Add(rootKey(e.k.Digest(), e.k.Output())) + recKey := rootKey(e.k.Digest(), e.k.Output()) + rec := t.Add(recKey) allRec := []CacheExporterRecord{rec} addRecord := true @@ -93,6 +102,8 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach var remote *Remote if v := e.record; v != nil && len(e.k.Deps()) > 0 && addRecord { + var variants []CacheExporterRecord + cm := v.cacheManager key := cm.getID(v.key) res, err := cm.backend.Load(key, v.ID) @@ -100,21 +111,41 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach return nil, err } - remote, err = cm.results.LoadRemote(ctx, res, opt.Session) + remotes, err := cm.results.LoadRemotes(ctx, res, opt.CompressionOpt, opt.Session) if err != nil { return nil, err } + if len(remotes) > 0 { + remote, remotes = remotes[0], remotes[1:] // pop the first element + } + if opt.CompressionOpt != nil { + for _, r := range remotes { // record all remaining remotes as well + rec := t.Add(recKey) + rec.AddResult(v.CreatedAt, r) + variants = append(variants, rec) + } + } - if remote == nil && opt.Mode != CacheExportModeRemoteOnly { + if (remote == nil || opt.CompressionOpt != nil) && opt.Mode != CacheExportModeRemoteOnly { res, err := cm.results.Load(ctx, res) if err != nil { return nil, err } - remote, err = opt.Convert(ctx, res) 
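+			// ResolveRemotes may return several layer representations of the same result (e.g.
+			// one per compression type); the first is used as the primary record and the rest
+			// are recorded as variants below.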
+			remotes, err := opt.ResolveRemotes(ctx, res)
 			if err != nil {
 				return nil, err
 			}
 			res.Release(context.TODO())
+			if remote == nil && len(remotes) > 0 {
+				remote, remotes = remotes[0], remotes[1:] // pop the first element
+			}
+			if opt.CompressionOpt != nil {
+				for _, r := range remotes { // record all remaining remotes as well
+					rec := t.Add(recKey)
+					rec.AddResult(v.CreatedAt, r)
+					variants = append(variants, rec)
+				}
+			}
 		}
 
 		if remote != nil {
@@ -122,6 +153,7 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach
 			rec.AddResult(v.CreatedAt, remote)
 		}
 	}
+	allRec = append(allRec, variants...)
 }
 
 	if remote != nil && opt.Mode == CacheExportModeMin {
@@ -154,15 +186,17 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach
 		}
 	}
 
-	for i, srcs := range srcs {
-		for _, src := range srcs {
-			rec.LinkFrom(src.r, i, src.selector.String())
+	for _, rec := range allRec {
+		for i, srcs := range srcs {
+			for _, src := range srcs {
+				rec.LinkFrom(src.r, i, src.selector.String())
+			}
 		}
-	}
 
-	for cm, id := range e.k.ids {
-		if _, err := addBacklinks(t, rec, cm, id, bkm); err != nil {
-			return nil, err
+		for cm, id := range e.k.ids {
+			if _, err := addBacklinks(t, rec, cm, id, bkm); err != nil {
+				return nil, err
+			}
 		}
 	}
 
@@ -180,9 +214,9 @@ func (e *exporter) ExportTo(ctx context.Context, t CacheExporterTarget, opt Cach
 		}
 	}
 
-	e.res = allRec
+	res[e] = allRec
 
-	return e.res, nil
+	return allRec, nil
 }
 
 func getBestResult(records []*CacheRecord) *CacheRecord {
diff --git a/vendor/github.com/moby/buildkit/solver/jobs.go b/vendor/github.com/moby/buildkit/solver/jobs.go
index 20479818ed..0d59fb368e 100644
--- a/vendor/github.com/moby/buildkit/solver/jobs.go
+++ b/vendor/github.com/moby/buildkit/solver/jobs.go
@@ -8,21 +8,22 @@ import (
 	"time"
 
 	"github.com/moby/buildkit/client"
+	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/flightcontrol"
 	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/util/tracing"
 	digest "github.com/opencontainers/go-digest"
-	opentracing "github.com/opentracing/opentracing-go"
 	"github.com/pkg/errors"
+	"go.opentelemetry.io/otel/trace"
 )
 
 // ResolveOpFunc finds an Op implementation for a Vertex
 type ResolveOpFunc func(Vertex, Builder) (Op, error)
 
 type Builder interface {
-	Build(ctx context.Context, e Edge) (CachedResult, error)
+	Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error)
 	InContext(ctx context.Context, f func(ctx context.Context, g session.Group) error) error
 	EachValue(ctx context.Context, key string, fn func(interface{}) error) error
 }
@@ -47,10 +48,9 @@ type state struct {
 	parents  map[digest.Digest]struct{}
 	childVtx map[digest.Digest]struct{}
 
-	mpw     *progress.MultiWriter
-	allPw   map[progress.Writer]struct{}
-	mspan   *tracing.MultiSpan
-	allSpan map[opentracing.Span]struct{}
+	mpw   *progress.MultiWriter
+	allPw map[progress.Writer]struct{}
+	mspan *tracing.MultiSpan
 
 	vtx          Vertex
 	clientVertex client.Vertex
@@ -198,19 +198,24 @@ type subBuilder struct {
 	exporters []ExportableCacheKey
 }
 
-func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, error) {
+func (sb *subBuilder) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) {
+	// TODO(@crazy-max): Handle BuildInfo from subbuild
 	res, err := sb.solver.subBuild(ctx, e, sb.vtx)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	sb.mu.Lock()
 	sb.exporters = append(sb.exporters, res.CacheKeys()[0]) // all keys already have full export chain
 	sb.mu.Unlock()
-	return res, nil
+	return res, nil, nil
 }
 
 func (sb *subBuilder) InContext(ctx context.Context, f func(context.Context, session.Group) error) error {
-	return f(opentracing.ContextWithSpan(progress.WithProgress(ctx, sb.mpw), sb.mspan), sb.state)
+	ctx = progress.WithProgress(ctx, sb.mpw)
+	if sb.mspan.Span != nil {
+		ctx = trace.ContextWithSpan(ctx, sb.mspan)
+	}
+	return f(ctx, sb.state)
 }
 
 func (sb *subBuilder) EachValue(ctx context.Context, key string, fn func(interface{}) error) error {
@@ -228,7 +233,7 @@ type Job struct {
 	list           *Solver
 	pr             *progress.MultiReader
 	pw             progress.Writer
-	span           opentracing.Span
+	span           trace.Span
 	values         sync.Map
 	id             string
 
@@ -358,7 +363,6 @@ func (jl *Solver) loadUnlocked(v, parent Vertex, j *Job, cache map[Vertex]Vertex
 		parents:  map[digest.Digest]struct{}{},
 		childVtx: map[digest.Digest]struct{}{},
 		allPw:    map[progress.Writer]struct{}{},
-		allSpan:  map[opentracing.Span]struct{}{},
 		mpw:      progress.NewMultiWriter(progress.WithMetadata("vertex", dgst)),
 		mspan:    tracing.NewMultiSpan(),
 		vtx:      v,
@@ -414,9 +418,10 @@ func (jl *Solver) connectProgressFromState(target, src *state) {
 		if _, ok := target.allPw[j.pw]; !ok {
 			target.mpw.Add(j.pw)
 			target.allPw[j.pw] = struct{}{}
-			j.pw.Write(target.clientVertex.Digest.String(), target.clientVertex)
-			target.mspan.Add(j.span)
-			target.allSpan[j.span] = struct{}{}
+			j.pw.Write(identity.NewID(), target.clientVertex)
+			if j.span != nil && j.span.SpanContext().IsValid() {
+				target.mspan.Add(j.span)
+			}
 		}
 	}
 	for p := range src.parents {
@@ -433,14 +438,15 @@ func (jl *Solver) NewJob(id string) (*Job, error) {
 	}
 
 	pr, ctx, progressCloser := progress.NewContext(context.Background())
-	pw, _, _ := progress.FromContext(ctx) // TODO: expose progress.Pipe()
+	pw, _, _ := progress.NewFromContext(ctx) // TODO: expose progress.Pipe()
+	_, span := trace.NewNoopTracerProvider().Tracer("").Start(ctx, "")
 
 	j := &Job{
 		list:           jl,
 		pr:             progress.NewMultiReader(pr),
 		pw:             pw,
 		progressCloser: progressCloser,
-		span:           (&opentracing.NoopTracer{}).StartSpan(""),
+		span:           span,
 		id:             id,
 	}
 	jl.jobs[id] = j
@@ -451,7 +457,7 @@ func (jl *Solver) NewJob(id string) (*Job, error) {
 }
 
 func (jl *Solver) Get(id string) (*Job, error) {
-	ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
+	ctx, cancel := context.WithTimeout(context.Background(), 6*time.Second)
 	defer cancel()
 
 	go func() {
@@ -491,17 +497,43 @@ func (jl *Solver) deleteIfUnreferenced(k digest.Digest, st *state) {
 	}
 }
 
-func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, error) {
-	if span := opentracing.SpanFromContext(ctx); span != nil {
+func (j *Job) Build(ctx context.Context, e Edge) (CachedResult, BuildSources, error) {
+	if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() {
 		j.span = span
 	}
 
 	v, err := j.list.load(e.Vertex, nil, j)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	e.Vertex = v
-	return j.list.s.build(ctx, e)
+
+	res, err := j.list.s.build(ctx, e)
+	if err != nil {
+		return nil, nil, err
+	}
+
+	j.list.mu.Lock()
+	defer j.list.mu.Unlock()
+	return res, j.walkBuildSources(ctx, e, make(BuildSources)), nil
+}
+
+func (j *Job) walkBuildSources(ctx context.Context, e Edge, bsrc BuildSources) BuildSources {
+	for _, inp := range e.Vertex.Inputs() {
+		if st, ok := j.list.actives[inp.Vertex.Digest()]; ok {
+			st.mu.Lock()
+			for _, cacheRes := range st.op.cacheRes {
+				for key, val := range cacheRes.BuildSources {
+					if _, ok := bsrc[key]; !ok {
+						bsrc[key] = val
+					}
+				}
+			}
+			st.mu.Unlock()
+			bsrc = j.walkBuildSources(ctx, inp, bsrc)
+		}
+	}
+	return bsrc
 }
 
 func (j *Job) Discard() error {
@@ -521,9 +553,6 @@ func (j *Job) Discard() error {
 		if _, ok := st.allPw[j.pw]; ok {
 			delete(st.allPw, j.pw)
 		}
-		if _, ok := st.allSpan[j.span]; ok {
-			delete(st.allSpan, j.span)
-		}
 		st.mu.Unlock()
 	}
 
@@ -613,13 +642,16 @@ func (s *sharedOp) Cache() CacheManager {
 }
 
 func (s *sharedOp) LoadCache(ctx context.Context, rec *CacheRecord) (Result, error) {
-	ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan)
+	ctx = progress.WithProgress(ctx, s.st.mpw)
+	if s.st.mspan.Span != nil {
+		ctx = trace.ContextWithSpan(ctx, s.st.mspan)
+	}
 	// no cache hit. start evaluating the node
 	span, ctx := tracing.StartSpan(ctx, "load cache: "+s.st.vtx.Name())
-	notifyStarted(ctx, &s.st.clientVertex, true)
+	notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, true)
 	res, err := s.Cache().Load(withAncestorCacheOpts(ctx, s.st), rec)
 	tracing.FinishWithError(span, err)
-	notifyCompleted(ctx, &s.st.clientVertex, err, true)
+	notifyCompleted(err, true)
 	return res, err
 }
 
@@ -631,7 +663,8 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF
 		err = errdefs.WithOp(err, s.st.vtx.Sys())
 		err = errdefs.WrapVertex(err, s.st.origDigest)
 	}()
-	key, err := s.g.Do(ctx, fmt.Sprintf("slow-compute-%d", index), func(ctx context.Context) (interface{}, error) {
+	flightControlKey := fmt.Sprintf("slow-compute-%d", index)
+	key, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (interface{}, error) {
 		s.slowMu.Lock()
 		// TODO: add helpers for these stored values
 		if res, ok := s.slowCacheRes[index]; ok {
@@ -650,7 +683,10 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF
 		if st == nil {
 			return nil, errors.Errorf("failed to get state for index %d on %v", index, s.st.vtx.Name())
 		}
-		ctx2 := opentracing.ContextWithSpan(progress.WithProgress(ctx, st.mpw), st.mspan)
+		ctx2 := progress.WithProgress(ctx, st.mpw)
+		if st.mspan.Span != nil {
+			ctx2 = trace.ContextWithSpan(ctx2, st.mspan)
+		}
 		err = p(ctx2, res, st)
 		if err != nil {
 			f = nil
@@ -660,7 +696,10 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF
 
 		var key digest.Digest
 		if f != nil {
-			ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan)
+			ctx = progress.WithProgress(ctx, s.st.mpw)
+			if s.st.mspan.Span != nil {
+				ctx = trace.ContextWithSpan(ctx, s.st.mspan)
+			}
 			key, err = f(withAncestorCacheOpts(ctx, s.st), res, s.st)
 		}
 		if err != nil {
@@ -685,9 +724,12 @@ func (s *sharedOp) CalcSlowCache(ctx context.Context, index Index, p PreprocessF
 		return key, err
 	})
 	if err != nil {
-		ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan)
-		notifyStarted(ctx, &s.st.clientVertex, false)
-		notifyCompleted(ctx, &s.st.clientVertex, err, false)
+		ctx = progress.WithProgress(ctx, s.st.mpw)
+		if s.st.mspan.Span != nil {
+			ctx = trace.ContextWithSpan(ctx, s.st.mspan)
+		}
+		notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false)
+		notifyCompleted(err, false)
 		return "", err
 	}
 	return key.(digest.Digest), nil
@@ -702,22 +744,26 @@ func (s *sharedOp) CacheMap(ctx context.Context, index int) (resp *cacheMapResp,
 	if err != nil {
 		return nil, err
 	}
-	res, err := s.g.Do(ctx, "cachemap", func(ctx context.Context) (ret interface{}, retErr error) {
+	flightControlKey := fmt.Sprintf("cachemap-%d", index)
+	res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) {
 		if s.cacheRes != nil && s.cacheDone || index < len(s.cacheRes) {
 			return s.cacheRes, nil
 		}
 		if s.cacheErr != nil {
 			return nil, s.cacheErr
 		}
-		ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan)
+		ctx = progress.WithProgress(ctx, s.st.mpw)
+		if s.st.mspan.Span != nil {
+			ctx = trace.ContextWithSpan(ctx, s.st.mspan)
+		}
 		ctx = withAncestorCacheOpts(ctx, s.st)
 		if len(s.st.vtx.Inputs()) == 0 {
 			// no cache hit. start evaluating the node
 			span, ctx := tracing.StartSpan(ctx, "cache request: "+s.st.vtx.Name())
-			notifyStarted(ctx, &s.st.clientVertex, false)
+			notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false)
 			defer func() {
 				tracing.FinishWithError(span, retErr)
-				notifyCompleted(ctx, &s.st.clientVertex, retErr, false)
+				notifyCompleted(retErr, false)
 			}()
 		}
 		res, done, err := op.CacheMap(ctx, s.st, len(s.cacheRes))
@@ -762,7 +808,8 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result,
 	if err != nil {
 		return nil, nil, err
 	}
-	res, err := s.g.Do(ctx, "exec", func(ctx context.Context) (ret interface{}, retErr error) {
+	flightControlKey := "exec"
+	res, err := s.g.Do(ctx, flightControlKey, func(ctx context.Context) (ret interface{}, retErr error) {
 		if s.execRes != nil || s.execErr != nil {
 			return s.execRes, s.execErr
 		}
@@ -772,15 +819,18 @@ func (s *sharedOp) Exec(ctx context.Context, inputs []Result) (outputs []Result,
 		}
 		defer release()
 
-		ctx = opentracing.ContextWithSpan(progress.WithProgress(ctx, s.st.mpw), s.st.mspan)
+		ctx = progress.WithProgress(ctx, s.st.mpw)
+		if s.st.mspan.Span != nil {
+			ctx = trace.ContextWithSpan(ctx, s.st.mspan)
+		}
 		ctx = withAncestorCacheOpts(ctx, s.st)
 
 		// no cache hit. start evaluating the node
 		span, ctx := tracing.StartSpan(ctx, s.st.vtx.Name())
-		notifyStarted(ctx, &s.st.clientVertex, false)
+		notifyCompleted := notifyStarted(ctx, &s.st.clientVertex, false)
 		defer func() {
 			tracing.FinishWithError(span, retErr)
-			notifyCompleted(ctx, &s.st.clientVertex, retErr, false)
+			notifyCompleted(retErr, false)
 		}()
 
 		res, err := op.Exec(ctx, s.st, inputs)
@@ -843,9 +893,10 @@ func initClientVertex(v Vertex) client.Vertex {
 		inputDigests = append(inputDigests, inp.Vertex.Digest())
 	}
 	return client.Vertex{
-		Inputs: inputDigests,
-		Name:   v.Name(),
-		Digest: v.Digest(),
+		Inputs:        inputDigests,
+		Name:          v.Name(),
+		Digest:        v.Digest(),
+		ProgressGroup: v.Options().ProgressGroup,
 	}
 }
 
@@ -879,29 +930,26 @@ func (v *vertexWithCacheOptions) Inputs() []Edge {
 	return v.inputs
 }
 
-func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) {
-	pw, _, _ := progress.FromContext(ctx)
-	defer pw.Close()
-	now := time.Now()
-	v.Started = &now
+func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) func(err error, cached bool) {
+	pw, _, _ := progress.NewFromContext(ctx)
+	start := time.Now()
+	v.Started = &start
 	v.Completed = nil
 	v.Cached = cached
-	pw.Write(v.Digest.String(), *v)
-}
-
-func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) {
-	pw, _, _ := progress.FromContext(ctx)
-	defer pw.Close()
-	now := time.Now()
-	if v.Started == nil {
-		v.Started = &now
+	id := identity.NewID()
+	pw.Write(id, *v)
+	return func(err error, cached bool) {
+		defer pw.Close()
+		stop := time.Now()
+		v.Completed = &stop
+		v.Cached = cached
+		if err != nil {
+			v.Error = err.Error()
+		} else {
+			v.Error = ""
+		}
+		pw.Write(id, *v)
 	}
-	v.Completed = &now
-	v.Cached = cached
-	if err != nil {
-		v.Error = err.Error()
-	}
-	pw.Write(v.Digest.String(), *v)
 }
 
 type SlowCacheError struct {
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
index e6a5850fef..bd31bbfdc6 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/bridge.go
@@ -8,21 +8,26 @@ import (
 	"time"
 
 	"github.com/containerd/containerd/platforms"
-	"github.com/mitchellh/hashstructure"
+	"github.com/mitchellh/hashstructure/v2"
 	"github.com/moby/buildkit/cache/remotecache"
+	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/client/llb"
+	"github.com/moby/buildkit/exporter/containerimage/exptypes"
 	"github.com/moby/buildkit/frontend"
 	gw "github.com/moby/buildkit/frontend/gateway/client"
+	"github.com/moby/buildkit/identity"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/solver/errdefs"
 	llberrdefs "github.com/moby/buildkit/solver/llbsolver/errdefs"
 	"github.com/moby/buildkit/solver/pb"
+	"github.com/moby/buildkit/util/bklog"
+	"github.com/moby/buildkit/util/buildinfo"
 	"github.com/moby/buildkit/util/flightcontrol"
+	"github.com/moby/buildkit/util/progress"
 	"github.com/moby/buildkit/worker"
 	digest "github.com/opencontainers/go-digest"
 	"github.com/pkg/errors"
-	"github.com/sirupsen/logrus"
 )
 
 type llbBridge struct {
@@ -36,20 +41,43 @@ type llbBridge struct {
 	sm *session.Manager
 }
 
-func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, error) {
+func (b *llbBridge) Warn(ctx context.Context, dgst digest.Digest, msg string, opts frontend.WarnOpts) error {
+	return b.builder.InContext(ctx, func(ctx context.Context, g session.Group) error {
+		pw, ok, _ := progress.NewFromContext(ctx, progress.WithMetadata("vertex", dgst))
+		if !ok {
+			return nil
+		}
+		level := opts.Level
+		if level == 0 {
+			level = 1
+		}
+		pw.Write(identity.NewID(), client.VertexWarning{
+			Vertex:     dgst,
+			Level:      level,
+			Short:      []byte(msg),
+			SourceInfo: opts.SourceInfo,
+			Range:      opts.Range,
+			Detail:     opts.Detail,
+			URL:        opts.URL,
+		})
+		return pw.Close()
+	})
+}
+
+func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImports []gw.CacheOptionsEntry) (solver.CachedResult, solver.BuildSources, error) {
 	w, err := b.resolveWorker()
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	ent, err := loadEntitlements(b.builder)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
 	var cms []solver.CacheManager
 	for _, im := range cacheImports {
 		cmID, err := cmKey(im)
 		if err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 		b.cmsMu.Lock()
 		var cm solver.CacheManager
@@ -69,7 +97,7 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp
 				cmNew, err = ci.Resolve(ctx, desc, cmID, w)
 				return err
 			}); err != nil {
-				logrus.Debugf("error while importing cache manifest from cmId=%s: %v", cmID, err)
+				bklog.G(ctx).Debugf("error while importing cache manifest from cmId=%s: %v", cmID, err)
 				return nil, err
 			}
 			return cmNew, nil
@@ -86,7 +114,7 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp
 
 	edge, err := Load(def, dpc.Load, ValidateEntitlements(ent), WithCacheSources(cms), NormalizeRuntimePlatforms(), WithValidateCaps())
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to load LLB")
+		return nil, nil, errors.Wrap(err, "failed to load LLB")
 	}
 
 	if len(dpc.ids) > 0 {
@@ -97,24 +125,15 @@ func (b *llbBridge) loadResult(ctx context.Context, def *pb.Definition, cacheImp
 		if err := b.eachWorker(func(w worker.Worker) error {
 			return w.PruneCacheMounts(ctx, ids)
 		}); err != nil {
-			return nil, err
+			return nil, nil, err
 		}
 	}
 
-	res, err := b.builder.Build(ctx, edge)
+	res, bi, err := b.builder.Build(ctx, edge)
 	if err != nil {
-		return nil, err
+		return nil, nil, err
 	}
-	wr, ok := res.Sys().(*worker.WorkerRef)
-	if !ok {
-		return nil, errors.Errorf("invalid reference for exporting: %T", res.Sys())
-	}
-	if wr.ImmutableRef != nil {
-		if err := wr.ImmutableRef.Finalize(ctx, false); err != nil {
-			return nil, err
-		}
-	}
-	return res, err
+	return res, bi, nil
 }
 
 func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid string) (res *frontend.Result, err error) {
@@ -125,8 +144,7 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid st
 	if req.Definition != nil && req.Definition.Def != nil {
 		res = &frontend.Result{Ref: newResultProxy(b, req)}
 		if req.Evaluate {
-			_, err := res.Ref.Result(ctx)
-			return res, err
+			_, err = res.Ref.Result(ctx)
 		}
 	} else if req.Frontend != "" {
 		f, ok := b.frontends[req.Frontend]
@@ -135,22 +153,49 @@ func (b *llbBridge) Solve(ctx context.Context, req frontend.SolveRequest, sid st
 		}
 		res, err = f.Solve(ctx, b, req.FrontendOpt, req.FrontendInputs, sid, b.sm)
 		if err != nil {
-			return nil, errors.Wrapf(err, "failed to solve with frontend %s", req.Frontend)
+			return nil, err
 		}
 	} else {
 		return &frontend.Result{}, nil
 	}
 
+	if len(res.Refs) > 0 {
+		for p := range res.Refs {
+			dtbi, err := buildinfo.GetMetadata(res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p), req.Frontend, req.FrontendOpt)
+			if err != nil {
+				return nil, err
+			}
+			if dtbi != nil && len(dtbi) > 0 {
+				if res.Metadata == nil {
+					res.Metadata = make(map[string][]byte)
+				}
+				res.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, p)] = dtbi
+			}
+		}
+	} else {
+		dtbi, err := buildinfo.GetMetadata(res.Metadata, exptypes.ExporterBuildInfo, req.Frontend, req.FrontendOpt)
+		if err != nil {
+			return nil, err
+		}
+		if dtbi != nil && len(dtbi) > 0 {
+			if res.Metadata == nil {
+				res.Metadata = make(map[string][]byte)
+			}
+			res.Metadata[exptypes.ExporterBuildInfo] = dtbi
+		}
+	}
+
 	return
 }
 
 type resultProxy struct {
-	cb         func(context.Context) (solver.CachedResult, error)
+	cb         func(context.Context) (solver.CachedResult, solver.BuildSources, error)
 	def        *pb.Definition
 	g          flightcontrol.Group
 	mu         sync.Mutex
 	released   bool
 	v          solver.CachedResult
+	bsrc       solver.BuildSources
 	err        error
 	errResults []solver.Result
 }
@@ -159,8 +204,8 @@ func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy {
 	rp := &resultProxy{
 		def: req.Definition,
 	}
-	rp.cb = func(ctx context.Context) (solver.CachedResult, error) {
-		res, err := b.loadResult(ctx, req.Definition, req.CacheImports)
+	rp.cb = func(ctx context.Context) (solver.CachedResult, solver.BuildSources, error) {
+		res, bsrc, err := b.loadResult(ctx, req.Definition, req.CacheImports)
 		var ee *llberrdefs.ExecError
 		if errors.As(err, &ee) {
 			ee.EachRef(func(res solver.Result) error {
@@ -170,7 +215,7 @@ func newResultProxy(b *llbBridge, req frontend.SolveRequest) *resultProxy {
 			// acquire ownership so ExecError finalizer doesn't attempt to release as well
 			ee.OwnerBorrowed = true
 		}
-		return res, err
+		return res, bsrc, err
 	}
 	return rp
 }
@@ -179,6 +224,10 @@ func (rp *resultProxy) Definition() *pb.Definition {
 	return rp.def
 }
 
+func (rp *resultProxy) BuildSources() solver.BuildSources {
+	return rp.bsrc
+}
+
 func (rp *resultProxy) Release(ctx context.Context) (err error) {
 	rp.mu.Lock()
 	defer rp.mu.Unlock()
@@ -190,7 +239,7 @@ func (rp *resultProxy) Release(ctx context.Context) (err error) {
 	}
 	if rp.v != nil {
 		if rp.released {
-			logrus.Warnf("release of already released result")
+			bklog.G(ctx).Warnf("release of already released result")
 		}
 		rerr := rp.v.Release(ctx)
 		if err != nil {
@@ -237,7 +286,7 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err
 			return rp.v, rp.err
 		}
 		rp.mu.Unlock()
-		v, err := rp.cb(ctx)
+		v, bsrc, err := rp.cb(ctx)
 		if err != nil {
 			select {
 			case <-ctx.Done():
@@ -256,6 +305,7 @@ func (rp *resultProxy) Result(ctx context.Context) (res solver.CachedResult, err
 			return nil, errors.Errorf("evaluating released result")
 		}
 		rp.v = v
+		rp.bsrc = bsrc
 		rp.err = err
 		rp.mu.Unlock()
 		return v, err
@@ -348,7 +398,7 @@ func cmKey(im gw.CacheOptionsEntry) (string, error) {
 	if im.Type == "registry" && im.Attrs["ref"] != "" {
 		return im.Attrs["ref"], nil
 	}
-	i, err := hashstructure.Hash(im, nil)
+	i, err := hashstructure.Hash(im, hashstructure.FormatV2, nil)
 	if err != nil {
 		return "", err
 	}
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/errdefs/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/errdefs/exec.go
index ed3c0d4b0f..f70bc95bf6 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/errdefs/exec.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/errdefs/exec.go
@@ -5,7 +5,7 @@ import (
 	"runtime"
 
 	"github.com/moby/buildkit/solver"
-	"github.com/sirupsen/logrus"
+	"github.com/moby/buildkit/util/bklog"
 )
 
 // ExecError will be returned when an error is encountered when evaluating an op.
@@ -62,6 +62,10 @@ func (e *ExecError) Release() error {
 }
 
 func WithExecError(err error, inputs, mounts []solver.Result) error {
+	return WithExecErrorWithContext(context.TODO(), err, inputs, mounts)
+}
+
+func WithExecErrorWithContext(ctx context.Context, err error, inputs, mounts []solver.Result) error {
 	if err == nil {
 		return nil
 	}
@@ -73,7 +77,7 @@ func WithExecError(err error, inputs, mounts []solver.Result) error {
 	runtime.SetFinalizer(ee, func(e *ExecError) {
 		if !e.OwnerBorrowed {
 			e.EachRef(func(r solver.Result) error {
-				logrus.Warn("leaked execError detected and released")
+				bklog.G(ctx).Warn("leaked execError detected and released")
 				r.Release(context.TODO())
 				return nil
 			})
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go
index 0060ad3fc9..732e674741 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/backend.go
@@ -146,10 +146,16 @@ func rm(ctx context.Context, d string, action pb.FileActionRm) error {
 }
 
 func rmPath(root, src string, allowNotFound bool) error {
-	p, err := fs.RootPath(root, filepath.Join("/", src))
+	src = filepath.Clean(src)
+	dir, base := filepath.Split(src)
+	if base == "" {
+		return errors.New("rmPath: invalid empty path")
+	}
+	dir, err := fs.RootPath(root, filepath.Join("/", dir))
 	if err != nil {
 		return err
 	}
+	p := filepath.Join(dir, base)
 
 	if err := os.RemoveAll(p); err != nil {
 		if errors.Is(err, os.ErrNotExist) && allowNotFound {
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go
index 926c46158e..e1c58c1e54 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/refmanager.go
@@ -7,6 +7,7 @@ import (
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/snapshot"
 	"github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes"
+	"github.com/moby/buildkit/util/bklog"
 	"github.com/pkg/errors"
 )
 
@@ -18,7 +19,7 @@ type RefManager struct {
 	cm cache.Manager
 }
 
-func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool, g session.Group) (fileoptypes.Mount, error) {
+func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly bool, g session.Group) (_ fileoptypes.Mount, rerr error) {
 	ir, ok := ref.(cache.ImmutableRef)
 	if !ok && ref != nil {
 		return nil, errors.Errorf("invalid ref type: %T", ref)
@@ -36,6 +37,14 @@ func (rm *RefManager) Prepare(ctx context.Context, ref fileoptypes.Ref, readonly
 	if err != nil {
 		return nil, err
 	}
+	defer func() {
+		if rerr != nil {
+			if err := mr.SetCachePolicyDefault(); err != nil {
+				bklog.G(ctx).Errorf("failed to reset FileOp mutable ref cachepolicy: %v", err)
+			}
+			mr.Release(context.TODO())
+		}
+	}()
 	m, err := mr.Mount(ctx, readonly, g)
 	if err != nil {
 		return nil, err
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go
index 8e53052604..41ea222f9d 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/unpack.go
@@ -55,6 +55,7 @@ func isArchivePath(path string) bool {
 	if err != nil {
 		return false
 	}
+	defer rdr.Close()
 	r := tar.NewReader(rdr)
 	_, err = r.Next()
 	return err == nil
diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go
index 8e4848cc50..1f17431f5c 100644
--- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go
+++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_linux.go
@@ -2,6 +2,7 @@ package file
 
 import (
 	"os"
+	"syscall"
 
 	"github.com/containerd/continuity/fs"
 	"github.com/moby/buildkit/snapshot"
@@ -45,6 +46,10 @@ func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error)
 		}
 
 		ufile, err := os.Open(passwdPath)
+		if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
+			// Couldn't open the file; treat this as the user not being found in the file.
+			break
+		}
 		if err != nil {
 			return nil, err
 		}
@@ -95,6 +100,10 @@ func readUser(chopt *pb.ChownOpt, mu, mg fileoptypes.Mount) (*copy.User, error)
 		}
 
 		gfile, err := os.Open(groupPath)
+		if errors.Is(err, os.ErrNotExist) || errors.Is(err, syscall.ENOTDIR) {
+			// Couldn't open the file; treat this as the group not being found in the file.
+ break + } if err != nil { return nil, err } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go index 246f18706e..80652fd4ab 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/file/user_nolinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package file diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go index 05703acab5..ffa4df5da3 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/mounts/mount.go @@ -9,11 +9,12 @@ import ( "sync" "time" + "github.com/moby/buildkit/util/bklog" + "github.com/containerd/containerd/mount" - "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/pkg/userns" "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" @@ -24,17 +25,14 @@ import ( "github.com/moby/buildkit/util/grpcerrors" "github.com/moby/locker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" "google.golang.org/grpc/codes" ) -func NewMountManager(name string, cm cache.Manager, sm *session.Manager, md *metadata.Store) *MountManager { +func NewMountManager(name string, cm cache.Manager, sm *session.Manager) *MountManager { return &MountManager{ cm: cm, sm: sm, cacheMounts: map[string]*cacheRefShare{}, - md: md, managerName: name, } } @@ -44,7 +42,6 @@ type MountManager struct { sm *session.Manager cacheMountsMu sync.Mutex cacheMounts map[string]*cacheRefShare - md *metadata.Store managerName string } @@ -53,7 +50,6 @@ func (mm *MountManager) getRefCacheDir(ctx context.Context, ref cache.ImmutableR locker: &mm.cacheMountsMu, cacheMounts: mm.cacheMounts, cm: mm.cm, - md: mm.md, globalCacheRefs: sharedCacheRefs, name: fmt.Sprintf("cached mount %s from %s", m.Dest, mm.managerName), session: s, @@ -65,14 +61,13 @@ type cacheRefGetter struct { locker sync.Locker cacheMounts map[string]*cacheRefShare cm cache.Manager - md *metadata.Store globalCacheRefs *cacheRefs name string session session.Group } func (g *cacheRefGetter) getRefCacheDir(ctx context.Context, ref cache.ImmutableRef, id string, sharing pb.CacheSharingOpt) (mref cache.MutableRef, err error) { - key := "cache-dir:" + id + key := id if ref != nil { key += ":" + ref.ID() } @@ -113,14 +108,14 @@ func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, cacheRefsLocker.Lock(key) defer cacheRefsLocker.Unlock(key) for { - sis, err := g.md.Search(key) + sis, err := SearchCacheDir(ctx, g.cm, key) if err != nil { return nil, err } locked := false for _, si := range sis { if mRef, err := g.cm.GetMutable(ctx, si.ID()); err == nil { - logrus.Debugf("reusing ref for cache dir: %s", mRef.ID()) + bklog.G(ctx).Debugf("reusing ref for cache dir: %s", mRef.ID()) return mRef, nil } else if errors.Is(err, cache.ErrLocked) { locked = true @@ -144,16 +139,8 @@ func (g *cacheRefGetter) getRefCacheDirNoCache(ctx context.Context, key string, return nil, err } - si, _ := g.md.Get(mRef.ID()) - v, err := metadata.NewValue(key) - if err != nil { - mRef.Release(context.TODO()) - return nil, err - } - v.Index = key - if err := si.Update(func(b *bolt.Bucket) error { - 
return si.SetValue(b, key, v) - }); err != nil { + md := CacheRefMetadata{mRef} + if err := md.setCacheDirIndex(key); err != nil { mRef.Release(context.TODO()) return nil, err } @@ -314,7 +301,7 @@ func (sm *secretMountInstance) Mount() ([]mount.Mount, func() error, error) { Options: []string{"nodev", "nosuid", "noexec", fmt.Sprintf("uid=%d,gid=%d", os.Geteuid(), os.Getegid())}, } - if sys.RunningInUserNS() { + if userns.RunningInUserNS() { tmpMount.Options = nil } @@ -382,8 +369,8 @@ func (mm *MountManager) MountableCache(ctx context.Context, m *pb.Mount, ref cac return mm.getRefCacheDir(ctx, ref, m.CacheOpt.ID, m, m.CacheOpt.Sharing, g) } -func (mm *MountManager) MountableTmpFS() cache.Mountable { - return newTmpfs(mm.cm.IdentityMapping()) +func (mm *MountManager) MountableTmpFS(m *pb.Mount) cache.Mountable { + return newTmpfs(mm.cm.IdentityMapping(), m.TmpfsOpt) } func (mm *MountManager) MountableSecret(ctx context.Context, m *pb.Mount, g session.Group) (cache.Mountable, error) { @@ -394,21 +381,23 @@ func (mm *MountManager) MountableSSH(ctx context.Context, m *pb.Mount, g session return mm.getSSHMountable(ctx, m, g) } -func newTmpfs(idmap *idtools.IdentityMapping) cache.Mountable { - return &tmpfs{idmap: idmap} +func newTmpfs(idmap *idtools.IdentityMapping, opt *pb.TmpfsOpt) cache.Mountable { + return &tmpfs{idmap: idmap, opt: opt} } type tmpfs struct { idmap *idtools.IdentityMapping + opt *pb.TmpfsOpt } func (f *tmpfs) Mount(ctx context.Context, readonly bool, g session.Group) (snapshot.Mountable, error) { - return &tmpfsMount{readonly: readonly, idmap: f.idmap}, nil + return &tmpfsMount{readonly: readonly, idmap: f.idmap, opt: f.opt}, nil } type tmpfsMount struct { readonly bool idmap *idtools.IdentityMapping + opt *pb.TmpfsOpt } func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { @@ -416,6 +405,11 @@ func (m *tmpfsMount) Mount() ([]mount.Mount, func() error, error) { if m.readonly { opt = append(opt, "ro") } + if m.opt != nil { + if m.opt.Size_ > 0 { + opt = append(opt, fmt.Sprintf("size=%d", m.opt.Size_)) + } + } return []mount.Mount{{ Type: "tmpfs", Source: "tmpfs", @@ -517,3 +511,30 @@ func (r *cacheRef) Release(ctx context.Context) error { } return nil } + +const keyCacheDir = "cache-dir" +const cacheDirIndex = keyCacheDir + ":" + +func SearchCacheDir(ctx context.Context, store cache.MetadataStore, id string) ([]CacheRefMetadata, error) { + var results []CacheRefMetadata + mds, err := store.Search(ctx, cacheDirIndex+id) + if err != nil { + return nil, err + } + for _, md := range mds { + results = append(results, CacheRefMetadata{md}) + } + return results, nil +} + +type CacheRefMetadata struct { + cache.RefMetadata +} + +func (md CacheRefMetadata) setCacheDirIndex(id string) error { + return md.SetString(keyCacheDir, id, cacheDirIndex+id) +} + +func (md CacheRefMetadata) ClearCacheDirIndex() error { + return md.ClearValueAndIndex(keyCacheDir, cacheDirIndex) +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go new file mode 100644 index 0000000000..1a05f7a6c7 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/diff.go @@ -0,0 +1,135 @@ +package ops + +import ( + "context" + "encoding/json" + + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + 
"github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +const diffCacheType = "buildkit.diff.v0" + +type diffOp struct { + op *pb.DiffOp + worker worker.Worker + vtx solver.Vertex + pg progress.Controller +} + +func NewDiffOp(v solver.Vertex, op *pb.Op_Diff, w worker.Worker) (solver.Op, error) { + if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + return nil, err + } + return &diffOp{ + op: op.Diff, + worker: w, + vtx: v, + }, nil +} + +func (d *diffOp) CacheMap(ctx context.Context, group session.Group, index int) (*solver.CacheMap, bool, error) { + dt, err := json.Marshal(struct { + Type string + Diff *pb.DiffOp + }{ + Type: diffCacheType, + Diff: d.op, + }) + if err != nil { + return nil, false, err + } + + var depCount int + if d.op.Lower.Input != pb.Empty { + depCount++ + } + if d.op.Upper.Input != pb.Empty { + depCount++ + } + + cm := &solver.CacheMap{ + Digest: digest.Digest(dt), + Deps: make([]struct { + Selector digest.Digest + ComputeDigestFunc solver.ResultBasedCacheFunc + PreprocessFunc solver.PreprocessFunc + }, depCount), + Opts: solver.CacheOpts(make(map[interface{}]interface{})), + } + + d.pg = &controller.Controller{ + WriterFactory: progress.FromContext(ctx), + Digest: d.vtx.Digest(), + Name: d.vtx.Name(), + ProgressGroup: d.vtx.Options().ProgressGroup, + } + cm.Opts[cache.ProgressKey{}] = d.pg + + return cm, true, nil +} + +func (d *diffOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) ([]solver.Result, error) { + var curInput int + + var lowerRef cache.ImmutableRef + if d.op.Lower.Input != pb.Empty { + if lowerInp := inputs[curInput]; lowerInp != nil { + wref, ok := lowerInp.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid lower reference for diff op %T", lowerInp.Sys()) + } + lowerRef = wref.ImmutableRef + } else { + return nil, errors.New("invalid nil lower input for diff op") + } + curInput++ + } + + var upperRef cache.ImmutableRef + if d.op.Upper.Input != pb.Empty { + if upperInp := inputs[curInput]; upperInp != nil { + wref, ok := upperInp.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid upper reference for diff op %T", upperInp.Sys()) + } + upperRef = wref.ImmutableRef + } else { + return nil, errors.New("invalid nil upper input for diff op") + } + } + + if lowerRef == nil { + if upperRef == nil { + // The diff of nothing and nothing is nothing. Just return an empty ref. + return []solver.Result{worker.NewWorkerRefResult(nil, d.worker)}, nil + } + // The diff of nothing and upper is upper. Just return a clone of upper + return []solver.Result{worker.NewWorkerRefResult(upperRef.Clone(), d.worker)}, nil + } + if upperRef != nil && lowerRef.ID() == upperRef.ID() { + // The diff of a ref and itself is nothing, return an empty ref. 
+ return []solver.Result{worker.NewWorkerRefResult(nil, d.worker)}, nil + } + + diffRef, err := d.worker.CacheManager().Diff(ctx, lowerRef, upperRef, d.pg, + cache.WithDescription(d.vtx.Name())) + if err != nil { + return nil, err + } + + return []solver.Result{worker.NewWorkerRefResult(diffRef, d.worker)}, nil +} + +func (d *diffOp) Acquire(ctx context.Context) (release solver.ReleaseFunc, err error) { + return func() {}, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go index 1e0b7ad904..6cca733c0b 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec.go @@ -5,7 +5,6 @@ import ( "context" "encoding/json" "fmt" - "net" "os" "path" "sort" @@ -13,22 +12,24 @@ import ( "github.com/containerd/containerd/platforms" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/executor" "github.com/moby/buildkit/frontend/gateway" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/session/secrets" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" "github.com/moby/buildkit/solver/llbsolver/errdefs" "github.com/moby/buildkit/solver/llbsolver/mounts" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/util/progress/logs" utilsystem "github.com/moby/buildkit/util/system" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" - "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/trace" "golang.org/x/sync/semaphore" ) @@ -38,27 +39,31 @@ type execOp struct { op *pb.ExecOp cm cache.Manager mm *mounts.MountManager + sm *session.Manager exec executor.Executor w worker.Worker platform *pb.Platform numInputs int parallelism *semaphore.Weighted + vtx solver.Vertex } -func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, md *metadata.Store, exec executor.Executor, w worker.Worker) (solver.Op, error) { +func NewExecOp(v solver.Vertex, op *pb.Op_Exec, platform *pb.Platform, cm cache.Manager, parallelism *semaphore.Weighted, sm *session.Manager, exec executor.Executor, w worker.Worker) (solver.Op, error) { if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { return nil, err } name := fmt.Sprintf("exec %s", strings.Join(op.Exec.Meta.Args, " ")) return &execOp{ op: op.Exec, - mm: mounts.NewMountManager(name, cm, sm, md), + mm: mounts.NewMountManager(name, cm, sm), cm: cm, + sm: sm, exec: exec, numInputs: len(v.Inputs()), w: w, platform: platform, parallelism: parallelism, + vtx: v, }, nil } @@ -93,7 +98,7 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol p := platforms.DefaultSpec() if e.platform != nil { - p = specs.Platform{ + p = ocispecs.Platform{ OS: e.platform.OS, Architecture: e.platform.Architecture, Variant: e.platform.Variant, @@ -140,6 +145,14 @@ func (e *execOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, e.numInputs), + Opts: solver.CacheOpts(map[interface{}]interface{}{ + cache.ProgressKey{}: &controller.Controller{ + WriterFactory: 
progress.FromContext(ctx), + Digest: e.vtx.Digest(), + Name: e.vtx.Name(), + ProgressGroup: e.vtx.Options().ProgressGroup, + }, + }), } deps, err := e.getMountDeps() @@ -234,6 +247,8 @@ func addDefaultEnvvar(env []string, k, v string) []string { } func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) (results []solver.Result, err error) { + trace.SpanFromContext(ctx).AddEvent("ExecOp started") + refs := make([]*worker.WorkerRef, len(inputs)) for i, inp := range inputs { var ok bool @@ -290,13 +305,16 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu return nil, err } - extraHosts, err := parseExtraHosts(e.op.Meta.ExtraHosts) + extraHosts, err := gateway.ParseExtraHosts(e.op.Meta.ExtraHosts) if err != nil { return nil, err } - emu, err := getEmulator(e.platform, e.cm.IdentityMapping()) - if err == nil && emu != nil { + emu, err := getEmulator(ctx, e.platform, e.cm.IdentityMapping()) + if err != nil { + return nil, err + } + if emu != nil { e.op.Meta.Args = append([]string{qemuMountName}, e.op.Meta.Args...) p.Mounts = append(p.Mounts, executor.Mount{ @@ -305,9 +323,6 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu Dest: qemuMountName, }) } - if err != nil { - logrus.Warn(err.Error()) // TODO: remove this with pull support - } meta := executor.Meta{ Args: e.op.Meta.Args, @@ -317,6 +332,8 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu Hostname: e.op.Meta.Hostname, ReadonlyRootFS: p.ReadonlyRootFS, ExtraHosts: extraHosts, + Ulimit: e.op.Meta.Ulimit, + CgroupParent: e.op.Meta.CgroupParent, NetMode: e.op.Network, SecurityMode: e.op.Security, } @@ -330,9 +347,20 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu } meta.Env = addDefaultEnvvar(meta.Env, "PATH", utilsystem.DefaultPathEnv(currentOS)) - stdout, stderr := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1") + secretEnv, err := e.loadSecretEnv(ctx, g) + if err != nil { + return nil, err + } + meta.Env = append(meta.Env, secretEnv...) + + stdout, stderr, flush := logs.NewLogStreams(ctx, os.Getenv("BUILDKIT_DEBUG_EXEC_OUTPUT") == "1") defer stdout.Close() defer stderr.Close() + defer func() { + if err != nil { + flush() + } + }() execErr := e.exec.Run(ctx, "", p.Root, p.Mounts, executor.ProcessInfo{ Meta: meta, @@ -354,7 +382,7 @@ func (e *execOp) Exec(ctx context.Context, g session.Group, inputs []solver.Resu // Prevent the result from being released. 
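// Ownership of the ref moves to the returned results; clearing the entry keeps the op's mount cleanup from releasing it.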
p.OutputRefs[i].Ref = nil } - return results, errors.Wrapf(execErr, "executor failed running %v", e.op.Meta.Args) + return results, errors.Wrapf(execErr, "process %q did not complete successfully", strings.Join(e.op.Meta.Args, " ")) } func proxyEnvList(p *pb.ProxyEnv) []string { @@ -377,21 +405,6 @@ func proxyEnvList(p *pb.ProxyEnv) []string { return out } -func parseExtraHosts(ips []*pb.HostIP) ([]executor.HostIP, error) { - out := make([]executor.HostIP, len(ips)) - for i, hip := range ips { - ip := net.ParseIP(hip.IP) - if ip == nil { - return nil, errors.Errorf("failed to parse IP %s", hip.IP) - } - out[i] = executor.HostIP{ - IP: ip, - Host: hip.Host, - } - } - return out, nil -} - func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { if e.parallelism == nil { return func() {}, nil @@ -404,3 +417,34 @@ func (e *execOp) Acquire(ctx context.Context) (solver.ReleaseFunc, error) { e.parallelism.Release(1) }, nil } + +func (e *execOp) loadSecretEnv(ctx context.Context, g session.Group) ([]string, error) { + secretenv := e.op.Secretenv + if len(secretenv) == 0 { + return nil, nil + } + out := make([]string, 0, len(secretenv)) + for _, sopt := range secretenv { + id := sopt.ID + if id == "" { + return nil, errors.Errorf("secret ID missing for %q environment variable", sopt.Name) + } + var dt []byte + var err error + err = e.sm.Any(ctx, g, func(ctx context.Context, _ string, caller session.Caller) error { + dt, err = secrets.GetSecret(ctx, caller, id) + if err != nil { + if errors.Is(err, secrets.ErrNotFound) && sopt.Optional { + return nil + } + return err + } + return nil + }) + if err != nil { + return nil, err + } + out = append(out, fmt.Sprintf("%s=%s", sopt.Name, string(dt))) + } + return out, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go index 707c5f6722..56433d49fd 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/exec_binfmt.go @@ -6,6 +6,7 @@ import ( "os" "os/exec" "path/filepath" + "strings" "github.com/containerd/containerd/mount" "github.com/containerd/containerd/platforms" @@ -13,7 +14,8 @@ import ( "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/archutil" - specs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/moby/buildkit/util/bklog" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" copy "github.com/tonistiigi/fsutil/copy" ) @@ -77,28 +79,36 @@ func (m *staticEmulatorMount) Mount() ([]mount.Mount, func() error, error) { }}, func() error { return os.RemoveAll(tmpdir) }, nil - } + func (m *staticEmulatorMount) IdentityMapping() *idtools.IdentityMapping { return m.idmap } -func getEmulator(p *pb.Platform, idmap *idtools.IdentityMapping) (*emulator, error) { +func getEmulator(ctx context.Context, p *pb.Platform, idmap *idtools.IdentityMapping) (*emulator, error) { all := archutil.SupportedPlatforms(false) - m := make(map[string]struct{}, len(all)) - - for _, p := range all { - m[p] = struct{}{} - } - - pp := platforms.Normalize(specs.Platform{ + pp := platforms.Normalize(ocispecs.Platform{ Architecture: p.Architecture, OS: p.OS, Variant: p.Variant, }) - if _, ok := m[platforms.Format(pp)]; ok { - return nil, nil + for _, p := range all { + if platforms.Only(p).Match(pp) { + return nil, nil + } + } + + if pp.Architecture == "amd64" { + if pp.Variant != 
"" && pp.Variant != "v2" { + var supported []string + for _, p := range all { + if p.Architecture == "amd64" { + supported = append(supported, platforms.Format(p)) + } + } + return nil, errors.Errorf("no support for running processes with %s platform, supported: %s", platforms.Format(pp), strings.Join(supported, ", ")) + } } a, ok := qemuArchMap[pp.Architecture] @@ -108,7 +118,8 @@ func getEmulator(p *pb.Platform, idmap *idtools.IdentityMapping) (*emulator, err fn, err := exec.LookPath("buildkit-qemu-" + a) if err != nil { - return nil, errors.Errorf("no emulator available for %v", pp.OS) + bklog.G(ctx).Warn(err.Error()) // TODO: remove this with pull support + return nil, nil // no emulator available } return &emulator{path: fn}, nil diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go index b0bb56397f..012ef4cc12 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/file.go @@ -11,7 +11,6 @@ import ( "sync" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/solver/llbsolver" @@ -20,6 +19,8 @@ import ( "github.com/moby/buildkit/solver/llbsolver/ops/fileoptypes" "github.com/moby/buildkit/solver/pb" "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -31,24 +32,26 @@ const fileCacheType = "buildkit.file.v0" type fileOp struct { op *pb.FileOp - md *metadata.Store + md cache.MetadataStore w worker.Worker solver *FileOpSolver numInputs int parallelism *semaphore.Weighted + vtx solver.Vertex } -func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *semaphore.Weighted, md *metadata.Store, w worker.Worker) (solver.Op, error) { +func NewFileOp(v solver.Vertex, op *pb.Op_File, cm cache.Manager, parallelism *semaphore.Weighted, w worker.Worker) (solver.Op, error) { if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { return nil, err } return &fileOp{ op: op.File, - md: md, + md: cm, numInputs: len(v.Inputs()), w: w, solver: NewFileOpSolver(w, &file.Backend{}, file.NewRefManager(cm)), parallelism: parallelism, + vtx: v, }, nil } @@ -135,6 +138,14 @@ func (f *fileOp) CacheMap(ctx context.Context, g session.Group, index int) (*sol ComputeDigestFunc solver.ResultBasedCacheFunc PreprocessFunc solver.PreprocessFunc }, f.numInputs), + Opts: solver.CacheOpts(map[interface{}]interface{}{ + cache.ProgressKey{}: &controller.Controller{ + WriterFactory: progress.FromContext(ctx), + Digest: f.vtx.Digest(), + Name: f.vtx.Name(), + ProgressGroup: f.vtx.Options().ProgressGroup, + }, + }), } for idx, m := range selectors { @@ -452,7 +463,7 @@ func (s *FileOpSolver) getInput(ctx context.Context, idx int, inputs []fileoptyp } } - err = errdefs.WithExecError(err, inputRes, outputRes) + err = errdefs.WithExecErrorWithContext(ctx, err, inputRes, outputRes) } for _, m := range toRelease { m.Release(context.TODO()) diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go new file mode 100644 index 0000000000..13bb60ba88 --- /dev/null +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/merge.go @@ -0,0 +1,107 @@ +package ops + 
+import ( + "context" + "encoding/json" + + "github.com/moby/buildkit/util/progress" + "github.com/moby/buildkit/util/progress/controller" + "github.com/moby/buildkit/worker" + "github.com/pkg/errors" + + "github.com/moby/buildkit/cache" + "github.com/moby/buildkit/session" + "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/solver/llbsolver" + "github.com/moby/buildkit/solver/pb" + digest "github.com/opencontainers/go-digest" +) + +const mergeCacheType = "buildkit.merge.v0" + +type mergeOp struct { + op *pb.MergeOp + worker worker.Worker + vtx solver.Vertex + pg progress.Controller +} + +func NewMergeOp(v solver.Vertex, op *pb.Op_Merge, w worker.Worker) (solver.Op, error) { + if err := llbsolver.ValidateOp(&pb.Op{Op: op}); err != nil { + return nil, err + } + return &mergeOp{ + op: op.Merge, + worker: w, + vtx: v, + }, nil +} + +func (m *mergeOp) CacheMap(ctx context.Context, group session.Group, index int) (*solver.CacheMap, bool, error) { + dt, err := json.Marshal(struct { + Type string + Merge *pb.MergeOp + }{ + Type: mergeCacheType, + Merge: m.op, + }) + if err != nil { + return nil, false, err + } + + cm := &solver.CacheMap{ + Digest: digest.Digest(dt), + Deps: make([]struct { + Selector digest.Digest + ComputeDigestFunc solver.ResultBasedCacheFunc + PreprocessFunc solver.PreprocessFunc + }, len(m.op.Inputs)), + Opts: solver.CacheOpts(make(map[interface{}]interface{})), + } + + m.pg = &controller.Controller{ + WriterFactory: progress.FromContext(ctx), + Digest: m.vtx.Digest(), + Name: m.vtx.Name(), + ProgressGroup: m.vtx.Options().ProgressGroup, + } + cm.Opts[cache.ProgressKey{}] = m.pg + + return cm, true, nil +} + +func (m *mergeOp) Exec(ctx context.Context, g session.Group, inputs []solver.Result) ([]solver.Result, error) { + refs := make([]cache.ImmutableRef, len(inputs)) + var index int + for _, inp := range inputs { + if inp == nil { + continue + } + wref, ok := inp.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference for merge %T", inp.Sys()) + } + if wref.ImmutableRef == nil { + continue + } + refs[index] = wref.ImmutableRef + index++ + } + refs = refs[:index] + + if len(refs) == 0 { + return nil, nil + } + + mergedRef, err := m.worker.CacheManager().Merge(ctx, refs, m.pg, + cache.WithDescription(m.vtx.Name())) + if err != nil { + return nil, err + } + + return []solver.Result{worker.NewWorkerRefResult(mergedRef, m.worker)}, nil +} + +func (m *mergeOp) Acquire(ctx context.Context) (release solver.ReleaseFunc, err error) { + return func() {}, nil +} diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go index 6cec6320b5..d24a902da5 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/ops/source.go @@ -67,21 +67,27 @@ func (s *sourceOp) CacheMap(ctx context.Context, g session.Group, index int) (*s if err != nil { return nil, false, err } - k, cacheOpts, done, err := src.CacheKey(ctx, g, index) + + k, pin, cacheOpts, done, err := src.CacheKey(ctx, g, index) if err != nil { return nil, false, err } dgst := digest.FromBytes([]byte(sourceCacheType + ":" + k)) - if strings.HasPrefix(k, "session:") { dgst = digest.Digest("random:" + strings.TrimPrefix(dgst.String(), dgst.Algorithm().String()+":")) } + var buildSources map[string]string + if !strings.HasPrefix(s.op.Source.GetIdentifier(), "local://") { + buildSources = map[string]string{s.op.Source.GetIdentifier(): pin} + } + 
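+ // Note: local:// sources are deliberately skipped; client session content has no stable pin to record as a build source.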
return &solver.CacheMap{ // TODO: add os/arch - Digest: dgst, - Opts: cacheOpts, + Digest: dgst, + Opts: cacheOpts, + BuildSources: buildSources, }, done, nil } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go index 6dcca69f6f..0cadda547d 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/result.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/result.go @@ -5,10 +5,10 @@ import ( "context" "path" + cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/cache/contenthash" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/worker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" @@ -67,7 +67,7 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { s, ) if err != nil { - return err + return errors.Wrapf(err, "failed to calculate checksum of ref %s", ref.ID()) } dgsts[i] = []byte(dgst) return nil @@ -82,13 +82,13 @@ func NewContentHashFunc(selectors []Selector) solver.ResultBasedCacheFunc { } } -func workerRefConverter(g session.Group) func(ctx context.Context, res solver.Result) (*solver.Remote, error) { - return func(ctx context.Context, res solver.Result) (*solver.Remote, error) { +func workerRefResolver(refCfg cacheconfig.RefConfig, all bool, g session.Group) func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { + return func(ctx context.Context, res solver.Result) ([]*solver.Remote, error) { ref, ok := res.Sys().(*worker.WorkerRef) if !ok { return nil, errors.Errorf("invalid result: %T", res.Sys()) } - return ref.GetRemote(ctx, true, compression.Default, g) + return ref.GetRemotes(ctx, true, refCfg, all, g) } } diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go index 31e8afca7a..ee06233da5 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/solver.go @@ -2,11 +2,13 @@ package llbsolver import ( "context" + "encoding/base64" "fmt" "strings" "time" "github.com/moby/buildkit/cache" + cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/cache/remotecache" "github.com/moby/buildkit/client" controlgateway "github.com/moby/buildkit/control/gateway" @@ -14,8 +16,10 @@ import ( "github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/frontend" "github.com/moby/buildkit/frontend/gateway" + "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" + "github.com/moby/buildkit/util/buildinfo" "github.com/moby/buildkit/util/compression" "github.com/moby/buildkit/util/entitlements" "github.com/moby/buildkit/util/progress" @@ -154,6 +158,36 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, err } + if r := res.Ref; r != nil { + dtbi, err := buildinfo.Encode(ctx, res.Metadata, exptypes.ExporterBuildInfo, r.BuildSources()) + if err != nil { + return nil, err + } + if dtbi != nil && len(dtbi) > 0 { + if res.Metadata == nil { + res.Metadata = make(map[string][]byte) + } + res.Metadata[exptypes.ExporterBuildInfo] = dtbi + } + } + if res.Refs != nil { + for k, r := range res.Refs { + if r == nil { + continue + } + dtbi, err := buildinfo.Encode(ctx, res.Metadata, fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k), r.BuildSources()) + if 
err != nil { + return nil, err + } + if dtbi != nil && len(dtbi) > 0 { + if res.Metadata == nil { + res.Metadata = make(map[string][]byte) + } + res.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterBuildInfo, k)] = dtbi + } + } + } + var exporterResponse map[string]string if e := exp.Exporter; e != nil { inp := exporter.Source{ @@ -162,6 +196,8 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro if inp.Metadata == nil { inp.Metadata = make(map[string][]byte) } + var cr solver.CachedResult + var crMap = map[string]solver.CachedResult{} if res := res.Ref; res != nil { r, err := res.Result(ctx) if err != nil { @@ -172,14 +208,7 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, errors.Errorf("invalid reference: %T", r.Sys()) } inp.Ref = workerRef.ImmutableRef - - dt, err := inlineCache(ctx, exp.CacheExporter, r, session.NewGroup(sessionID)) - if err != nil { - return nil, err - } - if dt != nil { - inp.Metadata[exptypes.ExporterInlineCache] = dt - } + cr = r } if res.Refs != nil { m := make(map[string]cache.ImmutableRef, len(res.Refs)) @@ -196,19 +225,37 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro return nil, errors.Errorf("invalid reference: %T", r.Sys()) } m[k] = workerRef.ImmutableRef - - dt, err := inlineCache(ctx, exp.CacheExporter, r, session.NewGroup(sessionID)) - if err != nil { - return nil, err - } - if dt != nil { - inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dt - } + crMap[k] = r } } inp.Refs = m } - + if _, ok := asInlineCache(exp.CacheExporter); ok { + if err := inBuilderContext(ctx, j, "preparing layers for inline cache", "", func(ctx context.Context, _ session.Group) error { + if cr != nil { + dtic, err := inlineCache(ctx, exp.CacheExporter, cr, e.Config().Compression, session.NewGroup(sessionID)) + if err != nil { + return err + } + if dtic != nil { + inp.Metadata[exptypes.ExporterInlineCache] = dtic + } + } + for k, res := range crMap { + dtic, err := inlineCache(ctx, exp.CacheExporter, res, e.Config().Compression, session.NewGroup(sessionID)) + if err != nil { + return err + } + if dtic != nil { + inp.Metadata[fmt.Sprintf("%s/%s", exptypes.ExporterInlineCache, k)] = dtic + } + } + exp.CacheExporter = nil + return nil + }); err != nil { + return nil, err + } + } if err := inBuilderContext(ctx, j, e.Name(), "", func(ctx context.Context, _ session.Group) error { exporterResponse, err = e.Export(ctx, inp, j.SessionID) return err @@ -227,11 +274,22 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro if err != nil { return err } + + workerRef, ok := r.Sys().(*worker.WorkerRef) + if !ok { + return errors.Errorf("invalid reference: %T", r.Sys()) + } + ctx = withDescHandlerCacheOpts(ctx, workerRef.ImmutableRef) + + // Configure compression + compressionConfig := e.Config().Compression + // all keys have same export chain so exporting others is not needed _, err = r.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - Convert: workerRefConverter(g), - Mode: exp.CacheExportMode, - Session: g, + ResolveRemotes: workerRefResolver(cacheconfig.RefConfig{Compression: compressionConfig}, false, g), + Mode: exp.CacheExportMode, + Session: g, + CompressionOpt: &compressionConfig, }) return err }); err != nil { @@ -253,6 +311,9 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro if strings.HasPrefix(k, "frontend.") { exporterResponse[k] = string(v) } + if strings.HasPrefix(k, 
exptypes.ExporterBuildInfo) { + exporterResponse[k] = base64.StdEncoding.EncodeToString(v) + } } for k, v := range cacheExporterResponse { if strings.HasPrefix(k, "cache.") { @@ -265,36 +326,61 @@ func (s *Solver) Solve(ctx context.Context, id string, sessionID string, req fro }, nil } -func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, g session.Group) ([]byte, error) { - if efl, ok := e.(interface { - ExportForLayers([]digest.Digest) ([]byte, error) - }); ok { - workerRef, ok := res.Sys().(*worker.WorkerRef) - if !ok { - return nil, errors.Errorf("invalid reference: %T", res.Sys()) - } +type inlineCacheExporter interface { + ExportForLayers(context.Context, []digest.Digest) ([]byte, error) +} - remote, err := workerRef.GetRemote(ctx, true, compression.Default, g) - if err != nil || remote == nil { - return nil, nil - } +func asInlineCache(e remotecache.Exporter) (inlineCacheExporter, bool) { + ie, ok := e.(inlineCacheExporter) + return ie, ok +} - digests := make([]digest.Digest, 0, len(remote.Descriptors)) - for _, desc := range remote.Descriptors { - digests = append(digests, desc.Digest) - } - - if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ - Convert: workerRefConverter(g), - Mode: solver.CacheExportModeMin, - Session: g, - }); err != nil { - return nil, err - } - - return efl.ExportForLayers(digests) +func inlineCache(ctx context.Context, e remotecache.Exporter, res solver.CachedResult, compressionopt compression.Config, g session.Group) ([]byte, error) { + ie, ok := asInlineCache(e) + if !ok { + return nil, nil } - return nil, nil + workerRef, ok := res.Sys().(*worker.WorkerRef) + if !ok { + return nil, errors.Errorf("invalid reference: %T", res.Sys()) + } + + remotes, err := workerRef.GetRemotes(ctx, true, cacheconfig.RefConfig{Compression: compressionopt}, false, g) + if err != nil || len(remotes) == 0 { + return nil, nil + } + remote := remotes[0] + + digests := make([]digest.Digest, 0, len(remote.Descriptors)) + for _, desc := range remote.Descriptors { + digests = append(digests, desc.Digest) + } + + ctx = withDescHandlerCacheOpts(ctx, workerRef.ImmutableRef) + refCfg := cacheconfig.RefConfig{Compression: compressionopt} + if _, err := res.CacheKeys()[0].Exporter.ExportTo(ctx, e, solver.CacheExportOpt{ + ResolveRemotes: workerRefResolver(refCfg, true, g), // load as many compression blobs as possible + Mode: solver.CacheExportModeMin, + Session: g, + CompressionOpt: &compressionopt, // cache possible compression variants + }); err != nil { + return nil, err + } + return ie.ExportForLayers(ctx, digests) +} + +func withDescHandlerCacheOpts(ctx context.Context, ref cache.ImmutableRef) context.Context { + return solver.WithCacheOptGetter(ctx, func(includeAncestors bool, keys ...interface{}) map[interface{}]interface{} { + vals := make(map[interface{}]interface{}) + for _, k := range keys { + if key, ok := k.(cache.DescHandlerKey); ok { + if handler := ref.DescHandler(digest.Digest(key)); handler != nil { + vals[k] = handler + } + } + } + return vals + }) } func (s *Solver) Status(ctx context.Context, id string, statusChan chan *client.SolveStatus) error { @@ -327,7 +413,7 @@ func allWorkers(wc *worker.Controller) func(func(w worker.Worker) error) error { } func oneOffProgress(ctx context.Context, id string) func(err error) error { - pw, _, _ := progress.FromContext(ctx) + pw, _, _ := progress.NewFromContext(ctx) now := time.Now() st := progress.Status{ Started: &now, @@ -352,38 +438,33 @@ func 
inBuilderContext(ctx context.Context, b solver.Builder, name, id string, f Name: name, } return b.InContext(ctx, func(ctx context.Context, g session.Group) error { - pw, _, ctx := progress.FromContext(ctx, progress.WithMetadata("vertex", v.Digest)) - notifyStarted(ctx, &v, false) + pw, _, ctx := progress.NewFromContext(ctx, progress.WithMetadata("vertex", v.Digest)) + notifyCompleted := notifyStarted(ctx, &v, false) defer pw.Close() err := f(ctx, g) - notifyCompleted(ctx, &v, err, false) + notifyCompleted(err, false) return err }) } -func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) { - pw, _, _ := progress.FromContext(ctx) - defer pw.Close() - now := time.Now() - v.Started = &now +func notifyStarted(ctx context.Context, v *client.Vertex, cached bool) func(err error, cached bool) { + pw, _, _ := progress.NewFromContext(ctx) + start := time.Now() + v.Started = &start v.Completed = nil v.Cached = cached - pw.Write(v.Digest.String(), *v) -} - -func notifyCompleted(ctx context.Context, v *client.Vertex, err error, cached bool) { - pw, _, _ := progress.FromContext(ctx) - defer pw.Close() - now := time.Now() - if v.Started == nil { - v.Started = &now + id := identity.NewID() + pw.Write(id, *v) + return func(err error, cached bool) { + defer pw.Close() + stop := time.Now() + v.Completed = &stop + v.Cached = cached + if err != nil { + v.Error = err.Error() + } + pw.Write(id, *v) } - v.Completed = &now - v.Cached = cached - if err != nil { - v.Error = err.Error() - } - pw.Write(v.Digest.String(), *v) } func supportedEntitlements(ents []string) []entitlements.Entitlement { diff --git a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go index f5aa0b9926..4f36c2eddb 100644 --- a/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go +++ b/vendor/github.com/moby/buildkit/solver/llbsolver/vertex.go @@ -10,7 +10,7 @@ import ( "github.com/moby/buildkit/source" "github.com/moby/buildkit/util/entitlements" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -82,7 +82,7 @@ func NormalizeRuntimePlatforms() LoadOpt { } op.Platform = defaultPlatform } - platform := specs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant} + platform := ocispecs.Platform{OS: op.Platform.OS, Architecture: op.Platform.Architecture, Variant: op.Platform.Variant} normalizedPlatform := platforms.Normalize(platform) op.Platform = &pb.Platform{ @@ -162,6 +162,7 @@ func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(d if opMeta.ExportCache != nil { opt.ExportCache = &opMeta.ExportCache.Value } + opt.ProgressGroup = opMeta.ProgressGroup } for _, fn := range opts { if err := fn(op, opMeta, &opt); err != nil { @@ -169,7 +170,11 @@ func newVertex(dgst digest.Digest, op *pb.Op, opMeta *pb.OpMetadata, load func(d } } - vtx := &vertex{sys: op, options: opt, digest: dgst, name: llbOpName(op)} + name, err := llbOpName(op, load) + if err != nil { + return nil, err + } + vtx := &vertex{sys: op, options: opt, digest: dgst, name: name} for _, in := range op.Inputs { sub, err := load(in.Digest) if err != nil { @@ -242,25 +247,57 @@ func loadLLB(def *pb.Definition, fn func(digest.Digest, *pb.Op, func(digest.Dige return solver.Edge{Vertex: v, Index: solver.Index(lastOp.Inputs[0].Index)}, nil } -func llbOpName(op *pb.Op) string { - switch op := 
op.Op.(type) { +func llbOpName(pbOp *pb.Op, load func(digest.Digest) (solver.Vertex, error)) (string, error) { + switch op := pbOp.Op.(type) { case *pb.Op_Source: if id, err := source.FromLLB(op, nil); err == nil { if id, ok := id.(*source.LocalIdentifier); ok { if len(id.IncludePatterns) == 1 { - return op.Source.Identifier + " (" + id.IncludePatterns[0] + ")" + return op.Source.Identifier + " (" + id.IncludePatterns[0] + ")", nil } } } - return op.Source.Identifier + return op.Source.Identifier, nil case *pb.Op_Exec: - return strings.Join(op.Exec.Meta.Args, " ") + return strings.Join(op.Exec.Meta.Args, " "), nil case *pb.Op_File: - return fileOpName(op.File.Actions) + return fileOpName(op.File.Actions), nil case *pb.Op_Build: - return "build" + return "build", nil + case *pb.Op_Merge: + subnames := make([]string, len(pbOp.Inputs)) + for i, inp := range pbOp.Inputs { + subvtx, err := load(inp.Digest) + if err != nil { + return "", err + } + subnames[i] = subvtx.Name() + } + return "merge " + fmt.Sprintf("(%s)", strings.Join(subnames, ", ")), nil + case *pb.Op_Diff: + var lowerName string + if op.Diff.Lower.Input == -1 { + lowerName = "scratch" + } else { + lowerVtx, err := load(pbOp.Inputs[op.Diff.Lower.Input].Digest) + if err != nil { + return "", err + } + lowerName = fmt.Sprintf("(%s)", lowerVtx.Name()) + } + var upperName string + if op.Diff.Upper.Input == -1 { + upperName = "scratch" + } else { + upperVtx, err := load(pbOp.Inputs[op.Diff.Upper.Input].Digest) + if err != nil { + return "", err + } + upperName = fmt.Sprintf("(%s)", upperVtx.Name()) + } + return "diff " + lowerName + " -> " + upperName, nil default: - return "unknown" + return "unknown", nil } } @@ -309,6 +346,14 @@ func ValidateOp(op *pb.Op) error { if op.Build == nil { return errors.Errorf("invalid nil build op") } + case *pb.Op_Merge: + if op.Merge == nil { + return errors.Errorf("invalid nil merge op") + } + case *pb.Op_Diff: + if op.Diff == nil { + return errors.Errorf("invalid nil diff op") + } } return nil } diff --git a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go index 6754d48908..fc50d82ad4 100644 --- a/vendor/github.com/moby/buildkit/solver/memorycachestorage.go +++ b/vendor/github.com/moby/buildkit/solver/memorycachestorage.go @@ -6,6 +6,7 @@ import ( "time" "github.com/moby/buildkit/session" + "github.com/moby/buildkit/util/compression" "github.com/pkg/errors" ) @@ -298,7 +299,7 @@ func (s *inMemoryResultStore) Load(ctx context.Context, res CacheResult) (Result return v.(Result), nil } -func (s *inMemoryResultStore) LoadRemote(_ context.Context, _ CacheResult, _ session.Group) (*Remote, error) { +func (s *inMemoryResultStore) LoadRemotes(_ context.Context, _ CacheResult, _ *compression.Config, _ session.Group) ([]*Remote, error) { return nil, nil } diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go index 0bf9603eb4..aa08a0e828 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/attr.go +++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go @@ -12,6 +12,7 @@ const AttrIncludePatterns = "local.includepattern" const AttrFollowPaths = "local.followpaths" const AttrExcludePatterns = "local.excludepatterns" const AttrSharedKeyHint = "local.sharedkeyhint" + const AttrLLBDefinitionFilename = "llbbuild.filename" const AttrHTTPChecksum = "http.checksum" @@ -26,4 +27,8 @@ const AttrImageResolveModeForcePull = "pull" const AttrImageResolveModePreferLocal = "local" const 
AttrImageRecordType = "image.recordtype" +const AttrLocalDiffer = "local.differ" +const AttrLocalDifferNone = "none" +const AttrLocalDifferMetadata = "metadata" + type IsFileAction = isFileAction_Action diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go index aa9e39f9b7..24b2789348 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/caps.go +++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go @@ -18,6 +18,7 @@ const ( CapSourceLocalFollowPaths apicaps.CapID = "source.local.followpaths" CapSourceLocalExcludePatterns apicaps.CapID = "source.local.excludepatterns" CapSourceLocalSharedKeyHint apicaps.CapID = "source.local.sharedkeyhint" + CapSourceLocalDiffer apicaps.CapID = "source.local.differ" CapSourceGit apicaps.CapID = "source.git" CapSourceGitKeepDir apicaps.CapID = "source.git.keepgitdir" @@ -34,26 +35,30 @@ const ( CapBuildOpLLBFileName apicaps.CapID = "source.buildop.llbfilename" - CapExecMetaBase apicaps.CapID = "exec.meta.base" - CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" - CapExecMetaNetwork apicaps.CapID = "exec.meta.network" - CapExecMetaSecurity apicaps.CapID = "exec.meta.security" - CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" - CapExecMountBind apicaps.CapID = "exec.mount.bind" - CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" - CapExecMountCache apicaps.CapID = "exec.mount.cache" - CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" - CapExecMountSelector apicaps.CapID = "exec.mount.selector" - CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" - CapExecMountSecret apicaps.CapID = "exec.mount.secret" - CapExecMountSSH apicaps.CapID = "exec.mount.ssh" - CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" - + CapExecMetaBase apicaps.CapID = "exec.meta.base" + CapExecMetaCgroupParent apicaps.CapID = "exec.meta.cgroup.parent" + CapExecMetaNetwork apicaps.CapID = "exec.meta.network" + CapExecMetaProxy apicaps.CapID = "exec.meta.proxyenv" + CapExecMetaSecurity apicaps.CapID = "exec.meta.security" CapExecMetaSecurityDeviceWhitelistV1 apicaps.CapID = "exec.meta.security.devices.v1" + CapExecMetaSetsDefaultPath apicaps.CapID = "exec.meta.setsdefaultpath" + CapExecMetaUlimit apicaps.CapID = "exec.meta.ulimit" + CapExecMountBind apicaps.CapID = "exec.mount.bind" + CapExecMountBindReadWriteNoOuput apicaps.CapID = "exec.mount.bind.readwrite-nooutput" + CapExecMountCache apicaps.CapID = "exec.mount.cache" + CapExecMountCacheSharing apicaps.CapID = "exec.mount.cache.sharing" + CapExecMountSelector apicaps.CapID = "exec.mount.selector" + CapExecMountTmpfs apicaps.CapID = "exec.mount.tmpfs" + CapExecMountTmpfsSize apicaps.CapID = "exec.mount.tmpfs.size" + CapExecMountSecret apicaps.CapID = "exec.mount.secret" + CapExecMountSSH apicaps.CapID = "exec.mount.ssh" + CapExecCgroupsMounted apicaps.CapID = "exec.cgroup" + CapExecSecretEnv apicaps.CapID = "exec.secretenv" CapFileBase apicaps.CapID = "file.base" CapFileRmWildcard apicaps.CapID = "file.rm.wildcard" CapFileCopyIncludeExcludePatterns apicaps.CapID = "file.copy.includeexcludepatterns" + CapFileRmNoFollowSymlink apicaps.CapID = "file.rm.nofollowsymlink" CapConstraints apicaps.CapID = "constraints" CapPlatform apicaps.CapID = "platform" @@ -61,10 +66,14 @@ const ( CapMetaIgnoreCache apicaps.CapID = "meta.ignorecache" CapMetaDescription apicaps.CapID = "meta.description" CapMetaExportCache apicaps.CapID = "meta.exportcache" + + CapRemoteCacheGHA apicaps.CapID = "cache.gha" + + 
CapMergeOp apicaps.CapID = "mergeop" + CapDiffOp apicaps.CapID = "diffop" ) func init() { - Caps.Init(apicaps.Cap{ ID: CapSourceImage, Enabled: true, @@ -118,6 +127,13 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapSourceLocalDiffer, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapSourceGit, Enabled: true, @@ -196,6 +212,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapExecMetaCgroupParent, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapExecMetaProxy, Enabled: true, @@ -226,6 +248,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapExecMetaUlimit, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapExecMountBind, Enabled: true, @@ -262,6 +290,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapExecMountTmpfsSize, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapExecMountSecret, Enabled: true, @@ -280,6 +314,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapExecSecretEnv, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapFileBase, Enabled: true, @@ -296,6 +336,12 @@ func init() { Status: apicaps.CapStatusExperimental, }) + Caps.Init(apicaps.Cap{ + ID: CapFileRmNoFollowSymlink, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ ID: CapFileCopyIncludeExcludePatterns, Enabled: true, @@ -331,4 +377,20 @@ func init() { Enabled: true, Status: apicaps.CapStatusExperimental, }) + + Caps.Init(apicaps.Cap{ + ID: CapRemoteCacheGHA, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ + ID: CapMergeOp, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) + Caps.Init(apicaps.Cap{ + ID: CapDiffOp, + Enabled: true, + Status: apicaps.CapStatusExperimental, + }) } diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go index 77feeac781..252227a944 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.pb.go @@ -158,6 +158,8 @@ type Op struct { // *Op_Source // *Op_File // *Op_Build + // *Op_Merge + // *Op_Diff Op isOp_Op `protobuf_oneof:"op"` Platform *Platform `protobuf:"bytes,10,opt,name=platform,proto3" json:"platform,omitempty"` Constraints *WorkerConstraints `protobuf:"bytes,11,opt,name=constraints,proto3" json:"constraints,omitempty"` @@ -210,11 +212,19 @@ type Op_File struct { type Op_Build struct { Build *BuildOp `protobuf:"bytes,5,opt,name=build,proto3,oneof" json:"build,omitempty"` } +type Op_Merge struct { + Merge *MergeOp `protobuf:"bytes,6,opt,name=merge,proto3,oneof" json:"merge,omitempty"` +} +type Op_Diff struct { + Diff *DiffOp `protobuf:"bytes,7,opt,name=diff,proto3,oneof" json:"diff,omitempty"` +} func (*Op_Exec) isOp_Op() {} func (*Op_Source) isOp_Op() {} func (*Op_File) isOp_Op() {} func (*Op_Build) isOp_Op() {} +func (*Op_Merge) isOp_Op() {} +func (*Op_Diff) isOp_Op() {} func (m *Op) GetOp() isOp_Op { if m != nil { @@ -258,6 +268,20 @@ func (m *Op) GetBuild() *BuildOp { return nil } +func (m *Op) GetMerge() *MergeOp { + if x, ok := m.GetOp().(*Op_Merge); ok { + return x.Merge + } + return nil +} + +func (m *Op) 
GetDiff() *DiffOp { + if x, ok := m.GetOp().(*Op_Diff); ok { + return x.Diff + } + return nil +} + func (m *Op) GetPlatform() *Platform { if m != nil { return m.Platform @@ -279,6 +303,8 @@ func (*Op) XXX_OneofWrappers() []interface{} { (*Op_Source)(nil), (*Op_File)(nil), (*Op_Build)(nil), + (*Op_Merge)(nil), + (*Op_Diff)(nil), } } @@ -394,10 +420,11 @@ var xxx_messageInfo_Input proto.InternalMessageInfo // ExecOp executes a command in a container. type ExecOp struct { - Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` - Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` - Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` - Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` + Meta *Meta `protobuf:"bytes,1,opt,name=meta,proto3" json:"meta,omitempty"` + Mounts []*Mount `protobuf:"bytes,2,rep,name=mounts,proto3" json:"mounts,omitempty"` + Network NetMode `protobuf:"varint,3,opt,name=network,proto3,enum=pb.NetMode" json:"network,omitempty"` + Security SecurityMode `protobuf:"varint,4,opt,name=security,proto3,enum=pb.SecurityMode" json:"security,omitempty"` + Secretenv []*SecretEnv `protobuf:"bytes,5,rep,name=secretenv,proto3" json:"secretenv,omitempty"` } func (m *ExecOp) Reset() { *m = ExecOp{} } @@ -457,17 +484,26 @@ func (m *ExecOp) GetSecurity() SecurityMode { return SecurityMode_SANDBOX } +func (m *ExecOp) GetSecretenv() []*SecretEnv { + if m != nil { + return m.Secretenv + } + return nil +} + // Meta is a set of arguments for ExecOp. // Meta is unrelated to LLB metadata. // FIXME: rename (ExecContext? ExecArgs?) type Meta struct { - Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` - Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` - Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` - User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` - ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` - ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` - Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + Args []string `protobuf:"bytes,1,rep,name=args,proto3" json:"args,omitempty"` + Env []string `protobuf:"bytes,2,rep,name=env,proto3" json:"env,omitempty"` + Cwd string `protobuf:"bytes,3,opt,name=cwd,proto3" json:"cwd,omitempty"` + User string `protobuf:"bytes,4,opt,name=user,proto3" json:"user,omitempty"` + ProxyEnv *ProxyEnv `protobuf:"bytes,5,opt,name=proxy_env,json=proxyEnv,proto3" json:"proxy_env,omitempty"` + ExtraHosts []*HostIP `protobuf:"bytes,6,rep,name=extraHosts,proto3" json:"extraHosts,omitempty"` + Hostname string `protobuf:"bytes,7,opt,name=hostname,proto3" json:"hostname,omitempty"` + Ulimit []*Ulimit `protobuf:"bytes,9,rep,name=ulimit,proto3" json:"ulimit,omitempty"` + CgroupParent string `protobuf:"bytes,10,opt,name=cgroupParent,proto3" json:"cgroupParent,omitempty"` } func (m *Meta) Reset() { *m = Meta{} } @@ -548,6 +584,181 @@ func (m *Meta) GetHostname() string { return "" } +func (m *Meta) GetUlimit() []*Ulimit { + if m != nil { + return m.Ulimit + } + return nil +} + +func (m *Meta) GetCgroupParent() string { + if m != nil { + return m.CgroupParent + } + return "" +} + +type HostIP struct { + Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` 
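+ // IP is the literal address paired with Host in the build container's /etc/hosts.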
+ IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` +} + +func (m *HostIP) Reset() { *m = HostIP{} } +func (m *HostIP) String() string { return proto.CompactTextString(m) } +func (*HostIP) ProtoMessage() {} +func (*HostIP) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{5} +} +func (m *HostIP) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *HostIP) XXX_Merge(src proto.Message) { + xxx_messageInfo_HostIP.Merge(m, src) +} +func (m *HostIP) XXX_Size() int { + return m.Size() +} +func (m *HostIP) XXX_DiscardUnknown() { + xxx_messageInfo_HostIP.DiscardUnknown(m) +} + +var xxx_messageInfo_HostIP proto.InternalMessageInfo + +func (m *HostIP) GetHost() string { + if m != nil { + return m.Host + } + return "" +} + +func (m *HostIP) GetIP() string { + if m != nil { + return m.IP + } + return "" +} + +type Ulimit struct { + Name string `protobuf:"bytes,1,opt,name=Name,proto3" json:"Name,omitempty"` + Soft int64 `protobuf:"varint,2,opt,name=Soft,proto3" json:"Soft,omitempty"` + Hard int64 `protobuf:"varint,3,opt,name=Hard,proto3" json:"Hard,omitempty"` +} + +func (m *Ulimit) Reset() { *m = Ulimit{} } +func (m *Ulimit) String() string { return proto.CompactTextString(m) } +func (*Ulimit) ProtoMessage() {} +func (*Ulimit) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{6} +} +func (m *Ulimit) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *Ulimit) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *Ulimit) XXX_Merge(src proto.Message) { + xxx_messageInfo_Ulimit.Merge(m, src) +} +func (m *Ulimit) XXX_Size() int { + return m.Size() +} +func (m *Ulimit) XXX_DiscardUnknown() { + xxx_messageInfo_Ulimit.DiscardUnknown(m) +} + +var xxx_messageInfo_Ulimit proto.InternalMessageInfo + +func (m *Ulimit) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Ulimit) GetSoft() int64 { + if m != nil { + return m.Soft + } + return 0 +} + +func (m *Ulimit) GetHard() int64 { + if m != nil { + return m.Hard + } + return 0 +} + +// SecretEnv is an environment variable that is backed by a secret. 
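+// Only the secret's ID travels in the LLB definition; execOp.loadSecretEnv resolves the value from the client session at run time.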
+type SecretEnv struct { + ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Optional bool `protobuf:"varint,3,opt,name=optional,proto3" json:"optional,omitempty"` +} + +func (m *SecretEnv) Reset() { *m = SecretEnv{} } +func (m *SecretEnv) String() string { return proto.CompactTextString(m) } +func (*SecretEnv) ProtoMessage() {} +func (*SecretEnv) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{7} +} +func (m *SecretEnv) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *SecretEnv) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *SecretEnv) XXX_Merge(src proto.Message) { + xxx_messageInfo_SecretEnv.Merge(m, src) +} +func (m *SecretEnv) XXX_Size() int { + return m.Size() +} +func (m *SecretEnv) XXX_DiscardUnknown() { + xxx_messageInfo_SecretEnv.DiscardUnknown(m) +} + +var xxx_messageInfo_SecretEnv proto.InternalMessageInfo + +func (m *SecretEnv) GetID() string { + if m != nil { + return m.ID + } + return "" +} + +func (m *SecretEnv) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *SecretEnv) GetOptional() bool { + if m != nil { + return m.Optional + } + return false +} + // Mount specifies how to mount an input Op as a filesystem. type Mount struct { Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` @@ -556,6 +767,7 @@ type Mount struct { Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"` Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"` MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"` + TmpfsOpt *TmpfsOpt `protobuf:"bytes,19,opt,name=TmpfsOpt,proto3" json:"TmpfsOpt,omitempty"` CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"` SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"` SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"` @@ -566,7 +778,7 @@ func (m *Mount) Reset() { *m = Mount{} } func (m *Mount) String() string { return proto.CompactTextString(m) } func (*Mount) ProtoMessage() {} func (*Mount) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{5} + return fileDescriptor_8de16154b2733812, []int{8} } func (m *Mount) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -619,6 +831,13 @@ func (m *Mount) GetMountType() MountType { return MountType_BIND } +func (m *Mount) GetTmpfsOpt() *TmpfsOpt { + if m != nil { + return m.TmpfsOpt + } + return nil +} + func (m *Mount) GetCacheOpt() *CacheOpt { if m != nil { return m.CacheOpt @@ -647,6 +866,48 @@ func (m *Mount) GetResultID() string { return "" } +// TmpfsOpt defines options describing tmpfs mounts +type TmpfsOpt struct { + // Specify an upper limit on the size of the filesystem.
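+ // The limit is a byte count; a zero value means no explicit limit is encoded (the marshaller skips the field), so the runtime default applies. A caller might set, say, 64 << 20 for a 64 MiB tmpfs (illustrative value).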
+ Size_ int64 `protobuf:"varint,1,opt,name=size,proto3" json:"size,omitempty"` +} + +func (m *TmpfsOpt) Reset() { *m = TmpfsOpt{} } +func (m *TmpfsOpt) String() string { return proto.CompactTextString(m) } +func (*TmpfsOpt) ProtoMessage() {} +func (*TmpfsOpt) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{9} +} +func (m *TmpfsOpt) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *TmpfsOpt) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *TmpfsOpt) XXX_Merge(src proto.Message) { + xxx_messageInfo_TmpfsOpt.Merge(m, src) +} +func (m *TmpfsOpt) XXX_Size() int { + return m.Size() +} +func (m *TmpfsOpt) XXX_DiscardUnknown() { + xxx_messageInfo_TmpfsOpt.DiscardUnknown(m) +} + +var xxx_messageInfo_TmpfsOpt proto.InternalMessageInfo + +func (m *TmpfsOpt) GetSize_() int64 { + if m != nil { + return m.Size_ + } + return 0 +} + // CacheOpt defines options specific to cache mounts type CacheOpt struct { // ID is an optional namespace for the mount @@ -659,7 +920,7 @@ func (m *CacheOpt) Reset() { *m = CacheOpt{} } func (m *CacheOpt) String() string { return proto.CompactTextString(m) } func (*CacheOpt) ProtoMessage() {} func (*CacheOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{6} + return fileDescriptor_8de16154b2733812, []int{10} } func (m *CacheOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -717,7 +978,7 @@ func (m *SecretOpt) Reset() { *m = SecretOpt{} } func (m *SecretOpt) String() string { return proto.CompactTextString(m) } func (*SecretOpt) ProtoMessage() {} func (*SecretOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{7} + return fileDescriptor_8de16154b2733812, []int{11} } func (m *SecretOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -796,7 +1057,7 @@ func (m *SSHOpt) Reset() { *m = SSHOpt{} } func (m *SSHOpt) String() string { return proto.CompactTextString(m) } func (*SSHOpt) ProtoMessage() {} func (*SSHOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{8} + return fileDescriptor_8de16154b2733812, []int{12} } func (m *SSHOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -869,7 +1130,7 @@ func (m *SourceOp) Reset() { *m = SourceOp{} } func (m *SourceOp) String() string { return proto.CompactTextString(m) } func (*SourceOp) ProtoMessage() {} func (*SourceOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{9} + return fileDescriptor_8de16154b2733812, []int{13} } func (m *SourceOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -921,7 +1182,7 @@ func (m *BuildOp) Reset() { *m = BuildOp{} } func (m *BuildOp) String() string { return proto.CompactTextString(m) } func (*BuildOp) ProtoMessage() {} func (*BuildOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{10} + return fileDescriptor_8de16154b2733812, []int{14} } func (m *BuildOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -976,7 +1237,7 @@ func (m *BuildInput) Reset() { *m = BuildInput{} } func (m *BuildInput) String() string { return proto.CompactTextString(m) } func (*BuildInput) ProtoMessage() {} func (*BuildInput) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{11} + return fileDescriptor_8de16154b2733812, []int{15} } func (m *BuildInput) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ 
-1009,15 +1270,16 @@ type OpMetadata struct { Description map[string]string `protobuf:"bytes,2,rep,name=description,proto3" json:"description,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` // index 3 reserved for WorkerConstraint in previous versions // WorkerConstraint worker_constraint = 3; - ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache,proto3" json:"export_cache,omitempty"` - Caps map[github_com_moby_buildkit_util_apicaps.CapID]bool `protobuf:"bytes,5,rep,name=caps,proto3,castkey=github.com/moby/buildkit/util/apicaps.CapID" json:"caps" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + ExportCache *ExportCache `protobuf:"bytes,4,opt,name=export_cache,json=exportCache,proto3" json:"export_cache,omitempty"` + Caps map[github_com_moby_buildkit_util_apicaps.CapID]bool `protobuf:"bytes,5,rep,name=caps,proto3,castkey=github.com/moby/buildkit/util/apicaps.CapID" json:"caps" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"varint,2,opt,name=value,proto3"` + ProgressGroup *ProgressGroup `protobuf:"bytes,6,opt,name=progress_group,json=progressGroup,proto3" json:"progress_group,omitempty"` } func (m *OpMetadata) Reset() { *m = OpMetadata{} } func (m *OpMetadata) String() string { return proto.CompactTextString(m) } func (*OpMetadata) ProtoMessage() {} func (*OpMetadata) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{12} + return fileDescriptor_8de16154b2733812, []int{16} } func (m *OpMetadata) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1070,6 +1332,13 @@ func (m *OpMetadata) GetCaps() map[github_com_moby_buildkit_util_apicaps.CapID]b return nil } +func (m *OpMetadata) GetProgressGroup() *ProgressGroup { + if m != nil { + return m.ProgressGroup + } + return nil +} + // Source is a source mapping description for a file type Source struct { Locations map[string]*Locations `protobuf:"bytes,1,rep,name=locations,proto3" json:"locations,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` @@ -1080,7 +1349,7 @@ func (m *Source) Reset() { *m = Source{} } func (m *Source) String() string { return proto.CompactTextString(m) } func (*Source) ProtoMessage() {} func (*Source) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{13} + return fileDescriptor_8de16154b2733812, []int{17} } func (m *Source) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1128,7 +1397,7 @@ func (m *Locations) Reset() { *m = Locations{} } func (m *Locations) String() string { return proto.CompactTextString(m) } func (*Locations) ProtoMessage() {} func (*Locations) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{14} + return fileDescriptor_8de16154b2733812, []int{18} } func (m *Locations) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1171,7 +1440,7 @@ func (m *SourceInfo) Reset() { *m = SourceInfo{} } func (m *SourceInfo) String() string { return proto.CompactTextString(m) } func (*SourceInfo) ProtoMessage() {} func (*SourceInfo) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{15} + return fileDescriptor_8de16154b2733812, []int{19} } func (m *SourceInfo) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1227,7 +1496,7 @@ func (m *Location) Reset() { *m = Location{} } func (m *Location) String() string { return proto.CompactTextString(m) } func (*Location) ProtoMessage() {} func (*Location) 
Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{16} + return fileDescriptor_8de16154b2733812, []int{20} } func (m *Location) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1276,7 +1545,7 @@ func (m *Range) Reset() { *m = Range{} } func (m *Range) String() string { return proto.CompactTextString(m) } func (*Range) ProtoMessage() {} func (*Range) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{17} + return fileDescriptor_8de16154b2733812, []int{21} } func (m *Range) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1325,7 +1594,7 @@ func (m *Position) Reset() { *m = Position{} } func (m *Position) String() string { return proto.CompactTextString(m) } func (*Position) ProtoMessage() {} func (*Position) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{18} + return fileDescriptor_8de16154b2733812, []int{22} } func (m *Position) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1372,7 +1641,7 @@ func (m *ExportCache) Reset() { *m = ExportCache{} } func (m *ExportCache) String() string { return proto.CompactTextString(m) } func (*ExportCache) ProtoMessage() {} func (*ExportCache) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{19} + return fileDescriptor_8de16154b2733812, []int{23} } func (m *ExportCache) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1404,6 +1673,62 @@ func (m *ExportCache) GetValue() bool { return false } +type ProgressGroup struct { + Id string `protobuf:"bytes,1,opt,name=id,proto3" json:"id,omitempty"` + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + Weak bool `protobuf:"varint,3,opt,name=weak,proto3" json:"weak,omitempty"` +} + +func (m *ProgressGroup) Reset() { *m = ProgressGroup{} } +func (m *ProgressGroup) String() string { return proto.CompactTextString(m) } +func (*ProgressGroup) ProtoMessage() {} +func (*ProgressGroup) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{24} +} +func (m *ProgressGroup) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *ProgressGroup) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *ProgressGroup) XXX_Merge(src proto.Message) { + xxx_messageInfo_ProgressGroup.Merge(m, src) +} +func (m *ProgressGroup) XXX_Size() int { + return m.Size() +} +func (m *ProgressGroup) XXX_DiscardUnknown() { + xxx_messageInfo_ProgressGroup.DiscardUnknown(m) +} + +var xxx_messageInfo_ProgressGroup proto.InternalMessageInfo + +func (m *ProgressGroup) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *ProgressGroup) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ProgressGroup) GetWeak() bool { + if m != nil { + return m.Weak + } + return false +} + type ProxyEnv struct { HttpProxy string `protobuf:"bytes,1,opt,name=http_proxy,json=httpProxy,proto3" json:"http_proxy,omitempty"` HttpsProxy string `protobuf:"bytes,2,opt,name=https_proxy,json=httpsProxy,proto3" json:"https_proxy,omitempty"` @@ -1416,7 +1741,7 @@ func (m *ProxyEnv) Reset() { *m = ProxyEnv{} } func (m *ProxyEnv) String() string { return proto.CompactTextString(m) } func (*ProxyEnv) ProtoMessage() {} func (*ProxyEnv) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{20} + return fileDescriptor_8de16154b2733812, []int{25} } func (m *ProxyEnv) 
XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1485,7 +1810,7 @@ func (m *WorkerConstraints) Reset() { *m = WorkerConstraints{} } func (m *WorkerConstraints) String() string { return proto.CompactTextString(m) } func (*WorkerConstraints) ProtoMessage() {} func (*WorkerConstraints) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{21} + return fileDescriptor_8de16154b2733812, []int{26} } func (m *WorkerConstraints) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1532,7 +1857,7 @@ func (m *Definition) Reset() { *m = Definition{} } func (m *Definition) String() string { return proto.CompactTextString(m) } func (*Definition) ProtoMessage() {} func (*Definition) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{22} + return fileDescriptor_8de16154b2733812, []int{27} } func (m *Definition) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1578,54 +1903,6 @@ func (m *Definition) GetSource() *Source { return nil } -type HostIP struct { - Host string `protobuf:"bytes,1,opt,name=Host,proto3" json:"Host,omitempty"` - IP string `protobuf:"bytes,2,opt,name=IP,proto3" json:"IP,omitempty"` -} - -func (m *HostIP) Reset() { *m = HostIP{} } -func (m *HostIP) String() string { return proto.CompactTextString(m) } -func (*HostIP) ProtoMessage() {} -func (*HostIP) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{23} -} -func (m *HostIP) XXX_Unmarshal(b []byte) error { - return m.Unmarshal(b) -} -func (m *HostIP) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { - b = b[:cap(b)] - n, err := m.MarshalToSizedBuffer(b) - if err != nil { - return nil, err - } - return b[:n], nil -} -func (m *HostIP) XXX_Merge(src proto.Message) { - xxx_messageInfo_HostIP.Merge(m, src) -} -func (m *HostIP) XXX_Size() int { - return m.Size() -} -func (m *HostIP) XXX_DiscardUnknown() { - xxx_messageInfo_HostIP.DiscardUnknown(m) -} - -var xxx_messageInfo_HostIP proto.InternalMessageInfo - -func (m *HostIP) GetHost() string { - if m != nil { - return m.Host - } - return "" -} - -func (m *HostIP) GetIP() string { - if m != nil { - return m.IP - } - return "" -} - type FileOp struct { Actions []*FileAction `protobuf:"bytes,2,rep,name=actions,proto3" json:"actions,omitempty"` } @@ -1634,7 +1911,7 @@ func (m *FileOp) Reset() { *m = FileOp{} } func (m *FileOp) String() string { return proto.CompactTextString(m) } func (*FileOp) ProtoMessage() {} func (*FileOp) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{24} + return fileDescriptor_8de16154b2733812, []int{28} } func (m *FileOp) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1682,7 +1959,7 @@ func (m *FileAction) Reset() { *m = FileAction{} } func (m *FileAction) String() string { return proto.CompactTextString(m) } func (*FileAction) ProtoMessage() {} func (*FileAction) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{25} + return fileDescriptor_8de16154b2733812, []int{29} } func (m *FileAction) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1809,7 +2086,7 @@ func (m *FileActionCopy) Reset() { *m = FileActionCopy{} } func (m *FileActionCopy) String() string { return proto.CompactTextString(m) } func (*FileActionCopy) ProtoMessage() {} func (*FileActionCopy) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{26} + return fileDescriptor_8de16154b2733812, []int{30} } func (m *FileActionCopy) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -1942,7 
+2219,7 @@ func (m *FileActionMkFile) Reset() { *m = FileActionMkFile{} } func (m *FileActionMkFile) String() string { return proto.CompactTextString(m) } func (*FileActionMkFile) ProtoMessage() {} func (*FileActionMkFile) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{27} + return fileDescriptor_8de16154b2733812, []int{31} } func (m *FileActionMkFile) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2019,7 +2296,7 @@ func (m *FileActionMkDir) Reset() { *m = FileActionMkDir{} } func (m *FileActionMkDir) String() string { return proto.CompactTextString(m) } func (*FileActionMkDir) ProtoMessage() {} func (*FileActionMkDir) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{28} + return fileDescriptor_8de16154b2733812, []int{32} } func (m *FileActionMkDir) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2092,7 +2369,7 @@ func (m *FileActionRm) Reset() { *m = FileActionRm{} } func (m *FileActionRm) String() string { return proto.CompactTextString(m) } func (*FileActionRm) ProtoMessage() {} func (*FileActionRm) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{29} + return fileDescriptor_8de16154b2733812, []int{33} } func (m *FileActionRm) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2147,7 +2424,7 @@ func (m *ChownOpt) Reset() { *m = ChownOpt{} } func (m *ChownOpt) String() string { return proto.CompactTextString(m) } func (*ChownOpt) ProtoMessage() {} func (*ChownOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{30} + return fileDescriptor_8de16154b2733812, []int{34} } func (m *ChownOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2197,7 +2474,7 @@ func (m *UserOpt) Reset() { *m = UserOpt{} } func (m *UserOpt) String() string { return proto.CompactTextString(m) } func (*UserOpt) ProtoMessage() {} func (*UserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{31} + return fileDescriptor_8de16154b2733812, []int{35} } func (m *UserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2276,7 +2553,7 @@ func (m *NamedUserOpt) Reset() { *m = NamedUserOpt{} } func (m *NamedUserOpt) String() string { return proto.CompactTextString(m) } func (*NamedUserOpt) ProtoMessage() {} func (*NamedUserOpt) Descriptor() ([]byte, []int) { - return fileDescriptor_8de16154b2733812, []int{32} + return fileDescriptor_8de16154b2733812, []int{36} } func (m *NamedUserOpt) XXX_Unmarshal(b []byte) error { return m.Unmarshal(b) @@ -2308,6 +2585,193 @@ func (m *NamedUserOpt) GetName() string { return "" } +type MergeInput struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *MergeInput) Reset() { *m = MergeInput{} } +func (m *MergeInput) String() string { return proto.CompactTextString(m) } +func (*MergeInput) ProtoMessage() {} +func (*MergeInput) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{37} +} +func (m *MergeInput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MergeInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MergeInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_MergeInput.Merge(m, src) +} +func (m *MergeInput) XXX_Size() int { + return m.Size() +} +func (m *MergeInput) XXX_DiscardUnknown() { + 
xxx_messageInfo_MergeInput.DiscardUnknown(m) +} + +var xxx_messageInfo_MergeInput proto.InternalMessageInfo + +type MergeOp struct { + Inputs []*MergeInput `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"` +} + +func (m *MergeOp) Reset() { *m = MergeOp{} } +func (m *MergeOp) String() string { return proto.CompactTextString(m) } +func (*MergeOp) ProtoMessage() {} +func (*MergeOp) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{38} +} +func (m *MergeOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *MergeOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *MergeOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_MergeOp.Merge(m, src) +} +func (m *MergeOp) XXX_Size() int { + return m.Size() +} +func (m *MergeOp) XXX_DiscardUnknown() { + xxx_messageInfo_MergeOp.DiscardUnknown(m) +} + +var xxx_messageInfo_MergeOp proto.InternalMessageInfo + +func (m *MergeOp) GetInputs() []*MergeInput { + if m != nil { + return m.Inputs + } + return nil +} + +type LowerDiffInput struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *LowerDiffInput) Reset() { *m = LowerDiffInput{} } +func (m *LowerDiffInput) String() string { return proto.CompactTextString(m) } +func (*LowerDiffInput) ProtoMessage() {} +func (*LowerDiffInput) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{39} +} +func (m *LowerDiffInput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *LowerDiffInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *LowerDiffInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_LowerDiffInput.Merge(m, src) +} +func (m *LowerDiffInput) XXX_Size() int { + return m.Size() +} +func (m *LowerDiffInput) XXX_DiscardUnknown() { + xxx_messageInfo_LowerDiffInput.DiscardUnknown(m) +} + +var xxx_messageInfo_LowerDiffInput proto.InternalMessageInfo + +type UpperDiffInput struct { + Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"` +} + +func (m *UpperDiffInput) Reset() { *m = UpperDiffInput{} } +func (m *UpperDiffInput) String() string { return proto.CompactTextString(m) } +func (*UpperDiffInput) ProtoMessage() {} +func (*UpperDiffInput) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{40} +} +func (m *UpperDiffInput) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *UpperDiffInput) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *UpperDiffInput) XXX_Merge(src proto.Message) { + xxx_messageInfo_UpperDiffInput.Merge(m, src) +} +func (m *UpperDiffInput) XXX_Size() int { + return m.Size() +} +func (m *UpperDiffInput) XXX_DiscardUnknown() { + xxx_messageInfo_UpperDiffInput.DiscardUnknown(m) +} + +var xxx_messageInfo_UpperDiffInput proto.InternalMessageInfo + +type DiffOp struct { + Lower *LowerDiffInput `protobuf:"bytes,1,opt,name=lower,proto3" json:"lower,omitempty"` + Upper *UpperDiffInput `protobuf:"bytes,2,opt,name=upper,proto3" json:"upper,omitempty"` +} + +func (m *DiffOp) Reset() { *m = DiffOp{} } +func (m *DiffOp) String() string 
{ return proto.CompactTextString(m) } +func (*DiffOp) ProtoMessage() {} +func (*DiffOp) Descriptor() ([]byte, []int) { + return fileDescriptor_8de16154b2733812, []int{41} +} +func (m *DiffOp) XXX_Unmarshal(b []byte) error { + return m.Unmarshal(b) +} +func (m *DiffOp) XXX_Marshal(b []byte, deterministic bool) ([]byte, error) { + b = b[:cap(b)] + n, err := m.MarshalToSizedBuffer(b) + if err != nil { + return nil, err + } + return b[:n], nil +} +func (m *DiffOp) XXX_Merge(src proto.Message) { + xxx_messageInfo_DiffOp.Merge(m, src) +} +func (m *DiffOp) XXX_Size() int { + return m.Size() +} +func (m *DiffOp) XXX_DiscardUnknown() { + xxx_messageInfo_DiffOp.DiscardUnknown(m) +} + +var xxx_messageInfo_DiffOp proto.InternalMessageInfo + +func (m *DiffOp) GetLower() *LowerDiffInput { + if m != nil { + return m.Lower + } + return nil +} + +func (m *DiffOp) GetUpper() *UpperDiffInput { + if m != nil { + return m.Upper + } + return nil +} + func init() { proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value) proto.RegisterEnum("pb.SecurityMode", SecurityMode_name, SecurityMode_value) @@ -2318,7 +2782,11 @@ func init() { proto.RegisterType((*Input)(nil), "pb.Input") proto.RegisterType((*ExecOp)(nil), "pb.ExecOp") proto.RegisterType((*Meta)(nil), "pb.Meta") + proto.RegisterType((*HostIP)(nil), "pb.HostIP") + proto.RegisterType((*Ulimit)(nil), "pb.Ulimit") + proto.RegisterType((*SecretEnv)(nil), "pb.SecretEnv") proto.RegisterType((*Mount)(nil), "pb.Mount") + proto.RegisterType((*TmpfsOpt)(nil), "pb.TmpfsOpt") proto.RegisterType((*CacheOpt)(nil), "pb.CacheOpt") proto.RegisterType((*SecretOpt)(nil), "pb.SecretOpt") proto.RegisterType((*SSHOpt)(nil), "pb.SSHOpt") @@ -2339,11 +2807,11 @@ func init() { proto.RegisterType((*Range)(nil), "pb.Range") proto.RegisterType((*Position)(nil), "pb.Position") proto.RegisterType((*ExportCache)(nil), "pb.ExportCache") + proto.RegisterType((*ProgressGroup)(nil), "pb.ProgressGroup") proto.RegisterType((*ProxyEnv)(nil), "pb.ProxyEnv") proto.RegisterType((*WorkerConstraints)(nil), "pb.WorkerConstraints") proto.RegisterType((*Definition)(nil), "pb.Definition") proto.RegisterMapType((map[github_com_opencontainers_go_digest.Digest]OpMetadata)(nil), "pb.Definition.MetadataEntry") - proto.RegisterType((*HostIP)(nil), "pb.HostIP") proto.RegisterType((*FileOp)(nil), "pb.FileOp") proto.RegisterType((*FileAction)(nil), "pb.FileAction") proto.RegisterType((*FileActionCopy)(nil), "pb.FileActionCopy") @@ -2353,154 +2821,176 @@ func init() { proto.RegisterType((*ChownOpt)(nil), "pb.ChownOpt") proto.RegisterType((*UserOpt)(nil), "pb.UserOpt") proto.RegisterType((*NamedUserOpt)(nil), "pb.NamedUserOpt") + proto.RegisterType((*MergeInput)(nil), "pb.MergeInput") + proto.RegisterType((*MergeOp)(nil), "pb.MergeOp") + proto.RegisterType((*LowerDiffInput)(nil), "pb.LowerDiffInput") + proto.RegisterType((*UpperDiffInput)(nil), "pb.UpperDiffInput") + proto.RegisterType((*DiffOp)(nil), "pb.DiffOp") } func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) } var fileDescriptor_8de16154b2733812 = []byte{ - // 2267 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x6e, 0x1c, 0xc7, - 0xf1, 0xe7, 0x7e, 0xef, 0xd6, 0x2e, 0xa9, 0xfd, 0xb7, 0x65, 0x7b, 0xcd, 0xbf, 0x42, 0xd2, 0x63, - 0xc7, 0xa0, 0x28, 0x69, 0x09, 0xd0, 0x80, 0x65, 0x18, 0x41, 0x10, 0xee, 0x87, 0xc0, 0xb5, 0x25, - 0x2e, 0xd1, 0x2b, 0xc9, 0xb9, 0x09, 0xc3, 0xd9, 0x26, 0x39, 0xe0, 0xec, 0xf4, 0xa0, 0xa7, 0x57, - 0xe2, 0x5e, 0x72, 0xf0, 0x13, 
0x18, 0x08, 0x90, 0x5b, 0x12, 0xe4, 0x1d, 0x72, 0xcd, 0x31, 0x80, - 0x8f, 0x3e, 0xe4, 0x60, 0xe4, 0xe0, 0x04, 0xd2, 0x3d, 0x4f, 0x90, 0x00, 0x41, 0x55, 0xf7, 0x7c, - 0x2c, 0x45, 0x41, 0x12, 0x12, 0xe4, 0x34, 0xdd, 0x55, 0xbf, 0xaa, 0xae, 0xae, 0xaa, 0xae, 0xae, - 0x1e, 0x68, 0xc8, 0x28, 0xee, 0x46, 0x4a, 0x6a, 0xc9, 0x8a, 0xd1, 0xf1, 0xfa, 0x9d, 0x53, 0x5f, - 0x9f, 0xcd, 0x8f, 0xbb, 0x9e, 0x9c, 0xed, 0x9e, 0xca, 0x53, 0xb9, 0x4b, 0xac, 0xe3, 0xf9, 0x09, - 0xcd, 0x68, 0x42, 0x23, 0x23, 0xe2, 0xfc, 0xa1, 0x08, 0xc5, 0x71, 0xc4, 0x3e, 0x84, 0xaa, 0x1f, - 0x46, 0x73, 0x1d, 0x77, 0x0a, 0x5b, 0xa5, 0xed, 0xe6, 0x5e, 0xa3, 0x1b, 0x1d, 0x77, 0x47, 0x48, - 0xe1, 0x96, 0xc1, 0xb6, 0xa0, 0x2c, 0x2e, 0x84, 0xd7, 0x29, 0x6e, 0x15, 0xb6, 0x9b, 0x7b, 0x80, - 0x80, 0xe1, 0x85, 0xf0, 0xc6, 0xd1, 0xc1, 0x0a, 0x27, 0x0e, 0xfb, 0x04, 0xaa, 0xb1, 0x9c, 0x2b, - 0x4f, 0x74, 0x4a, 0x84, 0x69, 0x21, 0x66, 0x42, 0x14, 0x42, 0x59, 0x2e, 0x6a, 0x3a, 0xf1, 0x03, - 0xd1, 0x29, 0x67, 0x9a, 0xee, 0xf9, 0x81, 0xc1, 0x10, 0x87, 0x7d, 0x04, 0x95, 0xe3, 0xb9, 0x1f, - 0x4c, 0x3b, 0x15, 0x82, 0x34, 0x11, 0xd2, 0x43, 0x02, 0x61, 0x0c, 0x8f, 0x6d, 0x43, 0x3d, 0x0a, - 0x5c, 0x7d, 0x22, 0xd5, 0xac, 0x03, 0xd9, 0x82, 0x47, 0x96, 0xc6, 0x53, 0x2e, 0xbb, 0x0b, 0x4d, - 0x4f, 0x86, 0xb1, 0x56, 0xae, 0x1f, 0xea, 0xb8, 0xd3, 0x24, 0xf0, 0xbb, 0x08, 0xfe, 0x5a, 0xaa, - 0x73, 0xa1, 0xfa, 0x19, 0x93, 0xe7, 0x91, 0xbd, 0x32, 0x14, 0x65, 0xe4, 0xfc, 0xa6, 0x00, 0xf5, - 0x44, 0x2b, 0x73, 0xa0, 0xb5, 0xaf, 0xbc, 0x33, 0x5f, 0x0b, 0x4f, 0xcf, 0x95, 0xe8, 0x14, 0xb6, - 0x0a, 0xdb, 0x0d, 0xbe, 0x44, 0x63, 0x6b, 0x50, 0x1c, 0x4f, 0xc8, 0x51, 0x0d, 0x5e, 0x1c, 0x4f, - 0x58, 0x07, 0x6a, 0x8f, 0x5d, 0xe5, 0xbb, 0xa1, 0x26, 0xcf, 0x34, 0x78, 0x32, 0x65, 0x37, 0xa0, - 0x31, 0x9e, 0x3c, 0x16, 0x2a, 0xf6, 0x65, 0x48, 0xfe, 0x68, 0xf0, 0x8c, 0xc0, 0x36, 0x00, 0xc6, - 0x93, 0x7b, 0xc2, 0x45, 0xa5, 0x71, 0xa7, 0xb2, 0x55, 0xda, 0x6e, 0xf0, 0x1c, 0xc5, 0xf9, 0x15, - 0x54, 0x28, 0x46, 0xec, 0x4b, 0xa8, 0x4e, 0xfd, 0x53, 0x11, 0x6b, 0x63, 0x4e, 0x6f, 0xef, 0xbb, - 0x1f, 0x37, 0x57, 0xfe, 0xfa, 0xe3, 0xe6, 0x4e, 0x2e, 0x19, 0x64, 0x24, 0x42, 0x4f, 0x86, 0xda, - 0xf5, 0x43, 0xa1, 0xe2, 0xdd, 0x53, 0x79, 0xc7, 0x88, 0x74, 0x07, 0xf4, 0xe1, 0x56, 0x03, 0xbb, - 0x09, 0x15, 0x3f, 0x9c, 0x8a, 0x0b, 0xb2, 0xbf, 0xd4, 0x7b, 0xc7, 0xaa, 0x6a, 0x8e, 0xe7, 0x3a, - 0x9a, 0xeb, 0x11, 0xb2, 0xb8, 0x41, 0x38, 0xbf, 0x2b, 0x40, 0xd5, 0xe4, 0x00, 0xbb, 0x01, 0xe5, - 0x99, 0xd0, 0x2e, 0xad, 0xdf, 0xdc, 0xab, 0xa3, 0x6f, 0x1f, 0x08, 0xed, 0x72, 0xa2, 0x62, 0x7a, - 0xcd, 0xe4, 0x1c, 0x7d, 0x5f, 0xcc, 0xd2, 0xeb, 0x01, 0x52, 0xb8, 0x65, 0xb0, 0x9f, 0x42, 0x2d, - 0x14, 0xfa, 0x99, 0x54, 0xe7, 0xe4, 0xa3, 0x35, 0x13, 0xf4, 0x43, 0xa1, 0x1f, 0xc8, 0xa9, 0xe0, - 0x09, 0x8f, 0xdd, 0x86, 0x7a, 0x2c, 0xbc, 0xb9, 0xf2, 0xf5, 0x82, 0xfc, 0xb5, 0xb6, 0xd7, 0xa6, - 0x2c, 0xb3, 0x34, 0x02, 0xa7, 0x08, 0xe7, 0xcf, 0x05, 0x28, 0xa3, 0x19, 0x8c, 0x41, 0xd9, 0x55, - 0xa7, 0x26, 0xbb, 0x1b, 0x9c, 0xc6, 0xac, 0x0d, 0x25, 0x11, 0x3e, 0x25, 0x8b, 0x1a, 0x1c, 0x87, - 0x48, 0xf1, 0x9e, 0x4d, 0x6d, 0x8c, 0x70, 0x88, 0x72, 0xf3, 0x58, 0x28, 0x1b, 0x1a, 0x1a, 0xb3, - 0x9b, 0xd0, 0x88, 0x94, 0xbc, 0x58, 0x3c, 0x41, 0xe9, 0x4a, 0x2e, 0xf1, 0x90, 0x38, 0x0c, 0x9f, - 0xf2, 0x7a, 0x64, 0x47, 0x6c, 0x07, 0x40, 0x5c, 0x68, 0xe5, 0x1e, 0xc8, 0x58, 0xc7, 0x9d, 0x2a, - 0xed, 0x9d, 0xf2, 0x1d, 0x09, 0xa3, 0x23, 0x9e, 0xe3, 0xb2, 0x75, 0xa8, 0x9f, 0xc9, 0x58, 0x87, - 0xee, 0x4c, 0x74, 0x6a, 0xb4, 0x5c, 0x3a, 0x77, 0xfe, 0x51, 0x84, 0x0a, 0xb9, 0x8b, 0x6d, 0x63, - 0x74, 0xa2, 0xb9, 0x09, 0x74, 0xa9, 0xc7, 0x6c, 0x74, 
0x80, 0xf2, 0x20, 0x0d, 0x0e, 0xe6, 0xc4, - 0x3a, 0x7a, 0x2a, 0x10, 0x9e, 0x96, 0xca, 0xa6, 0x62, 0x3a, 0xc7, 0x6d, 0x4d, 0x31, 0x5b, 0xcc, - 0x4e, 0x69, 0xcc, 0x6e, 0x41, 0x55, 0x52, 0x88, 0x69, 0xb3, 0xaf, 0x08, 0xbc, 0x85, 0xa0, 0x72, - 0x25, 0xdc, 0xa9, 0x0c, 0x83, 0x05, 0xb9, 0xa0, 0xce, 0xd3, 0x39, 0xbb, 0x05, 0x0d, 0x8a, 0xe9, - 0xc3, 0x45, 0x24, 0x3a, 0x55, 0x8a, 0xd1, 0x6a, 0x1a, 0x6f, 0x24, 0xf2, 0x8c, 0x8f, 0x87, 0xd8, - 0x73, 0xbd, 0x33, 0x31, 0x8e, 0x74, 0xe7, 0x7a, 0xe6, 0xcb, 0xbe, 0xa5, 0xf1, 0x94, 0x8b, 0x6a, - 0x63, 0xe1, 0x29, 0xa1, 0x11, 0xfa, 0x2e, 0x41, 0x57, 0x6d, 0xe8, 0x0d, 0x91, 0x67, 0x7c, 0xe6, - 0x40, 0x75, 0x32, 0x39, 0x40, 0xe4, 0x7b, 0x59, 0x91, 0x31, 0x14, 0x6e, 0x39, 0x66, 0x0f, 0xf1, - 0x3c, 0xd0, 0xa3, 0x41, 0xe7, 0x7d, 0xe3, 0xa0, 0x64, 0xee, 0x8c, 0xa0, 0x9e, 0x98, 0x80, 0xa7, - 0x79, 0x34, 0xb0, 0xe7, 0xbc, 0x38, 0x1a, 0xb0, 0x3b, 0x50, 0x8b, 0xcf, 0x5c, 0xe5, 0x87, 0xa7, - 0xe4, 0xd7, 0xb5, 0xbd, 0x77, 0x52, 0x8b, 0x27, 0x86, 0x8e, 0xab, 0x24, 0x18, 0x47, 0x42, 0x23, - 0x35, 0xf1, 0x25, 0x5d, 0x6d, 0x28, 0xcd, 0xfd, 0x29, 0xe9, 0x59, 0xe5, 0x38, 0x44, 0xca, 0xa9, - 0x6f, 0x72, 0x70, 0x95, 0xe3, 0x10, 0x83, 0x35, 0x93, 0x53, 0x53, 0x2e, 0x57, 0x39, 0x8d, 0xd1, - 0x76, 0x19, 0x69, 0x5f, 0x86, 0x6e, 0x90, 0xf8, 0x3f, 0x99, 0x3b, 0x41, 0xb2, 0xf7, 0xff, 0xc9, - 0x6a, 0xbf, 0x2e, 0x40, 0x3d, 0xa9, 0xf1, 0x58, 0xb0, 0xfc, 0xa9, 0x08, 0xb5, 0x7f, 0xe2, 0x0b, - 0x65, 0x17, 0xce, 0x51, 0xd8, 0x1d, 0xa8, 0xb8, 0x5a, 0xab, 0xa4, 0x0c, 0xbc, 0x9f, 0xbf, 0x20, - 0xba, 0xfb, 0xc8, 0x19, 0x86, 0x5a, 0x2d, 0xb8, 0x41, 0xad, 0x7f, 0x0e, 0x90, 0x11, 0xd1, 0xd6, - 0x73, 0xb1, 0xb0, 0x5a, 0x71, 0xc8, 0xae, 0x43, 0xe5, 0xa9, 0x1b, 0xcc, 0x85, 0xcd, 0x6f, 0x33, - 0xf9, 0xa2, 0xf8, 0x79, 0xc1, 0xf9, 0x53, 0x11, 0x6a, 0xf6, 0xc2, 0x60, 0xb7, 0xa1, 0x46, 0x17, - 0x86, 0xb5, 0xe8, 0xea, 0x43, 0x93, 0x40, 0xd8, 0x6e, 0x7a, 0x13, 0xe6, 0x6c, 0xb4, 0xaa, 0xcc, - 0x8d, 0x68, 0x6d, 0xcc, 0xee, 0xc5, 0xd2, 0x54, 0x9c, 0xd8, 0x2b, 0x6f, 0x0d, 0xd1, 0x03, 0x71, - 0xe2, 0x87, 0x3e, 0xfa, 0x87, 0x23, 0x8b, 0xdd, 0x4e, 0x76, 0x5d, 0x26, 0x8d, 0xef, 0xe5, 0x35, - 0xbe, 0xbc, 0xe9, 0x11, 0x34, 0x73, 0xcb, 0x5c, 0xb1, 0xeb, 0x8f, 0xf3, 0xbb, 0xb6, 0x4b, 0x92, - 0x3a, 0x73, 0x5f, 0x67, 0x5e, 0xf8, 0x0f, 0xfc, 0xf7, 0x19, 0x40, 0xa6, 0xf2, 0xcd, 0x8b, 0x8e, - 0xf3, 0x4d, 0x09, 0x60, 0x1c, 0x61, 0xc9, 0x9d, 0xba, 0x54, 0xf7, 0x5b, 0xfe, 0x69, 0x28, 0x95, - 0x78, 0x42, 0xc7, 0x98, 0xe4, 0xeb, 0xbc, 0x69, 0x68, 0x74, 0x62, 0xd8, 0x3e, 0x34, 0xa7, 0x22, - 0xf6, 0x94, 0x4f, 0x09, 0x65, 0x9d, 0xbe, 0x89, 0x7b, 0xca, 0xf4, 0x74, 0x07, 0x19, 0xc2, 0xf8, - 0x2a, 0x2f, 0xc3, 0xf6, 0xa0, 0x25, 0x2e, 0x22, 0xa9, 0xb4, 0x5d, 0xc5, 0xf4, 0x15, 0xd7, 0x4c, - 0x87, 0x82, 0x74, 0x5a, 0x89, 0x37, 0x45, 0x36, 0x61, 0x2e, 0x94, 0x3d, 0x37, 0x32, 0x97, 0x6a, - 0x73, 0xaf, 0x73, 0x69, 0xbd, 0xbe, 0x1b, 0x19, 0xa7, 0xf5, 0x3e, 0xc5, 0xbd, 0x7e, 0xf3, 0xb7, - 0xcd, 0x5b, 0xb9, 0x9b, 0x74, 0x26, 0x8f, 0x17, 0xbb, 0x94, 0x2f, 0xe7, 0xbe, 0xde, 0x9d, 0x6b, - 0x3f, 0xd8, 0x75, 0x23, 0x1f, 0xd5, 0xa1, 0xe0, 0x68, 0xc0, 0x49, 0xf5, 0xfa, 0xcf, 0xa1, 0x7d, - 0xd9, 0xee, 0xb7, 0x89, 0xc1, 0xfa, 0x5d, 0x68, 0xa4, 0x76, 0xbc, 0x4e, 0xb0, 0x9e, 0x0f, 0xde, - 0x1f, 0x0b, 0x50, 0x35, 0xa7, 0x8a, 0xdd, 0x85, 0x46, 0x20, 0x3d, 0x17, 0x0d, 0x48, 0x5a, 0xbb, - 0x0f, 0xb2, 0x43, 0xd7, 0xbd, 0x9f, 0xf0, 0x8c, 0x57, 0x33, 0x2c, 0x26, 0x99, 0x1f, 0x9e, 0xc8, - 0xe4, 0x14, 0xac, 0x65, 0x42, 0xa3, 0xf0, 0x44, 0x72, 0xc3, 0x5c, 0xff, 0x0a, 0xd6, 0x96, 0x55, - 0x5c, 0x61, 0xe7, 0x47, 0xcb, 0xe9, 0x4a, 0x35, 0x3b, 0x15, 0xca, 0x9b, 0x7d, 
0x17, 0x1a, 0x29, - 0x9d, 0xed, 0xbc, 0x6c, 0x78, 0x2b, 0x2f, 0x99, 0xb3, 0xd5, 0x09, 0x00, 0x32, 0xd3, 0xb0, 0x58, - 0x61, 0x0f, 0x49, 0xf7, 0xa8, 0x31, 0x23, 0x9d, 0xd3, 0xbd, 0xe7, 0x6a, 0x97, 0x4c, 0x69, 0x71, - 0x1a, 0xb3, 0x2e, 0xc0, 0x34, 0x3d, 0xb0, 0xaf, 0x38, 0xc6, 0x39, 0x84, 0x33, 0x86, 0x7a, 0x62, - 0x04, 0xdb, 0x82, 0x66, 0x6c, 0x57, 0xc6, 0x8e, 0x09, 0x97, 0xab, 0xf0, 0x3c, 0x09, 0x3b, 0x1f, - 0xe5, 0x86, 0xa7, 0x62, 0xa9, 0xf3, 0xe1, 0x48, 0xe1, 0x96, 0xe1, 0x7c, 0x0d, 0x15, 0x22, 0xe0, - 0x31, 0x8b, 0xb5, 0xab, 0xb4, 0x6d, 0xa2, 0x4c, 0x53, 0x21, 0x63, 0x5a, 0xb6, 0x57, 0xc6, 0x44, - 0xe4, 0x06, 0xc0, 0x3e, 0xc6, 0xd6, 0x65, 0x6a, 0x3d, 0x7a, 0x15, 0x0e, 0xd9, 0xce, 0xcf, 0xa0, - 0x9e, 0x90, 0x71, 0xe7, 0xf7, 0xfd, 0x50, 0x58, 0x13, 0x69, 0x8c, 0xcd, 0x67, 0xff, 0xcc, 0x55, - 0xae, 0xa7, 0x85, 0x69, 0x11, 0x2a, 0x3c, 0x23, 0x38, 0x1f, 0x41, 0x33, 0x77, 0x7a, 0x30, 0xdd, - 0x1e, 0x53, 0x18, 0xcd, 0x19, 0x36, 0x13, 0xe7, 0xf7, 0xd8, 0x1a, 0x27, 0xdd, 0xce, 0x4f, 0x00, - 0xce, 0xb4, 0x8e, 0x9e, 0x50, 0xfb, 0x63, 0x7d, 0xdf, 0x40, 0x0a, 0x21, 0xd8, 0x26, 0x34, 0x71, - 0x12, 0x5b, 0xbe, 0xc9, 0x77, 0x92, 0x88, 0x0d, 0xe0, 0xff, 0xa1, 0x71, 0x92, 0x8a, 0x97, 0x6c, - 0xe8, 0x12, 0xe9, 0x0f, 0xa0, 0x1e, 0x4a, 0xcb, 0x33, 0xdd, 0x58, 0x2d, 0x94, 0xa9, 0x9c, 0x1b, - 0x04, 0x96, 0x57, 0x31, 0x72, 0x6e, 0x10, 0x10, 0xd3, 0xb9, 0x05, 0xff, 0xf7, 0x52, 0x93, 0xcf, - 0xde, 0x83, 0xea, 0x89, 0x1f, 0x68, 0xba, 0x11, 0xb0, 0xfb, 0xb3, 0x33, 0xe7, 0x5f, 0x05, 0x80, - 0x2c, 0xec, 0x98, 0xcc, 0x58, 0xda, 0x11, 0xd3, 0x32, 0xa5, 0x3c, 0x80, 0xfa, 0xcc, 0x16, 0x09, - 0x1b, 0xd0, 0x1b, 0xcb, 0xa9, 0xd2, 0x4d, 0x6a, 0x88, 0x29, 0x1f, 0x7b, 0xb6, 0x7c, 0xbc, 0x4d, - 0x23, 0x9e, 0xae, 0x40, 0x5d, 0x4c, 0xfe, 0x41, 0x05, 0xd9, 0x29, 0xe4, 0x96, 0xb3, 0xfe, 0x15, - 0xac, 0x2e, 0x2d, 0xf9, 0x86, 0x17, 0x46, 0x56, 0xec, 0xf2, 0x47, 0xf0, 0x36, 0x54, 0x4d, 0x67, - 0x8a, 0xf9, 0x82, 0x23, 0xab, 0x86, 0xc6, 0xd4, 0x4e, 0x1c, 0x25, 0xcf, 0x9a, 0xd1, 0x91, 0xb3, - 0x07, 0x55, 0xf3, 0x6e, 0x63, 0xdb, 0x50, 0x73, 0x3d, 0x73, 0x56, 0x73, 0xf5, 0x02, 0x99, 0xfb, - 0x44, 0xe6, 0x09, 0xdb, 0xf9, 0x4b, 0x11, 0x20, 0xa3, 0xbf, 0x45, 0x3b, 0xfb, 0x05, 0xac, 0xc5, - 0xc2, 0x93, 0xe1, 0xd4, 0x55, 0x0b, 0xe2, 0xda, 0xf7, 0xc9, 0x55, 0x22, 0x97, 0x90, 0xb9, 0xd6, - 0xb6, 0xf4, 0xfa, 0xd6, 0x76, 0x1b, 0xca, 0x9e, 0x8c, 0x16, 0xf6, 0x16, 0x61, 0xcb, 0x1b, 0xe9, - 0xcb, 0x68, 0x81, 0xaf, 0x54, 0x44, 0xb0, 0x2e, 0x54, 0x67, 0xe7, 0xf4, 0x92, 0x35, 0xaf, 0x80, - 0xeb, 0xcb, 0xd8, 0x07, 0xe7, 0x38, 0xc6, 0x77, 0xaf, 0x41, 0xb1, 0x5b, 0x50, 0x99, 0x9d, 0x4f, - 0x7d, 0x45, 0x4d, 0x71, 0xd3, 0xb4, 0x8d, 0x79, 0xf8, 0xc0, 0x57, 0xf8, 0xba, 0x25, 0x0c, 0x73, - 0xa0, 0xa8, 0x66, 0xf4, 0x10, 0x68, 0x9a, 0x27, 0x4e, 0xce, 0x9b, 0xb3, 0x83, 0x15, 0x5e, 0x54, - 0xb3, 0x5e, 0x1d, 0xaa, 0xc6, 0xaf, 0xce, 0x3f, 0x4b, 0xb0, 0xb6, 0x6c, 0x25, 0xe6, 0x41, 0xac, - 0xbc, 0x24, 0x0f, 0x62, 0xe5, 0xa5, 0x5d, 0x7f, 0x31, 0xd7, 0xf5, 0x3b, 0x50, 0x91, 0xcf, 0x42, - 0xa1, 0xf2, 0x4f, 0xf6, 0xfe, 0x99, 0x7c, 0x16, 0x62, 0x0f, 0x6b, 0x58, 0x4b, 0x2d, 0x61, 0xc5, - 0xb6, 0x84, 0x1f, 0xc3, 0xea, 0x89, 0x0c, 0x02, 0xf9, 0x6c, 0xb2, 0x98, 0x05, 0x7e, 0x78, 0x6e, - 0xfb, 0xc2, 0x65, 0x22, 0xdb, 0x86, 0x6b, 0x53, 0x5f, 0xa1, 0x39, 0x7d, 0x19, 0x6a, 0x11, 0xd2, - 0x23, 0x08, 0x71, 0x97, 0xc9, 0xec, 0x4b, 0xd8, 0x72, 0xb5, 0x16, 0xb3, 0x48, 0x3f, 0x0a, 0x23, - 0xd7, 0x3b, 0x1f, 0x48, 0x8f, 0xce, 0xec, 0x2c, 0x72, 0xb5, 0x7f, 0xec, 0x07, 0xf8, 0xde, 0xab, - 0x91, 0xe8, 0x6b, 0x71, 0xec, 0x13, 0x58, 0xf3, 0x94, 0x70, 0xb5, 0x18, 0x88, 0x58, 0x1f, 0xb9, - 0xfa, 
0xac, 0x53, 0x27, 0xc9, 0x4b, 0x54, 0xdc, 0x83, 0x8b, 0xd6, 0x7e, 0xed, 0x07, 0x53, 0xcf, - 0x55, 0xd3, 0x4e, 0xc3, 0xec, 0x61, 0x89, 0xc8, 0xba, 0xc0, 0x88, 0x30, 0x9c, 0x45, 0x7a, 0x91, - 0x42, 0x81, 0xa0, 0x57, 0x70, 0xb0, 0xaa, 0x6a, 0x7f, 0x26, 0x62, 0xed, 0xce, 0x22, 0xfa, 0xd5, - 0x50, 0xe2, 0x19, 0x81, 0xdd, 0x84, 0xb6, 0x1f, 0x7a, 0xc1, 0x7c, 0x2a, 0x9e, 0x44, 0xb8, 0x11, - 0x15, 0xc6, 0x9d, 0x16, 0xd5, 0xa0, 0x6b, 0x96, 0x7e, 0x64, 0xc9, 0x08, 0x15, 0x17, 0x97, 0xa0, - 0xab, 0x06, 0x6a, 0xe9, 0x09, 0xd4, 0xf9, 0xb6, 0x00, 0xed, 0xcb, 0x89, 0x87, 0x61, 0x8b, 0x70, - 0xf3, 0xf6, 0x08, 0xe3, 0x38, 0x0d, 0x65, 0x31, 0x17, 0xca, 0xe4, 0x52, 0x2c, 0xe5, 0x2e, 0xc5, - 0x34, 0x2d, 0xca, 0xaf, 0x4e, 0x8b, 0xa5, 0x8d, 0x56, 0x2e, 0x6d, 0xd4, 0xf9, 0x6d, 0x01, 0xae, - 0x5d, 0x4a, 0xee, 0x37, 0xb6, 0x68, 0x0b, 0x9a, 0x33, 0xf7, 0x5c, 0x1c, 0xb9, 0x8a, 0x52, 0xa6, - 0x64, 0xba, 0xc6, 0x1c, 0xe9, 0xbf, 0x60, 0x5f, 0x08, 0xad, 0xfc, 0x89, 0xba, 0xd2, 0xb6, 0x24, - 0x41, 0x0e, 0xa5, 0xbe, 0x27, 0xe7, 0xf6, 0xc2, 0x4d, 0x12, 0x24, 0x21, 0xbe, 0x9c, 0x46, 0xa5, - 0x2b, 0xd2, 0xc8, 0x39, 0x84, 0x7a, 0x62, 0x20, 0xdb, 0xb4, 0x7f, 0x15, 0x0a, 0xd9, 0xdf, 0xad, - 0x47, 0xb1, 0x50, 0x68, 0xbb, 0xf9, 0xc5, 0xf0, 0x21, 0x54, 0x4e, 0x95, 0x9c, 0x47, 0xb6, 0x62, - 0x2f, 0x21, 0x0c, 0xc7, 0x99, 0x40, 0xcd, 0x52, 0xd8, 0x0e, 0x54, 0x8f, 0x17, 0x87, 0x49, 0xbf, - 0x63, 0xcb, 0x05, 0xce, 0xa7, 0x16, 0x81, 0x35, 0xc8, 0x20, 0xd8, 0x75, 0x28, 0x1f, 0x2f, 0x46, - 0x03, 0xf3, 0x06, 0xc4, 0x4a, 0x86, 0xb3, 0x5e, 0xd5, 0x18, 0xe4, 0xdc, 0x87, 0x56, 0x5e, 0x0e, - 0x9d, 0x92, 0xeb, 0xa3, 0x68, 0x9c, 0x95, 0xec, 0xe2, 0x6b, 0x4a, 0xf6, 0xce, 0x36, 0xd4, 0xec, - 0xff, 0x1b, 0xd6, 0x80, 0xca, 0xa3, 0xc3, 0xc9, 0xf0, 0x61, 0x7b, 0x85, 0xd5, 0xa1, 0x7c, 0x30, - 0x9e, 0x3c, 0x6c, 0x17, 0x70, 0x74, 0x38, 0x3e, 0x1c, 0xb6, 0x8b, 0x3b, 0x37, 0xa1, 0x95, 0xff, - 0x83, 0xc3, 0x9a, 0x50, 0x9b, 0xec, 0x1f, 0x0e, 0x7a, 0xe3, 0x5f, 0xb6, 0x57, 0x58, 0x0b, 0xea, - 0xa3, 0xc3, 0xc9, 0xb0, 0xff, 0x88, 0x0f, 0xdb, 0x85, 0x9d, 0x5f, 0x40, 0x23, 0xfd, 0x91, 0x80, - 0x1a, 0x7a, 0xa3, 0xc3, 0x41, 0x7b, 0x85, 0x01, 0x54, 0x27, 0xc3, 0x3e, 0x1f, 0xa2, 0xde, 0x1a, - 0x94, 0x26, 0x93, 0x83, 0x76, 0x11, 0x57, 0xed, 0xef, 0xf7, 0x0f, 0x86, 0xed, 0x12, 0x0e, 0x1f, - 0x3e, 0x38, 0xba, 0x37, 0x69, 0x97, 0x77, 0x3e, 0x83, 0x6b, 0x97, 0x1e, 0xeb, 0x24, 0x7d, 0xb0, - 0xcf, 0x87, 0xa8, 0xa9, 0x09, 0xb5, 0x23, 0x3e, 0x7a, 0xbc, 0xff, 0x70, 0xd8, 0x2e, 0x20, 0xe3, - 0xfe, 0xb8, 0xff, 0xd5, 0x70, 0xd0, 0x2e, 0xf6, 0x6e, 0x7c, 0xf7, 0x7c, 0xa3, 0xf0, 0xfd, 0xf3, - 0x8d, 0xc2, 0x0f, 0xcf, 0x37, 0x0a, 0x7f, 0x7f, 0xbe, 0x51, 0xf8, 0xf6, 0xc5, 0xc6, 0xca, 0xf7, - 0x2f, 0x36, 0x56, 0x7e, 0x78, 0xb1, 0xb1, 0x72, 0x5c, 0xa5, 0xff, 0xa9, 0x9f, 0xfe, 0x3b, 0x00, - 0x00, 0xff, 0xff, 0x3a, 0xd6, 0xef, 0x88, 0x8f, 0x15, 0x00, 0x00, + // 2538 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcf, 0x6f, 0x5b, 0xc7, + 0xf1, 0x17, 0x7f, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xe8, 0xeb, 0xaf, 0xac, 0xbc, 0xe4, + 0x1b, 0xc8, 0xb2, 0x2d, 0xe1, 0xab, 0x00, 0x71, 0x60, 0x14, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xdb, + 0xa2, 0xb0, 0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xe9, 0x41, 0x8f, 0x6f, 0x1f, 0xf6, + 0x2d, 0x23, 0xb1, 0x87, 0x1e, 0x7a, 0x2f, 0x10, 0xa0, 0x40, 0xd1, 0x4b, 0xd1, 0x7f, 0xa2, 0xc7, + 0xf6, 0x1e, 0xa0, 0x97, 0x1c, 0x7a, 0x08, 0x7a, 0x48, 0x0b, 0xe7, 0xd2, 0x3f, 0xa2, 0x05, 0x8a, + 0x99, 0xdd, 0xf7, 0x83, 0x94, 0x02, 0xc7, 0x6d, 0xd1, 0x13, 0xe7, 0xcd, 0x7c, 0x76, 0x66, 0x76, + 0x77, 0x66, 
0x67, 0x76, 0x09, 0x0d, 0x19, 0xc5, 0x5b, 0x91, 0x92, 0x5a, 0xb2, 0x62, 0x74, 0xbc, + 0x7a, 0xef, 0xc4, 0xd7, 0xa7, 0xd3, 0xe3, 0x2d, 0x4f, 0x4e, 0xb6, 0x4f, 0xe4, 0x89, 0xdc, 0x26, + 0xd1, 0xf1, 0x74, 0x4c, 0x5f, 0xf4, 0x41, 0x94, 0x19, 0xe2, 0xfc, 0xad, 0x08, 0xc5, 0x41, 0xc4, + 0xde, 0x85, 0xaa, 0x1f, 0x46, 0x53, 0x1d, 0x77, 0x0a, 0xeb, 0xa5, 0x8d, 0xe6, 0x4e, 0x63, 0x2b, + 0x3a, 0xde, 0xea, 0x23, 0x87, 0x5b, 0x01, 0x5b, 0x87, 0xb2, 0xb8, 0x10, 0x5e, 0xa7, 0xb8, 0x5e, + 0xd8, 0x68, 0xee, 0x00, 0x02, 0x7a, 0x17, 0xc2, 0x1b, 0x44, 0x07, 0x4b, 0x9c, 0x24, 0xec, 0x03, + 0xa8, 0xc6, 0x72, 0xaa, 0x3c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x4a, + 0x51, 0xd3, 0xd8, 0x0f, 0x44, 0xa7, 0x9c, 0x69, 0x7a, 0xe8, 0x07, 0x06, 0x43, 0x12, 0xf6, 0x1e, + 0x54, 0x8e, 0xa7, 0x7e, 0x30, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x1e, 0x32, 0x08, 0x63, 0x64, + 0x08, 0x9a, 0x08, 0x75, 0x22, 0x3a, 0xd5, 0x0c, 0xf4, 0x04, 0x19, 0x06, 0x44, 0x32, 0xb4, 0x35, + 0xf2, 0xc7, 0xe3, 0x4e, 0x2d, 0xb3, 0xd5, 0xf5, 0xc7, 0x63, 0x63, 0x0b, 0x25, 0x6c, 0x03, 0xea, + 0x51, 0xe0, 0xea, 0xb1, 0x54, 0x93, 0x0e, 0x64, 0x7e, 0x1f, 0x59, 0x1e, 0x4f, 0xa5, 0xec, 0x3e, + 0x34, 0x3d, 0x19, 0xc6, 0x5a, 0xb9, 0x7e, 0xa8, 0xe3, 0x4e, 0x93, 0xc0, 0x6f, 0x22, 0xf8, 0x33, + 0xa9, 0xce, 0x84, 0xda, 0xcf, 0x84, 0x3c, 0x8f, 0xdc, 0x2b, 0x43, 0x51, 0x46, 0xce, 0xaf, 0x0a, + 0x50, 0x4f, 0xb4, 0x32, 0x07, 0x96, 0x77, 0x95, 0x77, 0xea, 0x6b, 0xe1, 0xe9, 0xa9, 0x12, 0x9d, + 0xc2, 0x7a, 0x61, 0xa3, 0xc1, 0xe7, 0x78, 0xac, 0x05, 0xc5, 0xc1, 0x90, 0xd6, 0xbb, 0xc1, 0x8b, + 0x83, 0x21, 0xeb, 0x40, 0xed, 0xb9, 0xab, 0x7c, 0x37, 0xd4, 0xb4, 0xc0, 0x0d, 0x9e, 0x7c, 0xb2, + 0x9b, 0xd0, 0x18, 0x0c, 0x9f, 0x0b, 0x15, 0xfb, 0x32, 0xa4, 0x65, 0x6d, 0xf0, 0x8c, 0xc1, 0xd6, + 0x00, 0x06, 0xc3, 0x87, 0xc2, 0x45, 0xa5, 0x71, 0xa7, 0xb2, 0x5e, 0xda, 0x68, 0xf0, 0x1c, 0xc7, + 0xf9, 0x19, 0x54, 0x68, 0xab, 0xd9, 0xa7, 0x50, 0x1d, 0xf9, 0x27, 0x22, 0xd6, 0xc6, 0x9d, 0xbd, + 0x9d, 0x2f, 0xbf, 0xb9, 0xb5, 0xf4, 0xe7, 0x6f, 0x6e, 0x6d, 0xe6, 0x62, 0x4a, 0x46, 0x22, 0xf4, + 0x64, 0xa8, 0x5d, 0x3f, 0x14, 0x2a, 0xde, 0x3e, 0x91, 0xf7, 0xcc, 0x90, 0xad, 0x2e, 0xfd, 0x70, + 0xab, 0x81, 0xdd, 0x86, 0x8a, 0x1f, 0x8e, 0xc4, 0x05, 0xf9, 0x5f, 0xda, 0xbb, 0x6e, 0x55, 0x35, + 0x07, 0x53, 0x1d, 0x4d, 0x75, 0x1f, 0x45, 0xdc, 0x20, 0x9c, 0x3f, 0x16, 0xa0, 0x6a, 0x42, 0x89, + 0xdd, 0x84, 0xf2, 0x44, 0x68, 0x97, 0xec, 0x37, 0x77, 0xea, 0x66, 0x4b, 0xb5, 0xcb, 0x89, 0x8b, + 0x51, 0x3a, 0x91, 0x53, 0x5c, 0xfb, 0x62, 0x16, 0xa5, 0x4f, 0x90, 0xc3, 0xad, 0x80, 0xfd, 0x1f, + 0xd4, 0x42, 0xa1, 0xcf, 0xa5, 0x3a, 0xa3, 0x35, 0x6a, 0x99, 0xb0, 0x38, 0x14, 0xfa, 0x89, 0x1c, + 0x09, 0x9e, 0xc8, 0xd8, 0x5d, 0xa8, 0xc7, 0xc2, 0x9b, 0x2a, 0x5f, 0xcf, 0x68, 0xbd, 0x5a, 0x3b, + 0x6d, 0x0a, 0x56, 0xcb, 0x23, 0x70, 0x8a, 0x60, 0x77, 0xa0, 0x11, 0x0b, 0x4f, 0x09, 0x2d, 0xc2, + 0xcf, 0x69, 0xfd, 0x9a, 0x3b, 0x2b, 0x16, 0xae, 0x84, 0xee, 0x85, 0x9f, 0xf3, 0x4c, 0xee, 0xfc, + 0xa2, 0x08, 0x65, 0xf4, 0x99, 0x31, 0x28, 0xbb, 0xea, 0xc4, 0x64, 0x54, 0x83, 0x13, 0xcd, 0xda, + 0x50, 0x42, 0x1d, 0x45, 0x62, 0x21, 0x89, 0x1c, 0xef, 0x7c, 0x64, 0x37, 0x14, 0x49, 0x1c, 0x37, + 0x8d, 0x85, 0xb2, 0xfb, 0x48, 0x34, 0xbb, 0x0d, 0x8d, 0x48, 0xc9, 0x8b, 0xd9, 0x0b, 0xe3, 0x41, + 0x16, 0xa5, 0xc8, 0x44, 0x07, 0xea, 0x91, 0xa5, 0xd8, 0x26, 0x80, 0xb8, 0xd0, 0xca, 0x3d, 0x90, + 0xb1, 0x8e, 0x3b, 0x55, 0xf2, 0x96, 0xe2, 0x1e, 0x19, 0xfd, 0x23, 0x9e, 0x93, 0xb2, 0x55, 0xa8, + 0x9f, 0xca, 0x58, 0x87, 0xee, 0x44, 0x50, 0x86, 0x34, 0x78, 0xfa, 0xcd, 0x1c, 0xa8, 0x4e, 0x03, + 0x7f, 0xe2, 0xeb, 0x4e, 0x23, 0xd3, 
0xf1, 0x8c, 0x38, 0xdc, 0x4a, 0x30, 0x8a, 0xbd, 0x13, 0x25, + 0xa7, 0xd1, 0x91, 0xab, 0x44, 0xa8, 0x29, 0x7f, 0x1a, 0x7c, 0x8e, 0xe7, 0xdc, 0x85, 0xaa, 0xb1, + 0x8c, 0x13, 0x43, 0xca, 0xc6, 0x3a, 0xd1, 0x18, 0xe3, 0xfd, 0xa3, 0x24, 0xc6, 0xfb, 0x47, 0x4e, + 0x17, 0xaa, 0xc6, 0x06, 0xa2, 0x0f, 0xd1, 0x2f, 0x8b, 0x46, 0x1a, 0x79, 0x43, 0x39, 0xd6, 0x26, + 0xa6, 0x38, 0xd1, 0xa4, 0xd5, 0x55, 0x66, 0x05, 0x4b, 0x9c, 0x68, 0xe7, 0x11, 0x34, 0xd2, 0xbd, + 0x21, 0x13, 0x5d, 0xab, 0xa6, 0xd8, 0xef, 0xe2, 0x00, 0x9a, 0xb0, 0x31, 0x4a, 0x34, 0x2e, 0x84, + 0x8c, 0xb4, 0x2f, 0x43, 0x37, 0x20, 0x45, 0x75, 0x9e, 0x7e, 0x3b, 0xbf, 0x2e, 0x41, 0x85, 0x82, + 0x8c, 0x6d, 0x60, 0x4c, 0x47, 0x53, 0x33, 0x83, 0xd2, 0x1e, 0xb3, 0x31, 0x0d, 0x94, 0x3d, 0x69, + 0x48, 0x63, 0x26, 0xad, 0x62, 0x7c, 0x05, 0xc2, 0xd3, 0x52, 0x59, 0x3b, 0xe9, 0x37, 0xda, 0x1f, + 0x61, 0x8e, 0x99, 0x2d, 0x27, 0x9a, 0xdd, 0x81, 0xaa, 0xa4, 0xc4, 0xa0, 0x5d, 0xff, 0x8e, 0x74, + 0xb1, 0x10, 0x54, 0xae, 0x84, 0x3b, 0x92, 0x61, 0x30, 0xa3, 0x58, 0xa8, 0xf3, 0xf4, 0x1b, 0x43, + 0x95, 0x32, 0xe1, 0xe9, 0x2c, 0x32, 0x07, 0x63, 0xcb, 0x84, 0xea, 0x93, 0x84, 0xc9, 0x33, 0x39, + 0x1e, 0x7d, 0x4f, 0x27, 0xd1, 0x38, 0x1e, 0x44, 0xba, 0x73, 0x3d, 0x0b, 0xaa, 0x84, 0xc7, 0x53, + 0x29, 0x22, 0x3d, 0xd7, 0x3b, 0x15, 0x88, 0xbc, 0x91, 0x21, 0xf7, 0x2d, 0x8f, 0xa7, 0xd2, 0x2c, + 0x57, 0x10, 0xfa, 0x26, 0x41, 0x73, 0xb9, 0x82, 0xd8, 0x4c, 0x8e, 0x31, 0x36, 0x1c, 0x1e, 0x20, + 0xf2, 0xad, 0xec, 0x7c, 0x36, 0x1c, 0x6e, 0x25, 0x66, 0xb6, 0xf1, 0x34, 0xd0, 0xfd, 0x6e, 0xe7, + 0x6d, 0xb3, 0x94, 0xc9, 0xb7, 0xb3, 0x96, 0x4d, 0x00, 0x97, 0x35, 0xf6, 0x7f, 0x6a, 0xe2, 0xa5, + 0xc4, 0x89, 0x76, 0xfa, 0x50, 0x4f, 0x5c, 0xbc, 0x14, 0x06, 0xf7, 0xa0, 0x16, 0x9f, 0xba, 0xca, + 0x0f, 0x4f, 0x68, 0x87, 0x5a, 0x3b, 0xd7, 0xd3, 0x19, 0x0d, 0x0d, 0x1f, 0xbd, 0x48, 0x30, 0x8e, + 0x4c, 0x42, 0xea, 0x2a, 0x5d, 0x6d, 0x28, 0x4d, 0xfd, 0x11, 0xe9, 0x59, 0xe1, 0x48, 0x22, 0xe7, + 0xc4, 0x37, 0x41, 0xb9, 0xc2, 0x91, 0x44, 0xff, 0x26, 0x72, 0x64, 0xaa, 0xde, 0x0a, 0x27, 0x7a, + 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0x17, 0x24, 0x6b, 0xf3, 0x5f, 0xb1, 0xf6, 0xcb, 0x02, 0xd4, 0x93, + 0x52, 0x8d, 0x05, 0xc3, 0x1f, 0x89, 0x50, 0xfb, 0x63, 0x5f, 0x28, 0x6b, 0x38, 0xc7, 0x61, 0xf7, + 0xa0, 0xe2, 0x6a, 0xad, 0x92, 0x63, 0xf8, 0xed, 0x7c, 0x9d, 0xdf, 0xda, 0x45, 0x49, 0x2f, 0xd4, + 0x6a, 0xc6, 0x0d, 0x6a, 0xf5, 0x63, 0x80, 0x8c, 0x89, 0xbe, 0x9e, 0x89, 0x99, 0xd5, 0x8a, 0x24, + 0xbb, 0x01, 0x95, 0xcf, 0xdd, 0x60, 0x9a, 0x64, 0xa4, 0xf9, 0x78, 0x50, 0xfc, 0xb8, 0xe0, 0xfc, + 0xa1, 0x08, 0x35, 0x5b, 0xf7, 0xd9, 0x5d, 0xa8, 0x51, 0xdd, 0xb7, 0x1e, 0x5d, 0x9d, 0x7e, 0x09, + 0x84, 0x6d, 0xa7, 0x0d, 0x4d, 0xce, 0x47, 0xab, 0xca, 0x34, 0x36, 0xd6, 0xc7, 0xac, 0xbd, 0x29, + 0x8d, 0xc4, 0xd8, 0x76, 0x2e, 0x2d, 0xea, 0x13, 0xc4, 0xd8, 0x0f, 0x7d, 0x5c, 0x1f, 0x8e, 0x22, + 0x76, 0x37, 0x99, 0x75, 0x99, 0x34, 0xbe, 0x95, 0xd7, 0x78, 0x79, 0xd2, 0x7d, 0x68, 0xe6, 0xcc, + 0x5c, 0x31, 0xeb, 0xf7, 0xf3, 0xb3, 0xb6, 0x26, 0x49, 0x9d, 0x69, 0xbb, 0xb2, 0x55, 0xf8, 0x37, + 0xd6, 0xef, 0x23, 0x80, 0x4c, 0xe5, 0xf7, 0x3f, 0xbe, 0x9c, 0xdf, 0x97, 0x00, 0x06, 0x11, 0x56, + 0xb1, 0x91, 0x4b, 0x75, 0x77, 0xd9, 0x3f, 0x09, 0xa5, 0x12, 0x2f, 0x28, 0xcd, 0x69, 0x7c, 0x9d, + 0x37, 0x0d, 0x8f, 0x32, 0x86, 0xed, 0x42, 0x73, 0x24, 0x62, 0x4f, 0xf9, 0x14, 0x50, 0x76, 0xd1, + 0x6f, 0xe1, 0x9c, 0x32, 0x3d, 0x5b, 0xdd, 0x0c, 0x61, 0xd6, 0x2a, 0x3f, 0x86, 0xed, 0xc0, 0xb2, + 0xb8, 0x88, 0xa4, 0xd2, 0xd6, 0x8a, 0x69, 0x0f, 0xaf, 0x99, 0x46, 0x13, 0xf9, 0x64, 0x89, 0x37, + 0x45, 0xf6, 0xc1, 0x5c, 0x28, 0x7b, 0x6e, 0x14, 0xdb, 0xa2, 
0xdc, 0x59, 0xb0, 0xb7, 0xef, 0x46, + 0x66, 0xd1, 0xf6, 0x3e, 0xc4, 0xb9, 0xfe, 0xfc, 0x2f, 0xb7, 0xee, 0xe4, 0x3a, 0x99, 0x89, 0x3c, + 0x9e, 0x6d, 0x53, 0xbc, 0x9c, 0xf9, 0x7a, 0x7b, 0xaa, 0xfd, 0x60, 0xdb, 0x8d, 0x7c, 0x54, 0x87, + 0x03, 0xfb, 0x5d, 0x4e, 0xaa, 0xd9, 0xc7, 0xd0, 0x8a, 0x94, 0x3c, 0x51, 0x22, 0x8e, 0x5f, 0x50, + 0x5d, 0xb3, 0xfd, 0xe6, 0x1b, 0xb6, 0xfe, 0x92, 0xe4, 0x13, 0x14, 0xf0, 0x95, 0x28, 0xff, 0xb9, + 0xfa, 0x43, 0x68, 0x2f, 0xce, 0xf8, 0x75, 0x76, 0x6f, 0xf5, 0x3e, 0x34, 0xd2, 0x19, 0xbc, 0x6a, + 0x60, 0x3d, 0xbf, 0xed, 0xbf, 0x2b, 0x40, 0xd5, 0xe4, 0x23, 0xbb, 0x0f, 0x8d, 0x40, 0x7a, 0x2e, + 0x3a, 0x90, 0xf4, 0xf6, 0xef, 0x64, 0xe9, 0xba, 0xf5, 0x38, 0x91, 0x99, 0xfd, 0xc8, 0xb0, 0x18, + 0x9e, 0x7e, 0x38, 0x96, 0x49, 0xfe, 0xb4, 0xb2, 0x41, 0xfd, 0x70, 0x2c, 0xb9, 0x11, 0xae, 0x3e, + 0x82, 0xd6, 0xbc, 0x8a, 0x2b, 0xfc, 0x7c, 0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x6e, + 0xdf, 0x87, 0x46, 0xca, 0x67, 0x9b, 0x97, 0x1d, 0x5f, 0xce, 0x8f, 0xcc, 0xf9, 0xea, 0x04, 0x00, + 0x99, 0x6b, 0x78, 0xcc, 0xe1, 0x25, 0x22, 0xcc, 0x9a, 0x87, 0xf4, 0x9b, 0x6a, 0xaf, 0xab, 0x5d, + 0x72, 0x65, 0x99, 0x13, 0xcd, 0xb6, 0x00, 0x46, 0x69, 0xaa, 0x7f, 0xc7, 0x01, 0x90, 0x43, 0x38, + 0x03, 0xa8, 0x27, 0x4e, 0xb0, 0x75, 0x68, 0xc6, 0xd6, 0x32, 0xf6, 0xba, 0x68, 0xae, 0xc2, 0xf3, + 0x2c, 0xec, 0x59, 0x95, 0x1b, 0x9e, 0x88, 0xb9, 0x9e, 0x95, 0x23, 0x87, 0x5b, 0x81, 0xf3, 0x19, + 0x54, 0x88, 0x81, 0x09, 0x1a, 0x6b, 0x57, 0x69, 0xdb, 0xfe, 0x9a, 0x0e, 0x4f, 0xc6, 0x64, 0x76, + 0xaf, 0x8c, 0x21, 0xcc, 0x0d, 0x80, 0xbd, 0x8f, 0x7d, 0xe4, 0xc8, 0xae, 0xe8, 0x55, 0x38, 0x14, + 0x3b, 0x3f, 0x80, 0x7a, 0xc2, 0xc6, 0x99, 0x3f, 0xf6, 0x43, 0x61, 0x5d, 0x24, 0x1a, 0xaf, 0x0d, + 0xfb, 0xa7, 0xae, 0x72, 0x3d, 0x2d, 0x4c, 0x9b, 0x52, 0xe1, 0x19, 0xc3, 0x79, 0x0f, 0x9a, 0xb9, + 0xbc, 0xc3, 0x70, 0x7b, 0x4e, 0xdb, 0x68, 0xb2, 0xdf, 0x7c, 0x38, 0x9f, 0xc0, 0xca, 0x5c, 0x0e, + 0x60, 0xb1, 0xf2, 0x47, 0x49, 0xb1, 0x32, 0x85, 0xe8, 0x52, 0xb7, 0xc5, 0xa0, 0x7c, 0x2e, 0xdc, + 0x33, 0xdb, 0x69, 0x11, 0xed, 0xfc, 0x16, 0x6f, 0x47, 0x49, 0x0f, 0xfb, 0xbf, 0x00, 0xa7, 0x5a, + 0x47, 0x2f, 0xa8, 0xa9, 0xb5, 0xca, 0x1a, 0xc8, 0x21, 0x04, 0xbb, 0x05, 0x4d, 0xfc, 0x88, 0xad, + 0xdc, 0xa8, 0xa6, 0x11, 0xb1, 0x01, 0xfc, 0x0f, 0x34, 0xc6, 0xe9, 0xf0, 0x92, 0x8d, 0x81, 0x64, + 0xf4, 0x3b, 0x50, 0x0f, 0xa5, 0x95, 0x99, 0x1e, 0xbb, 0x16, 0xca, 0x74, 0x9c, 0x1b, 0x04, 0x56, + 0x56, 0x31, 0xe3, 0xdc, 0x20, 0x20, 0xa1, 0x73, 0x07, 0xde, 0xb8, 0x74, 0xcf, 0x63, 0x6f, 0x41, + 0x75, 0xec, 0x07, 0x9a, 0x8a, 0x12, 0xf6, 0xf4, 0xf6, 0xcb, 0xf9, 0x47, 0x01, 0x20, 0x8b, 0x1f, + 0xcc, 0x0a, 0xac, 0x2e, 0x88, 0x59, 0x36, 0xd5, 0x24, 0x80, 0xfa, 0xc4, 0x9e, 0x53, 0x36, 0x32, + 0x6e, 0xce, 0xc7, 0xdc, 0x56, 0x72, 0x8c, 0x99, 0x13, 0x6c, 0xc7, 0x9e, 0x60, 0xaf, 0x73, 0x17, + 0x4b, 0x2d, 0x50, 0xa3, 0x95, 0xbf, 0x9a, 0x43, 0x96, 0xce, 0xdc, 0x4a, 0x56, 0x1f, 0xc1, 0xca, + 0x9c, 0xc9, 0xef, 0x59, 0xb3, 0xb2, 0xf3, 0x36, 0x9f, 0xcb, 0x3b, 0x50, 0x35, 0x77, 0x7a, 0xb6, + 0x01, 0x35, 0xd7, 0x33, 0x69, 0x9c, 0x3b, 0x4a, 0x50, 0xb8, 0x4b, 0x6c, 0x9e, 0x88, 0x9d, 0x3f, + 0x15, 0x01, 0x32, 0xfe, 0x6b, 0x74, 0xdb, 0x0f, 0xa0, 0x15, 0x0b, 0x4f, 0x86, 0x23, 0x57, 0xcd, + 0x48, 0x6a, 0x2f, 0x9d, 0x57, 0x0d, 0x59, 0x40, 0xe6, 0x3a, 0xef, 0xd2, 0xab, 0x3b, 0xef, 0x0d, + 0x28, 0x7b, 0x32, 0x9a, 0xd9, 0xd2, 0xc4, 0xe6, 0x27, 0xb2, 0x2f, 0xa3, 0xd9, 0xc1, 0x12, 0x27, + 0x04, 0xdb, 0x82, 0xea, 0xe4, 0x8c, 0x5e, 0x39, 0xcc, 0x6d, 0xed, 0xc6, 0x3c, 0xf6, 0xc9, 0x19, + 0xd2, 0x07, 0x4b, 0xdc, 0xa2, 0xd8, 0x1d, 0xa8, 0x4c, 0xce, 0x46, 0xbe, 0xb2, 0xc5, 
0xe5, 0xfa, + 0x22, 0xbc, 0xeb, 0x2b, 0x7a, 0xd4, 0x40, 0x0c, 0x73, 0xa0, 0xa8, 0x26, 0xf6, 0x49, 0xa3, 0xbd, + 0xb0, 0x9a, 0x93, 0x83, 0x25, 0x5e, 0x54, 0x93, 0xbd, 0x3a, 0x54, 0xcd, 0xba, 0x3a, 0x7f, 0x2f, + 0x41, 0x6b, 0xde, 0x4b, 0xdc, 0xd9, 0x58, 0x79, 0xc9, 0xce, 0xc6, 0xca, 0x4b, 0x2f, 0x25, 0xc5, + 0xdc, 0xa5, 0xc4, 0x81, 0x8a, 0x3c, 0x0f, 0x85, 0xca, 0x3f, 0xe7, 0xec, 0x9f, 0xca, 0xf3, 0x10, + 0x1b, 0x63, 0x23, 0x9a, 0xeb, 0x33, 0x2b, 0xb6, 0xcf, 0x7c, 0x1f, 0x56, 0xc6, 0x32, 0x08, 0xe4, + 0xf9, 0x70, 0x36, 0x09, 0xfc, 0xf0, 0xcc, 0x36, 0x9b, 0xf3, 0x4c, 0xb6, 0x01, 0xd7, 0x46, 0xbe, + 0x42, 0x77, 0xf6, 0x65, 0xa8, 0x45, 0x48, 0x97, 0x55, 0xc4, 0x2d, 0xb2, 0xd9, 0xa7, 0xb0, 0xee, + 0x6a, 0x2d, 0x26, 0x91, 0x7e, 0x16, 0x46, 0xae, 0x77, 0xd6, 0x95, 0x1e, 0x65, 0xe1, 0x24, 0x72, + 0xb5, 0x7f, 0xec, 0x07, 0x78, 0x89, 0xaf, 0xd1, 0xd0, 0x57, 0xe2, 0xd8, 0x07, 0xd0, 0xf2, 0x94, + 0x70, 0xb5, 0xe8, 0x8a, 0x58, 0x1f, 0xb9, 0xfa, 0xb4, 0x53, 0xa7, 0x91, 0x0b, 0x5c, 0x9c, 0x83, + 0x8b, 0xde, 0x7e, 0xe6, 0x07, 0x23, 0x0f, 0xaf, 0x97, 0x0d, 0x33, 0x87, 0x39, 0x26, 0xdb, 0x02, + 0x46, 0x8c, 0xde, 0x24, 0xd2, 0xb3, 0x14, 0x0a, 0x04, 0xbd, 0x42, 0x82, 0x07, 0xae, 0xf6, 0x27, + 0x22, 0xd6, 0xee, 0x24, 0xa2, 0xf7, 0xa3, 0x12, 0xcf, 0x18, 0xec, 0x36, 0xb4, 0xfd, 0xd0, 0x0b, + 0xa6, 0x23, 0xf1, 0x22, 0xc2, 0x89, 0xa8, 0x30, 0xee, 0x2c, 0xd3, 0xa9, 0x72, 0xcd, 0xf2, 0x8f, + 0x2c, 0x1b, 0xa1, 0xe2, 0x62, 0x01, 0xba, 0x62, 0xa0, 0x96, 0x9f, 0x40, 0x9d, 0x2f, 0x0a, 0xd0, + 0x5e, 0x0c, 0x3c, 0xdc, 0xb6, 0x08, 0x27, 0x6f, 0x2f, 0xd7, 0x48, 0xa7, 0x5b, 0x59, 0xcc, 0x6d, + 0x65, 0x52, 0x2f, 0x4b, 0xb9, 0x7a, 0x99, 0x86, 0x45, 0xf9, 0xbb, 0xc3, 0x62, 0x6e, 0xa2, 0x95, + 0x85, 0x89, 0x3a, 0xbf, 0x29, 0xc0, 0xb5, 0x85, 0xe0, 0xfe, 0xde, 0x1e, 0xad, 0x43, 0x73, 0xe2, + 0x9e, 0x09, 0xf3, 0xb8, 0x10, 0xdb, 0x12, 0x92, 0x67, 0xfd, 0x07, 0xfc, 0x0b, 0x61, 0x39, 0x9f, + 0x51, 0x57, 0xfa, 0x96, 0x04, 0xc8, 0xa1, 0xd4, 0x0f, 0xe5, 0xd4, 0xd6, 0xe2, 0x24, 0x40, 0x12, + 0xe6, 0xe5, 0x30, 0x2a, 0x5d, 0x11, 0x46, 0xce, 0x21, 0xd4, 0x13, 0x07, 0xd9, 0x2d, 0xfb, 0xfa, + 0x53, 0xc8, 0x1e, 0x35, 0x9f, 0xc5, 0x42, 0xa1, 0xef, 0xe6, 0x29, 0xe8, 0x5d, 0xa8, 0x98, 0x36, + 0xb4, 0x78, 0x19, 0x61, 0x24, 0xce, 0x10, 0x6a, 0x96, 0xc3, 0x36, 0xa1, 0x7a, 0x3c, 0x4b, 0xdf, + 0x51, 0xec, 0x71, 0x81, 0xdf, 0x23, 0x8b, 0xc0, 0x33, 0xc8, 0x20, 0xd8, 0x0d, 0x28, 0x1f, 0xcf, + 0xfa, 0x5d, 0x73, 0xb1, 0xc4, 0x93, 0x0c, 0xbf, 0xf6, 0xaa, 0xc6, 0x21, 0xe7, 0x31, 0x2c, 0xe7, + 0xc7, 0xa5, 0x85, 0xbd, 0x90, 0x2b, 0xec, 0xe9, 0x91, 0x5d, 0x7c, 0xd5, 0x0d, 0xe3, 0x23, 0x00, + 0x7a, 0xab, 0x7d, 0xdd, 0x9b, 0xc9, 0xff, 0x43, 0xcd, 0xbe, 0xf1, 0xb2, 0x0f, 0x16, 0xde, 0xac, + 0x5b, 0xe9, 0x03, 0xf0, 0xdc, 0xc3, 0xb5, 0xf3, 0x00, 0x7b, 0xd4, 0x73, 0xa1, 0xba, 0xfe, 0x78, + 0xfc, 0xba, 0xe6, 0x1e, 0x40, 0xeb, 0x59, 0x14, 0xfd, 0x6b, 0x63, 0x7f, 0x02, 0x55, 0xf3, 0xd4, + 0x8c, 0x63, 0x02, 0xf4, 0xc0, 0xee, 0x01, 0x33, 0x7d, 0x6c, 0xde, 0x25, 0x6e, 0x00, 0x88, 0x9c, + 0xa2, 0x3d, 0xbb, 0xb9, 0x84, 0x9c, 0x77, 0x80, 0x1b, 0xc0, 0xe6, 0x06, 0xd4, 0xec, 0xab, 0x26, + 0x6b, 0x40, 0xe5, 0xd9, 0xe1, 0xb0, 0xf7, 0xb4, 0xbd, 0xc4, 0xea, 0x50, 0x3e, 0x18, 0x0c, 0x9f, + 0xb6, 0x0b, 0x48, 0x1d, 0x0e, 0x0e, 0x7b, 0xed, 0xe2, 0xe6, 0x6d, 0x58, 0xce, 0xbf, 0x6b, 0xb2, + 0x26, 0xd4, 0x86, 0xbb, 0x87, 0xdd, 0xbd, 0xc1, 0x8f, 0xdb, 0x4b, 0x6c, 0x19, 0xea, 0xfd, 0xc3, + 0x61, 0x6f, 0xff, 0x19, 0xef, 0xb5, 0x0b, 0x9b, 0x3f, 0x82, 0x46, 0xfa, 0x50, 0x84, 0x1a, 0xf6, + 0xfa, 0x87, 0xdd, 0xf6, 0x12, 0x03, 0xa8, 0x0e, 0x7b, 0xfb, 0xbc, 0x87, 0x7a, 0x6b, 0x50, 0x1a, + 0x0e, 0x0f, 
0xda, 0x45, 0xb4, 0xba, 0xbf, 0xbb, 0x7f, 0xd0, 0x6b, 0x97, 0x90, 0x7c, 0xfa, 0xe4, + 0xe8, 0xe1, 0xb0, 0x5d, 0xde, 0xfc, 0x08, 0xae, 0x2d, 0x3c, 0xa1, 0xd0, 0xe8, 0x83, 0x5d, 0xde, + 0x43, 0x4d, 0x4d, 0xa8, 0x1d, 0xf1, 0xfe, 0xf3, 0xdd, 0xa7, 0xbd, 0x76, 0x01, 0x05, 0x8f, 0x07, + 0xfb, 0x8f, 0x7a, 0xdd, 0x76, 0x71, 0xef, 0xe6, 0x97, 0x2f, 0xd7, 0x0a, 0x5f, 0xbd, 0x5c, 0x2b, + 0x7c, 0xfd, 0x72, 0xad, 0xf0, 0xd7, 0x97, 0x6b, 0x85, 0x2f, 0xbe, 0x5d, 0x5b, 0xfa, 0xea, 0xdb, + 0xb5, 0xa5, 0xaf, 0xbf, 0x5d, 0x5b, 0x3a, 0xae, 0xd2, 0x9f, 0x15, 0x1f, 0xfe, 0x33, 0x00, 0x00, + 0xff, 0xff, 0x92, 0xc4, 0x20, 0x2a, 0xec, 0x18, 0x00, 0x00, } func (m *Op) Marshal() (dAtA []byte, err error) { @@ -2657,6 +3147,48 @@ func (m *Op_Build) MarshalToSizedBuffer(dAtA []byte) (int, error) { } return len(dAtA) - i, nil } +func (m *Op_Merge) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op_Merge) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Merge != nil { + { + size, err := m.Merge.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } + return len(dAtA) - i, nil +} +func (m *Op_Diff) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Op_Diff) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + if m.Diff != nil { + { + size, err := m.Diff.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x3a + } + return len(dAtA) - i, nil +} func (m *Platform) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2772,6 +3304,20 @@ func (m *ExecOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.Secretenv) > 0 { + for iNdEx := len(m.Secretenv) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Secretenv[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x2a + } + } if m.Security != 0 { i = encodeVarintOps(dAtA, i, uint64(m.Security)) i-- @@ -2831,6 +3377,27 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if len(m.CgroupParent) > 0 { + i -= len(m.CgroupParent) + copy(dAtA[i:], m.CgroupParent) + i = encodeVarintOps(dAtA, i, uint64(len(m.CgroupParent))) + i-- + dAtA[i] = 0x52 + } + if len(m.Ulimit) > 0 { + for iNdEx := len(m.Ulimit) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := m.Ulimit[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x4a + } + } if len(m.Hostname) > 0 { i -= len(m.Hostname) copy(dAtA[i:], m.Hostname) @@ -2899,6 +3466,130 @@ func (m *Meta) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *HostIP) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.IP) > 0 { + i -= len(m.IP) + copy(dAtA[i:], m.IP) + i = 
encodeVarintOps(dAtA, i, uint64(len(m.IP))) + i-- + dAtA[i] = 0x12 + } + if len(m.Host) > 0 { + i -= len(m.Host) + copy(dAtA[i:], m.Host) + i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *Ulimit) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Ulimit) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *Ulimit) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Hard != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Hard)) + i-- + dAtA[i] = 0x18 + } + if m.Soft != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Soft)) + i-- + dAtA[i] = 0x10 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + +func (m *SecretEnv) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SecretEnv) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *SecretEnv) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Optional { + i-- + if m.Optional { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.ID) > 0 { + i -= len(m.ID) + copy(dAtA[i:], m.ID) + i = encodeVarintOps(dAtA, i, uint64(len(m.ID))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *Mount) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -2970,6 +3661,20 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { i-- dAtA[i] = 0xa2 } + if m.TmpfsOpt != nil { + { + size, err := m.TmpfsOpt.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x1 + i-- + dAtA[i] = 0x9a + } if m.MountType != 0 { i = encodeVarintOps(dAtA, i, uint64(m.MountType)) i-- @@ -3012,6 +3717,34 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *TmpfsOpt) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TmpfsOpt) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *TmpfsOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Size_ != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Size_)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + func (m *CacheOpt) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3354,6 +4087,18 @@ func (m *OpMetadata) MarshalToSizedBuffer(dAtA []byte) (int, error) { _ = i var l int _ = l + if m.ProgressGroup != nil { + { + size, err := m.ProgressGroup.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = 
encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x32 + } if len(m.Caps) > 0 { keysForCaps := make([]string, 0, len(m.Caps)) for k := range m.Caps { @@ -3735,6 +4480,53 @@ func (m *ExportCache) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *ProgressGroup) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ProgressGroup) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *ProgressGroup) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Weak { + i-- + if m.Weak { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i-- + dAtA[i] = 0x18 + } + if len(m.Name) > 0 { + i -= len(m.Name) + copy(dAtA[i:], m.Name) + i = encodeVarintOps(dAtA, i, uint64(len(m.Name))) + i-- + dAtA[i] = 0x12 + } + if len(m.Id) > 0 { + i -= len(m.Id) + copy(dAtA[i:], m.Id) + i = encodeVarintOps(dAtA, i, uint64(len(m.Id))) + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func (m *ProxyEnv) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -3898,43 +4690,6 @@ func (m *Definition) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } -func (m *HostIP) Marshal() (dAtA []byte, err error) { - size := m.Size() - dAtA = make([]byte, size) - n, err := m.MarshalToSizedBuffer(dAtA[:size]) - if err != nil { - return nil, err - } - return dAtA[:n], nil -} - -func (m *HostIP) MarshalTo(dAtA []byte) (int, error) { - size := m.Size() - return m.MarshalToSizedBuffer(dAtA[:size]) -} - -func (m *HostIP) MarshalToSizedBuffer(dAtA []byte) (int, error) { - i := len(dAtA) - _ = i - var l int - _ = l - if len(m.IP) > 0 { - i -= len(m.IP) - copy(dAtA[i:], m.IP) - i = encodeVarintOps(dAtA, i, uint64(len(m.IP))) - i-- - dAtA[i] = 0x12 - } - if len(m.Host) > 0 { - i -= len(m.Host) - copy(dAtA[i:], m.Host) - i = encodeVarintOps(dAtA, i, uint64(len(m.Host))) - i-- - dAtA[i] = 0xa - } - return len(dAtA) - i, nil -} - func (m *FileOp) Marshal() (dAtA []byte, err error) { size := m.Size() dAtA = make([]byte, size) @@ -4558,6 +5313,174 @@ func (m *NamedUserOpt) MarshalToSizedBuffer(dAtA []byte) (int, error) { return len(dAtA) - i, nil } +func (m *MergeInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MergeInput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MergeInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *MergeOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *MergeOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *MergeOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if len(m.Inputs) > 0 { + for iNdEx := len(m.Inputs) - 1; iNdEx >= 0; iNdEx-- { + { + size, err := 
m.Inputs[iNdEx].MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + } + return len(dAtA) - i, nil +} + +func (m *LowerDiffInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *LowerDiffInput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *LowerDiffInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *UpperDiffInput) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UpperDiffInput) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *UpperDiffInput) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Input != 0 { + i = encodeVarintOps(dAtA, i, uint64(m.Input)) + i-- + dAtA[i] = 0x8 + } + return len(dAtA) - i, nil +} + +func (m *DiffOp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalToSizedBuffer(dAtA[:size]) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DiffOp) MarshalTo(dAtA []byte) (int, error) { + size := m.Size() + return m.MarshalToSizedBuffer(dAtA[:size]) +} + +func (m *DiffOp) MarshalToSizedBuffer(dAtA []byte) (int, error) { + i := len(dAtA) + _ = i + var l int + _ = l + if m.Upper != nil { + { + size, err := m.Upper.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0x12 + } + if m.Lower != nil { + { + size, err := m.Lower.MarshalToSizedBuffer(dAtA[:i]) + if err != nil { + return 0, err + } + i -= size + i = encodeVarintOps(dAtA, i, uint64(size)) + } + i-- + dAtA[i] = 0xa + } + return len(dAtA) - i, nil +} + func encodeVarintOps(dAtA []byte, offset int, v uint64) int { offset -= sovOps(v) base := offset @@ -4643,6 +5566,30 @@ func (m *Op_Build) Size() (n int) { } return n } +func (m *Op_Merge) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Merge != nil { + l = m.Merge.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} +func (m *Op_Diff) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Diff != nil { + l = m.Diff.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} func (m *Platform) Size() (n int) { if m == nil { return 0 @@ -4712,6 +5659,12 @@ func (m *ExecOp) Size() (n int) { if m.Security != 0 { n += 1 + sovOps(uint64(m.Security)) } + if len(m.Secretenv) > 0 { + for _, e := range m.Secretenv { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } return n } @@ -4755,6 +5708,72 @@ func (m *Meta) Size() (n int) { if l > 0 { n += 1 + l + sovOps(uint64(l)) } + if len(m.Ulimit) > 0 { + for _, e := range m.Ulimit { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + l = len(m.CgroupParent) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *HostIP) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = 
len(m.Host) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.IP) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + return n +} + +func (m *Ulimit) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Soft != 0 { + n += 1 + sovOps(uint64(m.Soft)) + } + if m.Hard != 0 { + n += 1 + sovOps(uint64(m.Hard)) + } + return n +} + +func (m *SecretEnv) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.ID) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Optional { + n += 2 + } return n } @@ -4784,6 +5803,10 @@ func (m *Mount) Size() (n int) { if m.MountType != 0 { n += 1 + sovOps(uint64(m.MountType)) } + if m.TmpfsOpt != nil { + l = m.TmpfsOpt.Size() + n += 2 + l + sovOps(uint64(l)) + } if m.CacheOpt != nil { l = m.CacheOpt.Size() n += 2 + l + sovOps(uint64(l)) @@ -4803,6 +5826,18 @@ func (m *Mount) Size() (n int) { return n } +func (m *TmpfsOpt) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Size_ != 0 { + n += 1 + sovOps(uint64(m.Size_)) + } + return n +} + func (m *CacheOpt) Size() (n int) { if m == nil { return 0 @@ -4968,6 +6003,10 @@ func (m *OpMetadata) Size() (n int) { n += mapEntrySize + 1 + sovOps(uint64(mapEntrySize)) } } + if m.ProgressGroup != nil { + l = m.ProgressGroup.Size() + n += 1 + l + sovOps(uint64(l)) + } return n } @@ -5093,6 +6132,26 @@ func (m *ExportCache) Size() (n int) { return n } +func (m *ProgressGroup) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + l = len(m.Id) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovOps(uint64(l)) + } + if m.Weak { + n += 2 + } + return n +} + func (m *ProxyEnv) Size() (n int) { if m == nil { return 0 @@ -5165,23 +6224,6 @@ func (m *Definition) Size() (n int) { return n } -func (m *HostIP) Size() (n int) { - if m == nil { - return 0 - } - var l int - _ = l - l = len(m.Host) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - l = len(m.IP) - if l > 0 { - n += 1 + l + sovOps(uint64(l)) - } - return n -} - func (m *FileOp) Size() (n int) { if m == nil { return 0 @@ -5461,6 +6503,74 @@ func (m *NamedUserOpt) Size() (n int) { return n } +func (m *MergeInput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *MergeOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if len(m.Inputs) > 0 { + for _, e := range m.Inputs { + l = e.Size() + n += 1 + l + sovOps(uint64(l)) + } + } + return n +} + +func (m *LowerDiffInput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *UpperDiffInput) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Input != 0 { + n += 1 + sovOps(uint64(m.Input)) + } + return n +} + +func (m *DiffOp) Size() (n int) { + if m == nil { + return 0 + } + var l int + _ = l + if m.Lower != nil { + l = m.Lower.Size() + n += 1 + l + sovOps(uint64(l)) + } + if m.Upper != nil { + l = m.Upper.Size() + n += 1 + l + sovOps(uint64(l)) + } + return n +} + func sovOps(x uint64) (n int) { return (math_bits.Len64(x|1) + 6) / 7 } @@ -5670,6 +6780,76 @@ func (m *Op) Unmarshal(dAtA []byte) error { } m.Op = &Op_Build{v} iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Merge", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &MergeOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Merge{v} + iNdEx = postIndex + case 7: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Diff", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &DiffOp{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Op = &Op_Diff{v} + iNdEx = postIndex case 10: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field Platform", wireType) @@ -6211,6 +7391,40 @@ func (m *ExecOp) Unmarshal(dAtA []byte) error { break } } + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Secretenv", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Secretenv = append(m.Secretenv, &SecretEnv{}) + if err := m.Secretenv[len(m.Secretenv)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -6491,6 +7705,440 @@ func (m *Meta) Unmarshal(dAtA []byte) error { } m.Hostname = string(dAtA[iNdEx:postIndex]) iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Ulimit", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Ulimit = append(m.Ulimit, &Ulimit{}) + if err := m.Ulimit[len(m.Ulimit)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field CgroupParent", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) 
+ if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.CgroupParent = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HostIP) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HostIP: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Host = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.IP = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Ulimit) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Ulimit: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Ulimit: illegal tag %d (wire type %d)", fieldNum, 
wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Soft", wireType) + } + m.Soft = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Soft |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Hard", wireType) + } + m.Hard = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Hard |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *SecretEnv) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SecretEnv: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SecretEnv: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ID", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ID = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex 
:= iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Optional", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Optional = bool(v != 0) default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -6682,6 +8330,42 @@ func (m *Mount) Unmarshal(dAtA []byte) error { break } } + case 19: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TmpfsOpt", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.TmpfsOpt == nil { + m.TmpfsOpt = &TmpfsOpt{} + } + if err := m.TmpfsOpt.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex case 20: if wireType != 2 { return fmt.Errorf("proto: wrong wireType = %d for field CacheOpt", wireType) @@ -6843,6 +8527,75 @@ func (m *Mount) Unmarshal(dAtA []byte) error { } return nil } +func (m *TmpfsOpt) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TmpfsOpt: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TmpfsOpt: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Size_", wireType) + } + m.Size_ = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Size_ |= int64(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *CacheOpt) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -8228,6 +9981,42 @@ func (m *OpMetadata) Unmarshal(dAtA []byte) error { } m.Caps[github_com_moby_buildkit_util_apicaps.CapID(mapkey)] = mapvalue iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ProgressGroup", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= 
int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.ProgressGroup == nil { + m.ProgressGroup = &ProgressGroup{} + } + if err := m.ProgressGroup.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex default: iNdEx = preIndex skippy, err := skipOps(dAtA[iNdEx:]) @@ -9075,6 +10864,140 @@ func (m *ExportCache) Unmarshal(dAtA []byte) error { } return nil } +func (m *ProgressGroup) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ProgressGroup: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ProgressGroup: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Id", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Id = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + intStringLen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Weak", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + m.Weak = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} func (m *ProxyEnv) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -9614,120 +11537,6 @@ func (m *Definition) Unmarshal(dAtA []byte) error { } return nil } -func (m *HostIP) Unmarshal(dAtA []byte) error { - l := len(dAtA) - iNdEx := 0 - for 
iNdEx < l { - preIndex := iNdEx - var wire uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - wire |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - fieldNum := int32(wire >> 3) - wireType := int(wire & 0x7) - if wireType == 4 { - return fmt.Errorf("proto: HostIP: wiretype end group for non-group") - } - if fieldNum <= 0 { - return fmt.Errorf("proto: HostIP: illegal tag %d (wire type %d)", fieldNum, wire) - } - switch fieldNum { - case 1: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field Host", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.Host = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - case 2: - if wireType != 2 { - return fmt.Errorf("proto: wrong wireType = %d for field IP", wireType) - } - var stringLen uint64 - for shift := uint(0); ; shift += 7 { - if shift >= 64 { - return ErrIntOverflowOps - } - if iNdEx >= l { - return io.ErrUnexpectedEOF - } - b := dAtA[iNdEx] - iNdEx++ - stringLen |= uint64(b&0x7F) << shift - if b < 0x80 { - break - } - } - intStringLen := int(stringLen) - if intStringLen < 0 { - return ErrInvalidLengthOps - } - postIndex := iNdEx + intStringLen - if postIndex < 0 { - return ErrInvalidLengthOps - } - if postIndex > l { - return io.ErrUnexpectedEOF - } - m.IP = string(dAtA[iNdEx:postIndex]) - iNdEx = postIndex - default: - iNdEx = preIndex - skippy, err := skipOps(dAtA[iNdEx:]) - if err != nil { - return err - } - if (skippy < 0) || (iNdEx+skippy) < 0 { - return ErrInvalidLengthOps - } - if (iNdEx + skippy) > l { - return io.ErrUnexpectedEOF - } - iNdEx += skippy - } - } - - if iNdEx > l { - return io.ErrUnexpectedEOF - } - return nil -} func (m *FileOp) Unmarshal(dAtA []byte) error { l := len(dAtA) iNdEx := 0 @@ -11247,6 +13056,419 @@ func (m *NamedUserOpt) Unmarshal(dAtA []byte) error { } return nil } +func (m *MergeInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MergeInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MergeInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if 
(skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *MergeOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: MergeOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: MergeOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Inputs", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Inputs = append(m.Inputs, &MergeInput{}) + if err := m.Inputs[len(m.Inputs)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *LowerDiffInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: LowerDiffInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: LowerDiffInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UpperDiffInput) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if 
iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UpperDiffInput: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UpperDiffInput: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Input", wireType) + } + m.Input = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Input |= InputIndex(b&0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *DiffOp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= uint64(b&0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DiffOp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DiffOp: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Lower", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Lower == nil { + m.Lower = &LowerDiffInput{} + } + if err := m.Lower.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Upper", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowOps + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= int(b&0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthOps + } + postIndex := iNdEx + msglen + if postIndex < 0 { + return ErrInvalidLengthOps + } + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Upper == nil { + m.Upper = &UpperDiffInput{} + } + if err := m.Upper.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipOps(dAtA[iNdEx:]) + if err != nil { + return err + } + if (skippy < 0) || (iNdEx+skippy) < 0 { + return ErrInvalidLengthOps + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil 
+} func skipOps(dAtA []byte) (n int, err error) { l := len(dAtA) iNdEx := 0 diff --git a/vendor/github.com/moby/buildkit/solver/pb/ops.proto b/vendor/github.com/moby/buildkit/solver/pb/ops.proto index 39983e0eee..d1e30068df 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/ops.proto +++ b/vendor/github.com/moby/buildkit/solver/pb/ops.proto @@ -17,6 +17,8 @@ message Op { SourceOp source = 3; FileOp file = 4; BuildOp build = 5; + MergeOp merge = 6; + DiffOp diff = 7; } Platform platform = 10; WorkerConstraints constraints = 11; @@ -45,6 +47,7 @@ message ExecOp { repeated Mount mounts = 2; NetMode network = 3; SecurityMode security = 4; + repeated SecretEnv secretenv = 5; } // Meta is a set of arguments for ExecOp. @@ -58,6 +61,19 @@ message Meta { ProxyEnv proxy_env = 5; repeated HostIP extraHosts = 6; string hostname = 7; + repeated Ulimit ulimit = 9; + string cgroupParent = 10; +} + +message HostIP { + string Host = 1; + string IP = 2; +} + +message Ulimit { + string Name = 1; + int64 Soft = 2; + int64 Hard = 3; } enum NetMode { @@ -71,6 +87,13 @@ enum SecurityMode { INSECURE = 1; // privileged mode } +// SecretEnv is an environment variable that is backed by a secret. +message SecretEnv { + string ID = 1; + string name = 2; + bool optional = 3; +} + // Mount specifies how to mount an input Op as a filesystem. message Mount { int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; @@ -79,6 +102,7 @@ message Mount { int64 output = 4 [(gogoproto.customtype) = "OutputIndex", (gogoproto.nullable) = false]; bool readonly = 5; MountType mountType = 6; + TmpfsOpt TmpfsOpt = 19; CacheOpt cacheOpt = 20; SecretOpt secretOpt = 21; SSHOpt SSHOpt = 22; @@ -94,11 +118,17 @@ enum MountType { TMPFS = 4; } +// TmpfsOpt defines options describing tmpfs mounts +message TmpfsOpt { + // Specify an upper limit on the size of the filesystem. + int64 size = 1; +} + // CacheOpt defines options specific to cache mounts message CacheOpt { // ID is an optional namespace for the mount string ID = 1; - // Sharing is the sharing mode for the mount + // Sharing is the sharing mode for the mount CacheSharingOpt sharing = 2; } @@ -171,12 +201,14 @@ message OpMetadata { // ignore_cache specifies to ignore the cache for this Op. 
bool ignore_cache = 1; // Description can be used for keeping any text fields that builder doesn't parse - map<string, string> description = 2; + map<string, string> description = 2; // index 3 reserved for WorkerConstraint in previous versions // WorkerConstraint worker_constraint = 3; ExportCache export_cache = 4; - + map<string, bool> caps = 5 [(gogoproto.castkey) = "github.com/moby/buildkit/util/apicaps.CapID", (gogoproto.nullable) = false]; + + ProgressGroup progress_group = 6; } // Source is a source mapping description for a file @@ -192,7 +224,7 @@ message Locations { // Source info contains the shared metadata of a source mapping message SourceInfo { - string filename = 1; + string filename = 1; bytes data = 2; Definition definition = 3; } @@ -219,6 +251,12 @@ message ExportCache { bool Value = 1; } +message ProgressGroup { + string id = 1; + string name = 2; + bool weak = 3; +} + message ProxyEnv { string http_proxy = 1; string https_proxy = 2; @@ -243,11 +281,6 @@ message Definition { Source Source = 3; } -message HostIP { - string Host = 1; - string IP = 2; -} - message FileOp { repeated FileAction actions = 2; } @@ -348,3 +381,24 @@ message NamedUserOpt { string name = 1; int64 input = 2 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; } + +message MergeInput { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; +} + +message MergeOp { + repeated MergeInput inputs = 1; +} + +message LowerDiffInput { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; +} + +message UpperDiffInput { + int64 input = 1 [(gogoproto.customtype) = "InputIndex", (gogoproto.nullable) = false]; +} + +message DiffOp { + LowerDiffInput lower = 1; + UpperDiffInput upper = 2; +} diff --git a/vendor/github.com/moby/buildkit/solver/pb/platform.go b/vendor/github.com/moby/buildkit/solver/pb/platform.go index a434aa7168..0fd48a07d4 100644 --- a/vendor/github.com/moby/buildkit/solver/pb/platform.go +++ b/vendor/github.com/moby/buildkit/solver/pb/platform.go @@ -1,11 +1,11 @@ package pb import ( - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) -func (p *Platform) Spec() specs.Platform { - return specs.Platform{ +func (p *Platform) Spec() ocispecs.Platform { + return ocispecs.Platform{ OS: p.OS, Architecture: p.Architecture, Variant: p.Variant, @@ -14,7 +14,7 @@ func (p *Platform) Spec() specs.Platform { } } -func PlatformFromSpec(p specs.Platform) Platform { +func PlatformFromSpec(p ocispecs.Platform) Platform { return Platform{ OS: p.OS, Architecture: p.Architecture, @@ -24,15 +24,15 @@ func (p *Platform) Spec() specs.Platform { } } -func ToSpecPlatforms(p []Platform) []specs.Platform { - out := make([]specs.Platform, 0, len(p)) +func ToSpecPlatforms(p []Platform) []ocispecs.Platform { + out := make([]ocispecs.Platform, 0, len(p)) for _, pp := range p { out = append(out, pp.Spec()) } return out } -func PlatformsFromSpec(p []specs.Platform) []Platform { +func PlatformsFromSpec(p []ocispecs.Platform) []Platform { out := make([]Platform, 0, len(p)) for _, pp := range p { out = append(out, PlatformFromSpec(pp)) diff --git a/vendor/github.com/moby/buildkit/solver/progress.go b/vendor/github.com/moby/buildkit/solver/progress.go index 0548ee3bc9..6e54349671 100644 --- a/vendor/github.com/moby/buildkit/solver/progress.go +++ b/vendor/github.com/moby/buildkit/solver/progress.go @@ -5,10 +5,11 @@ import ( "io" "time" + "github.com/moby/buildkit/util/bklog" + "github.com/moby/buildkit/client" 
"github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" - "github.com/sirupsen/logrus" ) func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { @@ -38,7 +39,7 @@ func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { case progress.Status: vtx, ok := p.Meta("vertex") if !ok { - logrus.Warnf("progress %s status without vertex info", p.ID) + bklog.G(ctx).Warnf("progress %s status without vertex info", p.ID) continue } vs := &client.VertexStatus{ @@ -55,12 +56,20 @@ func (j *Job) Status(ctx context.Context, ch chan *client.SolveStatus) error { case client.VertexLog: vtx, ok := p.Meta("vertex") if !ok { - logrus.Warnf("progress %s log without vertex info", p.ID) + bklog.G(ctx).Warnf("progress %s log without vertex info", p.ID) continue } v.Vertex = vtx.(digest.Digest) v.Timestamp = p.Timestamp ss.Logs = append(ss.Logs, &v) + case client.VertexWarning: + vtx, ok := p.Meta("vertex") + if !ok { + bklog.G(ctx).Warnf("progress %s warning without vertex info", p.ID) + continue + } + v.Vertex = vtx.(digest.Digest) + ss.Warnings = append(ss.Warnings, &v) } } select { diff --git a/vendor/github.com/moby/buildkit/solver/result.go b/vendor/github.com/moby/buildkit/solver/result.go index b35c74b49e..81766a30f4 100644 --- a/vendor/github.com/moby/buildkit/solver/result.go +++ b/vendor/github.com/moby/buildkit/solver/result.go @@ -5,8 +5,9 @@ import ( "sync" "sync/atomic" + "github.com/moby/buildkit/util/bklog" + "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) // SharedResult is a result that can be cloned @@ -48,7 +49,7 @@ type splitResult struct { func (r *splitResult) Release(ctx context.Context) error { if atomic.AddInt64(&r.released, 1) > 1 { err := errors.Errorf("releasing already released reference %+v", r.Result.ID()) - logrus.Error(err) + bklog.G(ctx).Error(err) return err } if atomic.AddInt64(r.sem, 1) == 2 { diff --git a/vendor/github.com/moby/buildkit/solver/scheduler.go b/vendor/github.com/moby/buildkit/solver/scheduler.go index b90f899f1b..d617cd912c 100644 --- a/vendor/github.com/moby/buildkit/solver/scheduler.go +++ b/vendor/github.com/moby/buildkit/solver/scheduler.go @@ -6,9 +6,9 @@ import ( "sync" "github.com/moby/buildkit/solver/internal/pipe" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/cond" "github.com/pkg/errors" - "github.com/sirupsen/logrus" ) var debugScheduler = false // TODO: replace with logs in build trace @@ -138,7 +138,6 @@ func (s *scheduler) dispatch(e *edge) { debugSchedulerPostUnpark(e, inc) } -postUnpark: // set up new requests that didn't complete/were added by this run openIncoming := make([]*edgePipe, 0, len(inc)) for _, r := range s.incoming[e] { @@ -171,9 +170,9 @@ postUnpark: origEdge := e.index.LoadOrStore(k, e) if origEdge != nil { if e.isDep(origEdge) || origEdge.isDep(e) { - logrus.Debugf("skip merge due to dependency") + bklog.G(context.TODO()).Debugf("skip merge due to dependency") } else { - logrus.Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name()) + bklog.G(context.TODO()).Debugf("merging edge %s to %s\n", e.edge.Vertex.Name(), origEdge.edge.Vertex.Name()) if s.mergeTo(origEdge, e) { s.ef.setEdge(e.edge, origEdge) } @@ -189,11 +188,9 @@ postUnpark: // unpark(), not for any external input. if len(openIncoming) > 0 && len(openOutgoing) == 0 { e.markFailed(pf, errors.New("buildkit scheduler error: return leaving incoming open. 
Please report this with BUILDKIT_SCHEDULER_DEBUG=1")) - goto postUnpark } if len(openIncoming) == 0 && len(openOutgoing) > 0 { e.markFailed(pf, errors.New("buildkit scheduler error: return leaving outgoing open. Please report this with BUILDKIT_SCHEDULER_DEBUG=1")) - goto postUnpark } } @@ -351,11 +348,13 @@ type pipeFactory struct { func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipe.Receiver { target := pf.s.ef.getEdge(ee) if target == nil { - panic("failed to get edge") // TODO: return errored pipe + return pf.NewFuncRequest(func(_ context.Context) (interface{}, error) { + return nil, errors.Errorf("failed to get edge: inconsistent graph state") + }) } p := pf.s.newPipe(target, pf.e, pipe.Request{Payload: req}) if debugScheduler { - logrus.Debugf("> newPipe %s %p desiredState=%s", ee.Vertex.Name(), p, req.desiredState) + bklog.G(context.TODO()).Debugf("> newPipe %s %p desiredState=%s", ee.Vertex.Name(), p, req.desiredState) } return p.Receiver } @@ -363,32 +362,34 @@ func (pf *pipeFactory) NewInputRequest(ee Edge, req *edgeRequest) pipe.Receiver func (pf *pipeFactory) NewFuncRequest(f func(context.Context) (interface{}, error)) pipe.Receiver { p := pf.s.newRequestWithFunc(pf.e, f) if debugScheduler { - logrus.Debugf("> newFunc %p", p) + bklog.G(context.TODO()).Debugf("> newFunc %p", p) } return p } func debugSchedulerPreUnpark(e *edge, inc []pipe.Sender, updates, allPipes []pipe.Receiver) { - logrus.Debugf(">> unpark %s req=%d upt=%d out=%d state=%s %s", e.edge.Vertex.Name(), len(inc), len(updates), len(allPipes), e.state, e.edge.Vertex.Digest()) + log := bklog.G(context.TODO()) + + log.Debugf(">> unpark %s req=%d upt=%d out=%d state=%s %s", e.edge.Vertex.Name(), len(inc), len(updates), len(allPipes), e.state, e.edge.Vertex.Digest()) for i, dep := range e.deps { des := edgeStatusInitial if dep.req != nil { des = dep.req.Request().(*edgeRequest).desiredState } - logrus.Debugf(":: dep%d %s state=%s des=%s keys=%d hasslowcache=%v preprocessfunc=%v", i, e.edge.Vertex.Inputs()[i].Vertex.Name(), dep.state, des, len(dep.keys), e.slowCacheFunc(dep) != nil, e.preprocessFunc(dep) != nil) + log.Debugf(":: dep%d %s state=%s des=%s keys=%d hasslowcache=%v preprocessfunc=%v", i, e.edge.Vertex.Inputs()[i].Vertex.Name(), dep.state, des, len(dep.keys), e.slowCacheFunc(dep) != nil, e.preprocessFunc(dep) != nil) } for i, in := range inc { req := in.Request() - logrus.Debugf("> incoming-%d: %p dstate=%s canceled=%v", i, in, req.Payload.(*edgeRequest).desiredState, req.Canceled) + log.Debugf("> incoming-%d: %p dstate=%s canceled=%v", i, in, req.Payload.(*edgeRequest).desiredState, req.Canceled) } for i, up := range updates { if up == e.cacheMapReq { - logrus.Debugf("> update-%d: %p cacheMapReq complete=%v", i, up, up.Status().Completed) + log.Debugf("> update-%d: %p cacheMapReq complete=%v", i, up, up.Status().Completed) } else if up == e.execReq { - logrus.Debugf("> update-%d: %p execReq complete=%v", i, up, up.Status().Completed) + log.Debugf("> update-%d: %p execReq complete=%v", i, up, up.Status().Completed) } else { st, ok := up.Status().Value.(*edgeState) if ok { @@ -396,17 +397,18 @@ func debugSchedulerPreUnpark(e *edge, inc []pipe.Sender, updates, allPipes []pip if dep, ok := e.depRequests[up]; ok { index = int(dep.index) } - logrus.Debugf("> update-%d: %p input-%d keys=%d state=%s", i, up, index, len(st.keys), st.state) + log.Debugf("> update-%d: %p input-%d keys=%d state=%s", i, up, index, len(st.keys), st.state) } else { - logrus.Debugf("> update-%d: unknown", i) + 
log.Debugf("> update-%d: unknown", i) } } } } func debugSchedulerPostUnpark(e *edge, inc []pipe.Sender) { + log := bklog.G(context.TODO()) for i, in := range inc { - logrus.Debugf("< incoming-%d: %p completed=%v", i, in, in.Status().Completed) + log.Debugf("< incoming-%d: %p completed=%v", i, in, in.Status().Completed) } - logrus.Debugf("<< unpark %s\n", e.edge.Vertex.Name()) + log.Debugf("<< unpark %s\n", e.edge.Vertex.Name()) } diff --git a/vendor/github.com/moby/buildkit/solver/types.go b/vendor/github.com/moby/buildkit/solver/types.go index a87760692e..a20c1020f2 100644 --- a/vendor/github.com/moby/buildkit/solver/types.go +++ b/vendor/github.com/moby/buildkit/solver/types.go @@ -7,8 +7,9 @@ import ( "github.com/containerd/containerd/content" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/compression" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) // Vertex is a node in a build graph. It defines an interface for a @@ -54,6 +55,7 @@ type VertexOptions struct { Description map[string]string // text values with no special meaning for solver ExportCache *bool // WorkerConstraint + ProgressGroup *pb.ProgressGroup } // Result is an abstract return value for a solve @@ -74,6 +76,7 @@ type ResultProxy interface { Result(context.Context) (CachedResult, error) Release(context.Context) error Definition() *pb.Definition + BuildSources() BuildSources } // CacheExportMode is the type for setting cache exporting modes @@ -92,12 +95,15 @@ const ( // CacheExportOpt defines options for exporting build cache type CacheExportOpt struct { - // Convert can convert a build result to transferable object - Convert func(context.Context, Result) (*Remote, error) + // ResolveRemotes can convert a build result to transferable objects + ResolveRemotes func(context.Context, Result) ([]*Remote, error) // Mode defines a cache export algorithm Mode CacheExportMode // Session is the session group to client (for auth credentials etc) Session session.Group + // CompressionOpt is an option to specify the compression of the object to load. + // If specified, all objects that meet the option will be cached. + CompressionOpt *compression.Config } // CacheExporter can export the artifacts of the build chain @@ -122,7 +128,7 @@ type CacheExporterRecord interface { // from a content provider // TODO: add closer to keep referenced data from getting deleted type Remote struct { - Descriptors []ocispec.Descriptor + Descriptors []ocispecs.Descriptor Provider content.Provider } @@ -190,8 +196,15 @@ type CacheMap struct { // such as oci descriptor content providers and progress writers to be passed to // the cache. Opts should not have any impact on the computed cache key. Opts CacheOpts + + // BuildSources contains build dependencies that will be set from source + // operation. + BuildSources BuildSources } +// BuildSources contains solved build dependencies. 
+type BuildSources map[string]string + // ExportableCacheKey is a cache key connected with an exporter that can export // a chain of cacherecords pointing to that key type ExportableCacheKey struct { diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource.go b/vendor/github.com/moby/buildkit/source/git/gitsource.go index 08125c58e5..9169992f7a 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource.go @@ -18,7 +18,6 @@ import ( "strings" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/session" @@ -27,24 +26,24 @@ import ( "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/source" + srctypes "github.com/moby/buildkit/source/types" + "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/progress/logs" + "github.com/moby/buildkit/util/urlutil" "github.com/moby/locker" "github.com/pkg/errors" - "github.com/sirupsen/logrus" - bolt "go.etcd.io/bbolt" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) var validHex = regexp.MustCompile(`^[a-f0-9]{40}$`) +var defaultBranch = regexp.MustCompile(`refs/heads/(\S+)`) type Opt struct { CacheAccessor cache.Accessor - MetadataStore *metadata.Store } type gitSource struct { - md *metadata.Store cache cache.Accessor locker *locker.Locker } @@ -59,7 +58,6 @@ func Supported() error { func NewSource(opt Opt) (source.Source, error) { gs := &gitSource{ - md: opt.MetadataStore, cache: opt.CacheAccessor, locker: locker.New(), } @@ -67,16 +65,14 @@ func NewSource(opt Opt) (source.Source, error) { } func (gs *gitSource) ID() string { - return source.GitScheme + return srctypes.GitScheme } // needs to be called with repo lock func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []string, g session.Group) (target string, release func(), retErr error) { - remoteKey := "git-remote::" + remote - - sis, err := gs.md.Search(remoteKey) + sis, err := searchGitRemote(ctx, gs.cache, remote) if err != nil { - return "", nil, errors.Wrapf(err, "failed to search metadata for %s", redactCredentials(remote)) + return "", nil, errors.Wrapf(err, "failed to search metadata for %s", urlutil.RedactCredentials(remote)) } var remoteRef cache.MutableRef @@ -85,19 +81,19 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri if err != nil { if errors.Is(err, cache.ErrLocked) { // should never really happen as no other function should access this metadata, but lets be graceful - logrus.Warnf("mutable ref for %s %s was locked: %v", redactCredentials(remote), si.ID(), err) + bklog.G(ctx).Warnf("mutable ref for %s %s was locked: %v", urlutil.RedactCredentials(remote), si.ID(), err) continue } - return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", redactCredentials(remote)) + return "", nil, errors.Wrapf(err, "failed to get mutable ref for %s", urlutil.RedactCredentials(remote)) } break } initializeRepo := false if remoteRef == nil { - remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", redactCredentials(remote)))) + remoteRef, err = gs.cache.New(ctx, nil, g, cache.CachePolicyRetain, cache.WithDescription(fmt.Sprintf("shared git repo for %s", urlutil.RedactCredentials(remote)))) if err != nil { - return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", 
redactCredentials(remote)) + return "", nil, errors.Wrapf(err, "failed to create new mutable for %s", urlutil.RedactCredentials(remote)) } initializeRepo = true } @@ -138,17 +134,9 @@ func (gs *gitSource) mountRemote(ctx context.Context, remote string, auth []stri return "", nil, errors.Wrapf(err, "failed add origin repo at %s", dir) } - // same new remote metadata - si, _ := gs.md.Get(remoteRef.ID()) - v, err := metadata.NewValue(remoteKey) - if err != nil { - return "", nil, err - } - v.Index = remoteKey - - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, "git-remote", v) - }); err != nil { + // save new remote metadata + md := cacheRefMetadata{remoteRef} + if err := md.setGitRemote(remote); err != nil { return "", nil, err } } @@ -305,26 +293,22 @@ func (gs *gitSourceHandler) mountKnownHosts(ctx context.Context) (string, func() return knownHosts.Name(), cleanup, nil } -func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { +func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) { remote := gs.src.Remote - ref := gs.src.Ref - if ref == "" { - ref = "master" - } gs.locker.Lock(remote) defer gs.locker.Unlock(remote) - if isCommitSHA(ref) { - ref = gs.shaToCacheKey(ref) - gs.cacheKey = ref - return ref, nil, true, nil + if ref := gs.src.Ref; ref != "" && isCommitSHA(ref) { + cacheKey := gs.shaToCacheKey(ref) + gs.cacheKey = cacheKey + return cacheKey, ref, nil, true, nil } gs.getAuthToken(ctx, g) gitDir, unmountGitDir, err := gs.mountRemote(ctx, remote, gs.auth, g) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } defer unmountGitDir() @@ -333,7 +317,7 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index var unmountSock func() error sock, unmountSock, err = gs.mountSSHAuthSock(ctx, gs.src.MountSSHSock, g) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } defer unmountSock() } @@ -343,42 +327,45 @@ func (gs *gitSourceHandler) CacheKey(ctx context.Context, g session.Group, index var unmountKnownHosts func() error knownHosts, unmountKnownHosts, err = gs.mountKnownHosts(ctx) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } defer unmountKnownHosts() } + ref := gs.src.Ref + if ref == "" { + ref, err = getDefaultBranch(ctx, gitDir, "", sock, knownHosts, gs.auth, gs.src.Remote) + if err != nil { + return "", "", nil, false, err + } + } + // TODO: should we assume that remote tag is immutable? add a timer? 
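
Reviewer note on the hunk above: the hardcoded `master` fallback is removed from CacheKey (and, further down, from Snapshot); when no ref is given, the new getDefaultBranch helper asks the remote itself via `ls-remote --symref`. A minimal standalone sketch of that lookup, assuming plain exec.Command in place of the vendored gitWithinDir wrapper (the helper name and error messages here are illustrative only):

	// defaultBranchOf resolves the remote's HEAD symref the same way the vendored
	// getDefaultBranch does; needs os/exec, regexp, and fmt. The first line of
	// `git ls-remote --symref <url> HEAD` looks like "ref: refs/heads/main\tHEAD".
	func defaultBranchOf(remoteURL string) (string, error) {
		out, err := exec.Command("git", "ls-remote", "--symref", remoteURL, "HEAD").Output()
		if err != nil {
			return "", fmt.Errorf("fetching default branch for %s: %w", remoteURL, err)
		}
		m := regexp.MustCompile(`refs/heads/(\S+)`).FindStringSubmatch(string(out))
		if len(m) != 2 {
			return "", fmt.Errorf("could not find default branch for %s", remoteURL)
		}
		return m[1], nil
	}
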
buf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "ls-remote", "origin", ref) if err != nil { - return "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", redactCredentials(remote)) + return "", "", nil, false, errors.Wrapf(err, "failed to fetch remote %s", urlutil.RedactCredentials(remote)) } out := buf.String() idx := strings.Index(out, "\t") if idx == -1 { - return "", nil, false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out)) + return "", "", nil, false, errors.Errorf("repository does not contain ref %s, output: %q", ref, string(out)) } sha := string(out[:idx]) if !isCommitSHA(sha) { - return "", nil, false, errors.Errorf("invalid commit sha %q", sha) + return "", "", nil, false, errors.Errorf("invalid commit sha %q", sha) } - sha = gs.shaToCacheKey(sha) - gs.cacheKey = sha - return sha, nil, true, nil + cacheKey := gs.shaToCacheKey(sha) + gs.cacheKey = cacheKey + return cacheKey, sha, nil, true, nil } func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out cache.ImmutableRef, retErr error) { - ref := gs.src.Ref - if ref == "" { - ref = "master" - } - cacheKey := gs.cacheKey if cacheKey == "" { var err error - cacheKey, _, _, err = gs.CacheKey(ctx, g, 0) + cacheKey, _, _, _, err = gs.CacheKey(ctx, g, 0) if err != nil { return nil, err } @@ -386,16 +373,16 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out gs.getAuthToken(ctx, g) - snapshotKey := "git-snapshot::" + cacheKey + ":" + gs.src.Subdir + snapshotKey := cacheKey + ":" + gs.src.Subdir gs.locker.Lock(snapshotKey) defer gs.locker.Unlock(snapshotKey) - sis, err := gs.md.Search(snapshotKey) + sis, err := searchGitSnapshot(ctx, gs.cache, snapshotKey) if err != nil { return nil, errors.Wrapf(err, "failed to search metadata for %s", snapshotKey) } if len(sis) > 0 { - return gs.cache.Get(ctx, sis[0].ID()) + return gs.cache.Get(ctx, sis[0].ID(), nil) } gs.locker.Lock(gs.src.Remote) @@ -426,6 +413,14 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out defer unmountKnownHosts() } + ref := gs.src.Ref + if ref == "" { + ref, err = getDefaultBranch(ctx, gitDir, "", sock, knownHosts, gs.auth, gs.src.Remote) + if err != nil { + return nil, err + } + } + doFetch := true if isCommitSHA(ref) { // skip fetch if commit already exists @@ -454,13 +449,17 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out // TODO: is there a better way to do this? 
} if _, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, args...); err != nil { - return nil, errors.Wrapf(err, "failed to fetch remote %s", redactCredentials(gs.src.Remote)) + return nil, errors.Wrapf(err, "failed to fetch remote %s", urlutil.RedactCredentials(gs.src.Remote)) + } + _, err = gitWithinDir(ctx, gitDir, "", sock, knownHosts, nil, "reflog", "expire", "--all", "--expire=now") + if err != nil { + return nil, errors.Wrapf(err, "failed to expire reflog for remote %s", urlutil.RedactCredentials(gs.src.Remote)) } } checkoutRef, err := gs.cache.New(ctx, nil, g, cache.WithRecordType(client.UsageRecordTypeGitCheckout), cache.WithDescription(fmt.Sprintf("git snapshot for %s#%s", gs.src.Remote, ref))) if err != nil { - return nil, errors.Wrapf(err, "failed to create new mutable for %s", redactCredentials(gs.src.Remote)) + return nil, errors.Wrapf(err, "failed to create new mutable for %s", urlutil.RedactCredentials(gs.src.Remote)) } defer func() { @@ -502,8 +501,17 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out if err != nil { return nil, err } + + gitCatFileBuf, err := gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "cat-file", "-t", ref) + if err != nil { + return nil, err + } + isAnnotatedTag := strings.TrimSpace(gitCatFileBuf.String()) == "tag" + pullref := ref - if isCommitSHA(ref) { + if isAnnotatedTag { + pullref += ":refs/tags/" + pullref + } else if isCommitSHA(ref) { pullref = "refs/buildkit/" + identity.NewID() _, err = gitWithinDir(ctx, gitDir, "", sock, knownHosts, gs.auth, "update-ref", pullref, ref) if err != nil { @@ -518,7 +526,18 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } _, err = gitWithinDir(ctx, checkoutDirGit, checkoutDir, sock, knownHosts, nil, "checkout", "FETCH_HEAD") if err != nil { - return nil, errors.Wrapf(err, "failed to checkout remote %s", redactCredentials(gs.src.Remote)) + return nil, errors.Wrapf(err, "failed to checkout remote %s", urlutil.RedactCredentials(gs.src.Remote)) + } + _, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "remote", "set-url", "origin", urlutil.RedactCredentials(gs.src.Remote)) + if err != nil { + return nil, errors.Wrapf(err, "failed to set remote origin to %s", urlutil.RedactCredentials(gs.src.Remote)) + } + _, err = gitWithinDir(ctx, checkoutDirGit, "", sock, knownHosts, nil, "reflog", "expire", "--all", "--expire=now") + if err != nil { + return nil, errors.Wrapf(err, "failed to expire reflog for remote %s", urlutil.RedactCredentials(gs.src.Remote)) + } + if err := os.Remove(filepath.Join(checkoutDirGit, "FETCH_HEAD")); err != nil && !errors.Is(err, os.ErrNotExist) { + return nil, errors.Wrapf(err, "failed to remove FETCH_HEAD for remote %s", urlutil.RedactCredentials(gs.src.Remote)) } gitDir = checkoutDirGit } else { @@ -531,7 +550,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } _, err = gitWithinDir(ctx, gitDir, cd, sock, knownHosts, nil, "checkout", ref, "--", ".") if err != nil { - return nil, errors.Wrapf(err, "failed to checkout remote %s", redactCredentials(gs.src.Remote)) + return nil, errors.Wrapf(err, "failed to checkout remote %s", urlutil.RedactCredentials(gs.src.Remote)) } if subdir != "." 
{ d, err := os.Open(filepath.Join(cd, subdir)) @@ -564,7 +583,7 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out _, err = gitWithinDir(ctx, gitDir, checkoutDir, sock, knownHosts, gs.auth, "submodule", "update", "--init", "--recursive", "--depth=1") if err != nil { - return nil, errors.Wrapf(err, "failed to update submodules for %s", redactCredentials(gs.src.Remote)) + return nil, errors.Wrapf(err, "failed to update submodules for %s", urlutil.RedactCredentials(gs.src.Remote)) } if idmap := mount.IdentityMapping(); idmap != nil { @@ -592,19 +611,10 @@ func (gs *gitSourceHandler) Snapshot(ctx context.Context, g session.Group) (out } }() - si, _ := gs.md.Get(snap.ID()) - v, err := metadata.NewValue(snapshotKey) - if err != nil { + md := cacheRefMetadata{snap} + if err := md.setGitSnapshot(snapshotKey); err != nil { return nil, err } - v.Index = snapshotKey - - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, "git-snapshot", v) - }); err != nil { - return nil, err - } - return snap, nil } @@ -630,11 +640,16 @@ func getGitSSHCommand(knownHosts string) string { return gitSSHCommand } -func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...string) (*bytes.Buffer, error) { +func git(ctx context.Context, dir, sshAuthSock, knownHosts string, args ...string) (_ *bytes.Buffer, err error) { for { - stdout, stderr := logs.NewLogStreams(ctx, false) + stdout, stderr, flush := logs.NewLogStreams(ctx, false) defer stdout.Close() defer stderr.Close() + defer func() { + if err != nil { + flush() + } + }() cmd := exec.Command("git", args...) cmd.Dir = dir // some commands like submodule require this buf := bytes.NewBuffer(nil) @@ -686,3 +701,54 @@ func tokenScope(remote string) string { } return remote } + +// getDefaultBranch gets the default branch of a repository using ls-remote +func getDefaultBranch(ctx context.Context, gitDir, workDir, sshAuthSock, knownHosts string, auth []string, remoteURL string) (string, error) { + buf, err := gitWithinDir(ctx, gitDir, workDir, sshAuthSock, knownHosts, auth, "ls-remote", "--symref", remoteURL, "HEAD") + if err != nil { + return "", errors.Wrapf(err, "error fetching default branch for repository %s", urlutil.RedactCredentials(remoteURL)) + } + + ss := defaultBranch.FindAllStringSubmatch(buf.String(), -1) + if len(ss) == 0 || len(ss[0]) != 2 { + return "", errors.Errorf("could not find default branch for repository: %s", urlutil.RedactCredentials(remoteURL)) + } + return ss[0][1], nil +} + +const keyGitRemote = "git-remote" +const gitRemoteIndex = keyGitRemote + "::" +const keyGitSnapshot = "git-snapshot" +const gitSnapshotIndex = keyGitSnapshot + "::" + +func search(ctx context.Context, store cache.MetadataStore, key string, idx string) ([]cacheRefMetadata, error) { + var results []cacheRefMetadata + mds, err := store.Search(ctx, idx+key) + if err != nil { + return nil, err + } + for _, md := range mds { + results = append(results, cacheRefMetadata{md}) + } + return results, nil +} + +func searchGitRemote(ctx context.Context, store cache.MetadataStore, remote string) ([]cacheRefMetadata, error) { + return search(ctx, store, remote, gitRemoteIndex) +} + +func searchGitSnapshot(ctx context.Context, store cache.MetadataStore, key string) ([]cacheRefMetadata, error) { + return search(ctx, store, key, gitSnapshotIndex) +} + +type cacheRefMetadata struct { + cache.RefMetadata +} + +func (md cacheRefMetadata) setGitSnapshot(key string) error { + return md.SetString(keyGitSnapshot, key, 
gitSnapshotIndex+key) +} + +func (md cacheRefMetadata) setGitRemote(key string) error { + return md.SetString(keyGitRemote, key, gitRemoteIndex+key) +} diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go index 5d35dd9cb0..23f289c55d 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_unix.go @@ -1,16 +1,84 @@ +//go:build !windows // +build !windows package git import ( "context" + "os" "os/exec" + "os/signal" "syscall" "time" + + "github.com/docker/docker/pkg/reexec" + "golang.org/x/sys/unix" ) +const ( + gitCmd = "umask-git" +) + +func init() { + reexec.Register(gitCmd, gitMain) +} + +func gitMain() { + // Need standard user umask for git process. + unix.Umask(0022) + + // Reexec git command + cmd := exec.Command(os.Args[1], os.Args[2:]...) + cmd.SysProcAttr = &unix.SysProcAttr{ + Setpgid: true, + Pdeathsig: unix.SIGTERM, + } + cmd.Stdout = os.Stdout + cmd.Stderr = os.Stderr + cmd.Stdin = os.Stdin + + // Forward all signals + sigc := make(chan os.Signal, 1) + done := make(chan struct{}) + signal.Notify(sigc) + go func() { + for { + select { + case sig := <-sigc: + if cmd.Process == nil { + continue + } + switch sig { + case unix.SIGINT, unix.SIGTERM, unix.SIGKILL: + _ = unix.Kill(-cmd.Process.Pid, sig.(unix.Signal)) + default: + _ = cmd.Process.Signal(sig) + } + case <-done: + return + } + } + }() + + err := cmd.Run() + close(done) + if err != nil { + if exiterr, ok := err.(*exec.ExitError); ok { + switch status := exiterr.Sys().(type) { + case unix.WaitStatus: + os.Exit(status.ExitStatus()) + case syscall.WaitStatus: + os.Exit(status.ExitStatus()) + } + } + os.Exit(1) + } + os.Exit(0) +} + func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { - cmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + cmd.Path = reexec.Self() + cmd.Args = append([]string{gitCmd}, cmd.Args...) if err := cmd.Start(); err != nil { return err } @@ -18,12 +86,12 @@ func runProcessGroup(ctx context.Context, cmd *exec.Cmd) error { go func() { select { case <-ctx.Done(): - syscall.Kill(-cmd.Process.Pid, syscall.SIGTERM) + _ = unix.Kill(-cmd.Process.Pid, unix.SIGTERM) go func() { select { case <-waitDone: case <-time.After(10 * time.Second): - syscall.Kill(-cmd.Process.Pid, syscall.SIGKILL) + _ = unix.Kill(-cmd.Process.Pid, unix.SIGKILL) } }() case <-waitDone: diff --git a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go index 3435c8f9ee..a1952ecb0c 100644 --- a/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go +++ b/vendor/github.com/moby/buildkit/source/git/gitsource_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package git diff --git a/vendor/github.com/moby/buildkit/source/git/redact_credentials.go b/vendor/github.com/moby/buildkit/source/git/redact_credentials.go deleted file mode 100644 index e59324d7e0..0000000000 --- a/vendor/github.com/moby/buildkit/source/git/redact_credentials.go +++ /dev/null @@ -1,17 +0,0 @@ -// +build go1.15 - -package git - -import "net/url" - -// redactCredentials takes a URL and redacts a password from it. -// e.g. 
"https://user:password@github.com/user/private-repo-failure.git" will be changed to -// "https://user:xxxxx@github.com/user/private-repo-failure.git" -func redactCredentials(s string) string { - u, err := url.Parse(s) - if err != nil { - return s // string is not a URL, just return it - } - - return u.Redacted() -} diff --git a/vendor/github.com/moby/buildkit/source/git/redact_credentials_go114.go b/vendor/github.com/moby/buildkit/source/git/redact_credentials_go114.go deleted file mode 100644 index b2aa314042..0000000000 --- a/vendor/github.com/moby/buildkit/source/git/redact_credentials_go114.go +++ /dev/null @@ -1,30 +0,0 @@ -// +build !go1.15 - -package git - -import "net/url" - -// redactCredentials takes a URL and redacts a password from it. -// e.g. "https://user:password@github.com/user/private-repo-failure.git" will be changed to -// "https://user:xxxxx@github.com/user/private-repo-failure.git" -func redactCredentials(s string) string { - u, err := url.Parse(s) - if err != nil { - return s // string is not a URL, just return it - } - - return urlRedacted(u) -} - -// urlRedacted comes from go's url.Redacted() which isn't available on go < 1.15 -func urlRedacted(u *url.URL) string { - if u == nil { - return "" - } - - ru := *u - if _, has := ru.User.Password(); has { - ru.User = url.UserPassword(ru.User.Username(), "xxxxx") - } - return ru.String() -} diff --git a/vendor/github.com/moby/buildkit/source/gitidentifier.go b/vendor/github.com/moby/buildkit/source/gitidentifier.go index 1547c71376..6055c94e2f 100644 --- a/vendor/github.com/moby/buildkit/source/gitidentifier.go +++ b/vendor/github.com/moby/buildkit/source/gitidentifier.go @@ -5,6 +5,7 @@ import ( "path" "strings" + srctypes "github.com/moby/buildkit/source/types" "github.com/moby/buildkit/util/sshutil" ) @@ -53,7 +54,7 @@ func NewGitIdentifier(remoteURL string) (*GitIdentifier, error) { } func (i *GitIdentifier) ID() string { - return "git" + return srctypes.GitScheme } // isGitTransport returns true if the provided str is a git transport by inspecting @@ -64,7 +65,7 @@ func isGitTransport(str string) bool { func getRefAndSubdir(fragment string) (ref string, subdir string) { refAndDir := strings.SplitN(fragment, ":", 2) - ref = "master" + ref = "" if len(refAndDir[0]) != 0 { ref = refAndDir[0] } diff --git a/vendor/github.com/moby/buildkit/source/http/httpsource.go b/vendor/github.com/moby/buildkit/source/http/httpsource.go index dd9c12763b..968c635651 100644 --- a/vendor/github.com/moby/buildkit/source/http/httpsource.go +++ b/vendor/github.com/moby/buildkit/source/http/httpsource.go @@ -17,26 +17,23 @@ import ( "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/session" "github.com/moby/buildkit/snapshot" "github.com/moby/buildkit/solver" "github.com/moby/buildkit/source" + srctypes "github.com/moby/buildkit/source/types" "github.com/moby/buildkit/util/tracing" "github.com/moby/locker" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - bolt "go.etcd.io/bbolt" ) type Opt struct { CacheAccessor cache.Accessor - MetadataStore *metadata.Store Transport http.RoundTripper } type httpSource struct { - md *metadata.Store cache cache.Accessor locker *locker.Locker transport http.RoundTripper @@ -48,7 +45,6 @@ func NewSource(opt Opt) (source.Source, error) { transport = tracing.DefaultTransport } hs := &httpSource{ - md: opt.MetadataStore, cache: opt.CacheAccessor, locker: locker.New(), transport: transport, @@ -57,7 
+53,7 @@ func NewSource(opt Opt) (source.Source, error) { } func (hs *httpSource) ID() string { - return source.HTTPSScheme + return srctypes.HTTPSScheme } type httpSourceHandler struct { @@ -123,41 +119,41 @@ func (hs *httpSourceHandler) formatCacheKey(filename string, dgst digest.Digest, return digest.FromBytes(dt) } -func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { +func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) { if hs.src.Checksum != "" { hs.cacheKey = hs.src.Checksum - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), nil, true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, nil), hs.src.Checksum, "").String(), hs.src.Checksum.String(), nil, true, nil } uh, err := hs.urlHash() if err != nil { - return "", nil, false, nil + return "", "", nil, false, nil } // look up metadata(previously stored headers) for that URL - sis, err := hs.md.Search(uh.String()) + mds, err := searchHTTPURLDigest(ctx, hs.cache, uh) if err != nil { - return "", nil, false, errors.Wrapf(err, "failed to search metadata for %s", uh) + return "", "", nil, false, errors.Wrapf(err, "failed to search metadata for %s", uh) } req, err := http.NewRequest("GET", hs.src.URL, nil) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } req = req.WithContext(ctx) - m := map[string]*metadata.StorageItem{} + m := map[string]cacheRefMetadata{} // If we request a single ETag in 'If-None-Match', some servers omit the // unambiguous ETag in their response. // See: https://github.com/moby/buildkit/issues/905 var onlyETag string - if len(sis) > 0 { - for _, si := range sis { + if len(mds) > 0 { + for _, md := range mds { // if metaDigest := getMetaDigest(si); metaDigest == hs.formatCacheKey("") { - if etag := getETag(si); etag != "" { - if dgst := getChecksum(si); dgst != "" { - m[etag] = si + if etag := md.getETag(); etag != "" { + if dgst := md.getHTTPChecksum(); dgst != "" { + m[etag] = md } } // } @@ -185,21 +181,21 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde resp, err := client.Do(req) if err == nil { if resp.StatusCode == http.StatusOK || resp.StatusCode == http.StatusNotModified { - respETag := resp.Header.Get("ETag") + respETag := etagValue(resp.Header.Get("ETag")) // If a 304 is returned without an ETag and we had only sent one ETag, // the response refers to the ETag we asked about. 
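
One subtlety in this hunk: every ETag read from a response now passes through etagValue (defined at the bottom of this file's diff), which strips the weak-validator prefix so a `W/"..."` value from the server matches the strong value stored in metadata. A quick illustration of that normalization, with invented header values:

	// Same normalization as etagValue; the header values here are made up.
	for _, e := range []string{"W/\"5f0e8-65b2\"", "\"5f0e8-65b2\""} {
		fmt.Println(strings.TrimPrefix(e, "W/")) // both print the same strong value
	}
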
if respETag == "" && onlyETag != "" && resp.StatusCode == http.StatusNotModified { respETag = onlyETag } - si, ok := m[respETag] + md, ok := m[respETag] if ok { - hs.refID = si.ID() - dgst := getChecksum(si) + hs.refID = md.ID() + dgst := md.getHTTPChecksum() if dgst != "" { - modTime := getModTime(si) + modTime := md.getHTTPModTime() resp.Body.Close() - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), nil, true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), dgst.String(), nil, true, nil } } } @@ -210,13 +206,13 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde resp, err := client.Do(req) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } if resp.StatusCode < 200 || resp.StatusCode >= 400 { - return "", nil, false, errors.Errorf("invalid response status %d", resp.StatusCode) + return "", "", nil, false, errors.Errorf("invalid response status %d", resp.StatusCode) } if resp.StatusCode == http.StatusNotModified { - respETag := resp.Header.Get("ETag") + respETag := etagValue(resp.Header.Get("ETag")) if respETag == "" && onlyETag != "" { respETag = onlyETag @@ -224,29 +220,29 @@ func (hs *httpSourceHandler) CacheKey(ctx context.Context, g session.Group, inde // to .save() resp.Header.Set("ETag", onlyETag) } - si, ok := m[respETag] + md, ok := m[respETag] if !ok { - return "", nil, false, errors.Errorf("invalid not-modified ETag: %v", respETag) + return "", "", nil, false, errors.Errorf("invalid not-modified ETag: %v", respETag) } - hs.refID = si.ID() - dgst := getChecksum(si) + hs.refID = md.ID() + dgst := md.getHTTPChecksum() if dgst == "" { - return "", nil, false, errors.Errorf("invalid metadata change") + return "", "", nil, false, errors.Errorf("invalid metadata change") } - modTime := getModTime(si) + modTime := md.getHTTPModTime() resp.Body.Close() - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), nil, true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, modTime).String(), dgst.String(), nil, true, nil } ref, dgst, err := hs.save(ctx, resp, g) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } ref.Release(context.TODO()) hs.cacheKey = dgst - return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), nil, true, nil + return hs.formatCacheKey(getFileName(hs.src.URL, hs.src.Filename, resp), dgst, resp.Header.Get("Last-Modified")).String(), dgst.String(), nil, true, nil } func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response, s session.Group) (ref cache.ImmutableRef, dgst digest.Digest, retErr error) { @@ -348,24 +344,29 @@ func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response, s se return nil, "", err } newRef = nil + md := cacheRefMetadata{ref} hs.refID = ref.ID() dgst = digest.NewDigest(digest.SHA256, h) if respETag := resp.Header.Get("ETag"); respETag != "" { - setETag(ref.Metadata(), respETag) + respETag = etagValue(respETag) + if err := md.setETag(respETag); err != nil { + return nil, "", err + } uh, err := hs.urlHash() if err != nil { return nil, "", err } - setChecksum(ref.Metadata(), uh.String(), dgst) - if err := ref.Metadata().Commit(); err != nil { + if err := md.setHTTPChecksum(uh, dgst); err != nil { return nil, "", err } } if modTime := resp.Header.Get("Last-Modified"); modTime != "" { - 
setModTime(ref.Metadata(), modTime) + if err := md.setHTTPModTime(modTime); err != nil { + return nil, "", err + } } return ref, dgst, nil @@ -373,7 +374,7 @@ func (hs *httpSourceHandler) save(ctx context.Context, resp *http.Response, s se func (hs *httpSourceHandler) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) { if hs.refID != "" { - ref, err := hs.cache.Get(ctx, hs.refID) + ref, err := hs.cache.Get(ctx, hs.refID, nil) if err == nil { return ref, nil } @@ -404,84 +405,6 @@ func (hs *httpSourceHandler) Snapshot(ctx context.Context, g session.Group) (cac return ref, nil } -const keyETag = "etag" -const keyChecksum = "http.checksum" -const keyModTime = "http.modtime" - -func setETag(si *metadata.StorageItem, s string) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrap(err, "failed to create etag value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyETag, v) - }) - return nil -} - -func getETag(si *metadata.StorageItem) string { - v := si.Get(keyETag) - if v == nil { - return "" - } - var etag string - if err := v.Unmarshal(&etag); err != nil { - return "" - } - return etag -} - -func setModTime(si *metadata.StorageItem, s string) error { - v, err := metadata.NewValue(s) - if err != nil { - return errors.Wrap(err, "failed to create modtime value") - } - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyModTime, v) - }) - return nil -} - -func getModTime(si *metadata.StorageItem) string { - v := si.Get(keyModTime) - if v == nil { - return "" - } - var modTime string - if err := v.Unmarshal(&modTime); err != nil { - return "" - } - return modTime -} - -func setChecksum(si *metadata.StorageItem, url string, d digest.Digest) error { - v, err := metadata.NewValue(d) - if err != nil { - return errors.Wrap(err, "failed to create checksum value") - } - v.Index = url - si.Queue(func(b *bolt.Bucket) error { - return si.SetValue(b, keyChecksum, v) - }) - return nil -} - -func getChecksum(si *metadata.StorageItem) digest.Digest { - v := si.Get(keyChecksum) - if v == nil { - return "" - } - var dgstStr string - if err := v.Unmarshal(&dgstStr); err != nil { - return "" - } - dgst, err := digest.Parse(dgstStr) - if err != nil { - return "" - } - return dgst -} - func getFileName(urlStr, manualFilename string, resp *http.Response) string { if manualFilename != "" { return manualFilename @@ -505,3 +428,52 @@ func getFileName(urlStr, manualFilename string, resp *http.Response) string { } return "download" } + +func searchHTTPURLDigest(ctx context.Context, store cache.MetadataStore, dgst digest.Digest) ([]cacheRefMetadata, error) { + var results []cacheRefMetadata + mds, err := store.Search(ctx, string(dgst)) + if err != nil { + return nil, err + } + for _, md := range mds { + results = append(results, cacheRefMetadata{md}) + } + return results, nil +} + +type cacheRefMetadata struct { + cache.RefMetadata +} + +const keyHTTPChecksum = "http.checksum" +const keyETag = "etag" +const keyModTime = "http.modtime" + +func (md cacheRefMetadata) getHTTPChecksum() digest.Digest { + return digest.Digest(md.GetString(keyHTTPChecksum)) +} + +func (md cacheRefMetadata) setHTTPChecksum(urlDgst digest.Digest, d digest.Digest) error { + return md.SetString(keyHTTPChecksum, d.String(), urlDgst.String()) +} + +func (md cacheRefMetadata) getETag() string { + return md.GetString(keyETag) +} + +func (md cacheRefMetadata) setETag(s string) error { + return md.SetString(keyETag, s, "") +} + +func (md cacheRefMetadata) getHTTPModTime() 
string { + return md.GetString(keyModTime) +} + +func (md cacheRefMetadata) setHTTPModTime(s string) error { + return md.SetString(keyModTime, s, "") +} + +func etagValue(v string) string { + // remove weak for direct comparison + return strings.TrimPrefix(v, "W/") +} diff --git a/vendor/github.com/moby/buildkit/source/identifier.go b/vendor/github.com/moby/buildkit/source/identifier.go index 4c1251fc70..1032399e11 100644 --- a/vendor/github.com/moby/buildkit/source/identifier.go +++ b/vendor/github.com/moby/buildkit/source/identifier.go @@ -8,9 +8,11 @@ import ( "github.com/containerd/containerd/reference" "github.com/moby/buildkit/client" "github.com/moby/buildkit/solver/pb" + srctypes "github.com/moby/buildkit/source/types" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" + "github.com/tonistiigi/fsutil" ) var ( @@ -26,14 +28,6 @@ const ( ResolveModePreferLocal ) -const ( - DockerImageScheme = "docker-image" - GitScheme = "git" - LocalScheme = "local" - HTTPScheme = "http" - HTTPSScheme = "https" -) - type Identifier interface { ID() string // until sources are in process this string comparison could be avoided } @@ -46,15 +40,15 @@ func FromString(s string) (Identifier, error) { } switch parts[0] { - case DockerImageScheme: + case srctypes.DockerImageScheme: return NewImageIdentifier(parts[1]) - case GitScheme: + case srctypes.GitScheme: return NewGitIdentifier(parts[1]) - case LocalScheme: + case srctypes.LocalScheme: return NewLocalIdentifier(parts[1]) - case HTTPSScheme: + case srctypes.HTTPSScheme: return NewHTTPIdentifier(parts[1], true) - case HTTPScheme: + case srctypes.HTTPScheme: return NewHTTPIdentifier(parts[1], false) default: return nil, errors.Wrapf(errNotFound, "unknown schema %s", parts[0]) @@ -69,7 +63,7 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { if id, ok := id.(*ImageIdentifier); ok { if platform != nil { - id.Platform = &specs.Platform{ + id.Platform = &ocispecs.Platform{ OS: platform.OS, Architecture: platform.Architecture, Variant: platform.Variant, @@ -146,6 +140,13 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { id.FollowPaths = paths case pb.AttrSharedKeyHint: id.SharedKeyHint = v + case pb.AttrLocalDiffer: + switch v { + case pb.AttrLocalDifferMetadata, "": + id.Differ = fsutil.DiffMetadata + case pb.AttrLocalDifferNone: + id.Differ = fsutil.DiffNone + } } } } @@ -186,7 +187,7 @@ func FromLLB(op *pb.Op_Source, platform *pb.Platform) (Identifier, error) { type ImageIdentifier struct { Reference reference.Spec - Platform *specs.Platform + Platform *ocispecs.Platform ResolveMode ResolveMode RecordType client.UsageRecordType } @@ -204,7 +205,7 @@ func NewImageIdentifier(str string) (*ImageIdentifier, error) { } func (*ImageIdentifier) ID() string { - return DockerImageScheme + return srctypes.DockerImageScheme } type LocalIdentifier struct { @@ -214,6 +215,7 @@ type LocalIdentifier struct { ExcludePatterns []string FollowPaths []string SharedKeyHint string + Differ fsutil.DiffType } func NewLocalIdentifier(str string) (*LocalIdentifier, error) { @@ -221,7 +223,7 @@ func NewLocalIdentifier(str string) (*LocalIdentifier, error) { } func (*LocalIdentifier) ID() string { - return LocalScheme + return srctypes.LocalScheme } func NewHTTPIdentifier(str string, tls bool) (*HTTPIdentifier, error) { @@ -243,7 +245,7 @@ type HTTPIdentifier struct { } func 
(*HTTPIdentifier) ID() string { - return HTTPSScheme + return srctypes.HTTPSScheme } func (r ResolveMode) String() string { diff --git a/vendor/github.com/moby/buildkit/source/local/local.go b/vendor/github.com/moby/buildkit/source/local/local.go index 3d385ab18f..d2cd9d989c 100644 --- a/vendor/github.com/moby/buildkit/source/local/local.go +++ b/vendor/github.com/moby/buildkit/source/local/local.go @@ -6,10 +6,12 @@ import ( "fmt" "time" + srctypes "github.com/moby/buildkit/source/types" + "github.com/moby/buildkit/util/bklog" + "github.com/docker/docker/pkg/idtools" "github.com/moby/buildkit/cache" "github.com/moby/buildkit/cache/contenthash" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/session" "github.com/moby/buildkit/session/filesync" @@ -19,37 +21,28 @@ import ( "github.com/moby/buildkit/util/progress" digest "github.com/opencontainers/go-digest" "github.com/pkg/errors" - "github.com/sirupsen/logrus" "github.com/tonistiigi/fsutil" fstypes "github.com/tonistiigi/fsutil/types" - bolt "go.etcd.io/bbolt" "golang.org/x/time/rate" - "google.golang.org/grpc/codes" - "google.golang.org/grpc/status" ) -const keySharedKey = "local.sharedKey" - type Opt struct { CacheAccessor cache.Accessor - MetadataStore *metadata.Store } func NewSource(opt Opt) (source.Source, error) { ls := &localSource{ cm: opt.CacheAccessor, - md: opt.MetadataStore, } return ls, nil } type localSource struct { cm cache.Accessor - md *metadata.Store } func (ls *localSource) ID() string { - return source.LocalScheme + return srctypes.LocalScheme } func (ls *localSource) Resolve(ctx context.Context, id source.Identifier, sm *session.Manager, _ solver.Vertex) (source.SourceInstance, error) { @@ -71,13 +64,13 @@ type localSourceHandler struct { *localSource } -func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) { +func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) { sessionID := ls.src.SessionID if sessionID == "" { id := g.SessionIterator().NextSession() if id == "" { - return "", nil, false, errors.New("could not access local files without session") + return "", "", nil, false, errors.New("could not access local files without session") } sessionID = id } @@ -88,65 +81,88 @@ func (ls *localSourceHandler) CacheKey(ctx context.Context, g session.Group, ind FollowPaths []string }{SessionID: sessionID, IncludePatterns: ls.src.IncludePatterns, ExcludePatterns: ls.src.ExcludePatterns, FollowPaths: ls.src.FollowPaths}) if err != nil { - return "", nil, false, err + return "", "", nil, false, err } - return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), nil, true, nil + return "session:" + ls.src.Name + ":" + digest.FromBytes(dt).String(), digest.FromBytes(dt).String(), nil, true, nil } func (ls *localSourceHandler) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) { + sessionID := ls.src.SessionID + if sessionID == "" { + return ls.snapshotWithAnySession(ctx, g) + } + + timeoutCtx, cancel := context.WithTimeout(ctx, 5*time.Second) + defer cancel() + + caller, err := ls.sm.Get(timeoutCtx, sessionID, false) + if err != nil { + return ls.snapshotWithAnySession(ctx, g) + } + + ref, err := ls.snapshot(ctx, caller) + if err != nil { + var serr filesync.InvalidSessionError + if errors.As(err, &serr) { + return ls.snapshotWithAnySession(ctx, g) + } + return nil, err + } + return 
ref, nil +} + +func (ls *localSourceHandler) snapshotWithAnySession(ctx context.Context, g session.Group) (cache.ImmutableRef, error) { var ref cache.ImmutableRef err := ls.sm.Any(ctx, g, func(ctx context.Context, _ string, c session.Caller) error { - r, err := ls.snapshot(ctx, g, c) + r, err := ls.snapshot(ctx, c) if err != nil { return err } ref = r return nil }) - if err != nil { - return nil, err - } - return ref, nil + return ref, err } -func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, caller session.Caller) (out cache.ImmutableRef, retErr error) { - sharedKey := keySharedKey + ":" + ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid) +func (ls *localSourceHandler) snapshot(ctx context.Context, caller session.Caller) (out cache.ImmutableRef, retErr error) { + sharedKey := ls.src.Name + ":" + ls.src.SharedKeyHint + ":" + caller.SharedKey() // TODO: replace caller.SharedKey() with source based hint from client(absolute-path+nodeid) var mutable cache.MutableRef - sis, err := ls.md.Search(sharedKey) + sis, err := searchSharedKey(ctx, ls.cm, sharedKey) if err != nil { return nil, err } for _, si := range sis { if m, err := ls.cm.GetMutable(ctx, si.ID()); err == nil { - logrus.Debugf("reusing ref for local: %s", m.ID()) + bklog.G(ctx).Debugf("reusing ref for local: %s", m.ID()) mutable = m break + } else { + bklog.G(ctx).Debugf("not reusing ref %s for local: %v", si.ID(), err) } } if mutable == nil { - m, err := ls.cm.New(ctx, nil, s, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) + m, err := ls.cm.New(ctx, nil, nil, cache.CachePolicyRetain, cache.WithRecordType(client.UsageRecordTypeLocalSource), cache.WithDescription(fmt.Sprintf("local source for %s", ls.src.Name))) if err != nil { return nil, err } mutable = m - logrus.Debugf("new ref for local: %s", mutable.ID()) + bklog.G(ctx).Debugf("new ref for local: %s", mutable.ID()) } defer func() { if retErr != nil && mutable != nil { // on error remove the record as checksum update is in undefined state - cache.CachePolicyDefault(mutable) - if err := mutable.Metadata().Commit(); err != nil { - logrus.Errorf("failed to reset mutable cachepolicy: %v", err) + if err := mutable.SetCachePolicyDefault(); err != nil { + bklog.G(ctx).Errorf("failed to reset mutable cachepolicy: %v", err) } - contenthash.ClearCacheContext(mutable.Metadata()) + contenthash.ClearCacheContext(mutable) go mutable.Release(context.TODO()) } }() - mount, err := mutable.Mount(ctx, false, s) + mount, err := mutable.Mount(ctx, false, nil) if err != nil { return nil, err } @@ -164,7 +180,7 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal } }() - cc, err := contenthash.GetCacheContext(ctx, mutable.Metadata(), mount.IdentityMapping()) + cc, err := contenthash.GetCacheContext(ctx, mutable) if err != nil { return nil, err } @@ -178,6 +194,7 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal DestDir: dest, CacheUpdater: &cacheUpdater{cc, mount.IdentityMapping()}, ProgressCb: newProgressHandler(ctx, "transferring "+ls.src.Name+":"), + Differ: ls.src.Differ, } if idmap := mount.IdentityMapping(); idmap != nil { @@ -196,9 +213,6 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal } if err := filesync.FSSync(ctx, caller, opt); err != nil { - if 
status.Code(err) == codes.NotFound { - return nil, errors.Errorf("local source %s not enabled from the client", ls.src.Name) - } return nil, err } @@ -207,32 +221,17 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal } lm = nil - if err := contenthash.SetCacheContext(ctx, mutable.Metadata(), cc); err != nil { + if err := contenthash.SetCacheContext(ctx, mutable, cc); err != nil { return nil, err } // skip storing snapshot by the shared key if it already exists - skipStoreSharedKey := false - si, _ := ls.md.Get(mutable.ID()) - if v := si.Get(keySharedKey); v != nil { - var str string - if err := v.Unmarshal(&str); err != nil { + md := cacheRefMetadata{mutable} + if md.getSharedKey() != sharedKey { + if err := md.setSharedKey(sharedKey); err != nil { return nil, err } - skipStoreSharedKey = str == sharedKey - } - if !skipStoreSharedKey { - v, err := metadata.NewValue(sharedKey) - if err != nil { - return nil, err - } - v.Index = sharedKey - if err := si.Update(func(b *bolt.Bucket) error { - return si.SetValue(b, sharedKey, v) - }); err != nil { - return nil, err - } - logrus.Debugf("saved %s as %s", mutable.ID(), sharedKey) + bklog.G(ctx).Debugf("saved %s as %s", mutable.ID(), sharedKey) } snap, err := mutable.Commit(ctx) @@ -247,7 +246,7 @@ func (ls *localSourceHandler) snapshot(ctx context.Context, s session.Group, cal func newProgressHandler(ctx context.Context, id string) func(int, bool) { limiter := rate.NewLimiter(rate.Every(100*time.Millisecond), 1) - pw, _, _ := progress.FromContext(ctx) + pw, _, _ := progress.NewFromContext(ctx) now := time.Now() st := progress.Status{ Started: &now, @@ -280,3 +279,30 @@ func (cu *cacheUpdater) MarkSupported(bool) { func (cu *cacheUpdater) ContentHasher() fsutil.ContentHasher { return contenthash.NewFromStat } + +const keySharedKey = "local.sharedKey" +const sharedKeyIndex = keySharedKey + ":" + +func searchSharedKey(ctx context.Context, store cache.MetadataStore, k string) ([]cacheRefMetadata, error) { + var results []cacheRefMetadata + mds, err := store.Search(ctx, sharedKeyIndex+k) + if err != nil { + return nil, err + } + for _, md := range mds { + results = append(results, cacheRefMetadata{md}) + } + return results, nil +} + +type cacheRefMetadata struct { + cache.RefMetadata +} + +func (md cacheRefMetadata) getSharedKey() string { + return md.GetString(keySharedKey) +} + +func (md cacheRefMetadata) setSharedKey(key string) error { + return md.SetString(keySharedKey, key, sharedKeyIndex+key) +} diff --git a/vendor/github.com/moby/buildkit/source/manager.go b/vendor/github.com/moby/buildkit/source/manager.go index aba45bffe1..3f4a0cb478 100644 --- a/vendor/github.com/moby/buildkit/source/manager.go +++ b/vendor/github.com/moby/buildkit/source/manager.go @@ -16,7 +16,7 @@ type Source interface { } type SourceInstance interface { - CacheKey(ctx context.Context, g session.Group, index int) (string, solver.CacheOpts, bool, error) + CacheKey(ctx context.Context, g session.Group, index int) (string, string, solver.CacheOpts, bool, error) Snapshot(ctx context.Context, g session.Group) (cache.ImmutableRef, error) } diff --git a/vendor/github.com/moby/buildkit/source/types/types.go b/vendor/github.com/moby/buildkit/source/types/types.go new file mode 100644 index 0000000000..b96eac2333 --- /dev/null +++ b/vendor/github.com/moby/buildkit/source/types/types.go @@ -0,0 +1,9 @@ +package srctypes + +const ( + DockerImageScheme = "docker-image" + GitScheme = "git" + LocalScheme = "local" + HTTPScheme = "http" + HTTPSScheme = 
"https" +) diff --git a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go index addfccfade..d2feccfd5e 100644 --- a/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go +++ b/vendor/github.com/moby/buildkit/util/apicaps/pb/generate.go @@ -1,3 +1,3 @@ -package moby_buildkit_v1_apicaps //nolint:golint +package moby_buildkit_v1_apicaps //nolint:revive //go:generate protoc -I=. -I=../../../vendor/ -I=../../../../../../ --gogo_out=plugins=grpc:. caps.proto diff --git a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go index 6252147e0d..499e877184 100644 --- a/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go +++ b/vendor/github.com/moby/buildkit/util/appdefaults/appdefaults_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package appdefaults diff --git a/vendor/github.com/moby/buildkit/util/archutil/386_binary.go b/vendor/github.com/moby/buildkit/util/archutil/386_binary.go index dab00ecf5e..0309e1520f 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/386_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/386_binary.go @@ -1,3 +1,4 @@ +//go:build !386 // +build !386 package archutil @@ -5,4 +6,4 @@ package archutil // This file is generated by running make inside the archutil package. // Do not edit manually. -const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\xd8\x31\x8a\xc2\x40\x14\x06\xe0\x7f\x36\xb3\xd9\x84\xdd\x62\x0e\x10\x96\x2d\xb6\xd8\x6a\x58\x21\x57\x50\x1b\x11\x2c\x3c\x80\x62\xd0\x2a\x09\xc9\x08\x5a\x99\x23\x78\x18\x0b\x4b\x2f\x20\xd8\x5b\x7b\x0f\x79\xc9\x0b\xa4\x48\x63\xff\x3e\xf8\x19\x7c\xf3\x7c\xa9\xdf\x1c\x86\x93\x91\x52\x0a\xad\x37\x78\xa8\x7f\x1d\x75\x10\x03\xf8\x37\x4d\x3d\xc6\x0f\x34\xfe\xa0\xdb\x7b\x52\xe9\x80\x72\x03\x40\xd1\x54\x33\x68\xee\x4d\x33\x83\x12\x02\xa0\xbc\x73\x9d\xfa\x4e\x94\x4a\x07\x94\x08\x40\xc4\xff\xa7\xcc\x1e\x6e\x85\x1e\x3e\x8f\xa5\x9e\x2f\x9e\x37\x9e\xce\xe9\x7b\x17\xaa\x29\x08\x21\x84\x10\x42\x08\x21\x84\x10\xa2\xcf\x99\x96\xe6\xc1\xfd\x5a\xc1\x96\x9b\xd2\x15\x6e\xb1\x84\x4d\x33\x97\xd8\x75\xba\xb5\x79\x91\xe5\x49\xe1\xf6\xb0\x2e\xd9\xb9\x57\xe6\x7e\x02\xf8\xa8\xdf\x13\x78\xcf\xe7\x1d\xbf\xa5\xf9\xfc\xe6\xbd\xdd\xe7\x37\x07\x5a\xf0\xc3\x4e\x9f\xea\x9c\x5e\xa7\x1e\x1a\xe0\xb7\xa7\xef\x19\x00\x00\xff\xff\x32\x71\x64\x98\xd0\x10\x00\x00" +const Binary386 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x64\x64\x64\x80\x01\x26\x06\x66\x06\x30\x6f\x02\x0b\x87\x09\x03\x03\x83\x8c\x00\x44\xdc\x84\x41\x81\x81\x99\x41\x83\x81\x99\x81\x89\x01\xae\xba\x81\x85\x03\x84\xa7\x30\x30\x30\x80\x30\x0b\x48\x4c\x80\x01\x22\x2f\x00\x31\x03\x84\x39\x19\x18\x18\x40\x98\x15\x2a\x1e\xf8\xb4\x24\x85\x01\x0b\x60\x83\x6a\x1b\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\x01\x75\xc1\x0e\x50\x67\xdd\xf0\xf6\xd9\x06\x06\xbd\xe2\x8c\xe2\x92\xa2\x92\xc4\x24\x06\xbd\x92\xd4\x8a\x12\x12\xcc\xe0\x66\x80\xf4\xf9\xd9\xa0\xe3\x06\xa0\x4e\x3c\x27\x92\x3c\x23\x12\xcd\x8c\x24\xce\x29\xc0\xc0\x20\x88\x45\x1d\x20\x00\x00\xff\xff\x3d\x67\xa6\x38\x94\x10\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/archutil/386_check.go b/vendor/github.com/moby/buildkit/util/archutil/386_check.go index 5ef488f1e5..858c62f538 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/386_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/386_check.go @@ -1,7 +1,8 @@ +//go:build !386 // +build !386 
package archutil -func i386Supported() error { - return check(Binary386) +func i386Supported() (string, error) { + return check("386", Binary386) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/386_check_386.go b/vendor/github.com/moby/buildkit/util/archutil/386_check_386.go index f2c0ee3242..afcd026994 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/386_check_386.go +++ b/vendor/github.com/moby/buildkit/util/archutil/386_check_386.go @@ -1,7 +1,8 @@ +//go:build 386 // +build 386 package archutil -func i386Supported() error { - return nil +func i386Supported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile index f4e1f1d54a..6ac641f06d 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/Dockerfile +++ b/vendor/github.com/moby/buildkit/util/archutil/Dockerfile @@ -1,9 +1,8 @@ -FROM debian:buster-slim AS base -RUN echo "deb http://os.loongnix.org/mirrors/debian/debian/ buster main" > /etc/apt/sources.list +FROM debian:bullseye-slim AS base RUN apt-get update && apt-get --no-install-recommends install -y \ + gcc-x86-64-linux-gnu \ binutils-arm-linux-gnueabihf \ binutils-aarch64-linux-gnu \ - binutils-x86-64-linux-gnu \ binutils-i686-linux-gnu \ binutils-riscv64-linux-gnu \ binutils-s390x-linux-gnu \ @@ -14,8 +13,8 @@ WORKDIR /src FROM base AS exit-amd64 -COPY fixtures/exit.amd64.s . -RUN x86_64-linux-gnu-as --noexecstack -o exit.o exit.amd64.s && x86_64-linux-gnu-ld -o exit -s exit.o +COPY fixtures/exit.amd64.S . +RUN x86_64-linux-gnu-gcc -static -nostdlib -o exit exit.amd64.S FROM base AS exit-386 COPY fixtures/exit.386.s . @@ -49,7 +48,7 @@ FROM base AS exit-mips64 COPY fixtures/exit.mips64.s . RUN mips64-linux-gnuabi64-as --noexecstack -o exit.o exit.mips64.s && mips64-linux-gnuabi64-ld -o exit -s exit.o -FROM golang:1.16-alpine AS generate +FROM golang:1.17-alpine AS generate WORKDIR /src COPY --from=exit-amd64 /src/exit amd64 COPY --from=exit-386 /src/exit 386 diff --git a/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go index 59f135f001..29aaba1537 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/amd64_binary.go @@ -1,3 +1,4 @@ +//go:build !amd64 // +build !amd64 package archutil @@ -5,4 +6,4 @@ package archutil // This file is generated by running make inside the archutil package. // Do not edit manually. 
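
For orientation before the regenerated blob below: each Binary* constant is a gzip-compressed static test binary that check() (see the check_unix.go hunk further down) unpacks into a temp directory and runs under chroot. Per the new check() logic, the regenerated amd64 stub no longer exits 0 on success but with 64 plus the supported variant level. A toy sketch of that convention, mirroring the switch in check_unix.go (function name here is illustrative):

	// variantFromExit mirrors the new switch in check_unix.go: exit code 65
	// means variant "v1", 66 means "v2"; anything else is unsupported.
	func variantFromExit(code int) (string, bool) {
		switch code {
		case 65:
			return "v1", true
		case 66:
			return "v2", true
		default:
			return "", false
		}
	}
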
-const Binaryamd64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xaa\x77\xf5\x71\x63\x62\x64\x64\x80\x01\x26\x06\x3b\x06\x30\x4f\xc0\x01\xcc\x77\x80\x8a\x1b\x08\xc0\x95\x30\x38\x30\x58\x30\xb0\x30\x38\x30\xb0\x30\x30\x83\xd5\xb2\x30\x20\x03\x07\x14\xda\x01\x6a\x34\x8c\x66\x80\x9a\x03\xe2\xb2\x22\xf1\x61\xf6\xc1\x68\x1e\xa8\x30\x8c\x86\xa9\x63\x81\x62\x05\xa8\x79\x0a\x8c\x0e\xa8\x34\x54\x39\x8c\xe6\x80\xd2\x81\x4f\x4b\x52\xd8\x18\x88\x07\x30\x67\xb1\x40\xd9\x20\xb7\xba\xfb\x85\x82\xdc\x7d\x80\x05\xea\xfe\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\x14\x8c\x82\x51\x30\x0a\x46\xc1\x28\x18\x05\xa3\x60\xa8\x00\x8f\xe3\x07\x6c\x40\x94\xe1\x7f\x7e\x56\x06\xbd\xe2\x8c\xe2\x92\xa2\x92\xc4\x24\x06\xbd\xbc\xfc\x92\x54\xbd\xf4\xbc\x52\xbd\x82\xa2\xfc\x82\xd4\xa2\x92\x4a\x06\xbd\x92\xd4\x8a\x12\x8a\xed\xe3\x66\x60\x60\x60\x07\x8f\x33\xa0\xf7\xdf\x51\xfb\xed\x0c\x68\xfd\x77\x18\x90\x83\xf6\xbd\xe1\x7d\x79\xf8\xb8\x01\xda\x78\x01\x03\x62\x9c\x01\x9d\xcf\x8c\xc5\x5d\x3c\x50\xfd\x2a\x04\xf4\x03\x02\x00\x00\xff\xff\x90\x57\x64\xbb\x30\x11\x00\x00" +const Binaryamd64 = "\x1f\x8b\x08\x00\x00\x00\x00\x00\x02\xff\xec\x98\x3f\x8b\x13\x41\x18\xc6\x9f\xd9\x6c\xee\x0f\x08\xb9\xe0\x81\xc2\x59\xa8\x58\xd8\xb8\xe1\x54\x38\x05\x95\x55\x50\x07\xf1\xec\xce\xd2\x65\x93\x5d\xce\x80\x97\x5b\xb2\xb3\xe7\x0a\x07\x77\x47\xbe\x83\x58\x07\x14\x0b\x4b\xc1\x94\xa9\x5c\x2c\x6d\xac\x4d\x21\x04\xad\xec\x0c\x28\xca\x6e\xde\x49\xb2\x63\xa2\x82\xdd\x31\x3f\x08\xcf\xbc\xcf\xbe\x33\xef\x90\xa4\xd9\x67\xef\xe6\xdd\x5b\x06\x63\x90\x18\xb8\x86\xac\x5a\xb2\xb3\xda\x26\xff\x63\x79\xd4\x02\x1b\x97\x50\x80\x8d\x39\x14\xb3\x5e\x13\x93\xd8\x39\x3d\x42\x47\x4b\xc5\xd2\x50\xd2\xb2\x38\x51\xcb\x79\x52\x03\xb2\xa5\xca\x3e\x93\x3e\x7d\xb2\xfb\x34\x47\xea\x19\xf2\xa5\x9a\x13\xba\x0c\xa0\x00\xe0\xf6\xbd\x0d\xf8\xcf\xdf\x5c\xfe\xf0\xf2\xd3\xfd\xf5\x1f\x4f\x8f\x7e\x7f\xf1\x79\xef\xed\x85\xf7\xaf\xa1\xd1\x68\x34\x1a\x8d\x46\xa3\xd1\x68\x34\x1a\xcd\x21\x85\xaf\x76\x4b\x6d\xde\xfa\x36\xbf\x7b\x87\x77\x00\xec\xa7\x66\xa9\x7d\x95\x01\xfb\xbb\x6b\x3c\xe9\xb2\xac\xe9\x5d\xa9\xcd\x0f\x7a\xec\xf4\x33\xf0\x83\x41\x2a\xd1\x0a\xef\x30\x6a\x1f\x3e\x6e\xf5\x18\x6f\x0d\x58\x64\x7c\x59\xe4\x49\x72\x1d\x40\xb6\xb8\x91\x2e\xc0\x93\xee\x95\xf4\xe0\xe2\x9f\xee\x52\x00\x1b\xbd\xc7\xe7\x7d\x63\x9c\x0f\x60\x9c\x1f\x98\xf8\xfa\x53\xed\x2d\x53\x8a\xb1\xa1\xf4\x2f\x93\xff\x40\xf1\x8f\x91\xbf\xa9\xf8\xa7\xb2\xc8\xe1\xf7\xb9\x27\xa4\x7f\x32\xef\x9f\x9d\xe1\x57\x66\xf8\xa8\x88\xad\xa0\x52\xab\xc5\xb5\xb5\xc7\xcd\x75\x6b\x1b\x3b\xab\xd8\x39\x0f\x3f\xae\x0b\x38\x4e\x35\x0c\x9d\x50\xb8\x4d\x01\xc7\xf7\x5c\xe1\xc2\xf1\x1b\x1e\x60\x85\x4f\xb6\x84\x5b\x85\x15\x8a\xe6\x50\x1f\xca\x55\x63\x5b\xf8\xd6\x66\x23\xb2\xaa\x51\xfd\x91\x77\xae\xee\xc1\x12\x7e\x2c\xfe\xfb\xff\xb1\x02\x60\x3e\xfb\x86\xd4\xbc\x25\x9f\xb3\x40\xc9\x5b\x24\x16\xfd\x56\x73\xa3\x1c\xc7\xce\xe5\x39\x81\xd2\xcf\xa6\xd4\xc6\x94\x7b\x05\xb4\x7f\x81\x8d\xe7\xa6\xf7\x5c\xa0\xe7\xc7\x49\x17\x29\xf3\x51\x89\x29\xcf\xba\xf8\x97\xf9\xe5\x19\xfb\x5f\xfd\xe3\xfe\x5f\x01\x00\x00\xff\xff\xa5\x58\xb1\x16\x60\x13\x00\x00" diff --git a/vendor/github.com/moby/buildkit/util/archutil/amd64_check.go b/vendor/github.com/moby/buildkit/util/archutil/amd64_check.go index 3942860553..215b8aebfb 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/amd64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/amd64_check.go @@ -1,7 +1,8 @@ +//go:build !amd64 // +build !amd64 package archutil -func amd64Supported() error { - return check(Binaryamd64) +func amd64Supported() (string, error) { + return check("amd64", Binaryamd64) } diff --git 
a/vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go b/vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go index 538840d1bd..c1b59a51ca 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/amd64_check_amd64.go @@ -1,7 +1,12 @@ +//go:build amd64 // +build amd64 package archutil -func amd64Supported() error { - return nil +import ( + archvariant "github.com/tonistiigi/go-archvariant" +) + +func amd64Supported() (string, error) { + return archvariant.AMD64Variant(), nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go index f5b7feffca..74ee388535 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm64_binary.go @@ -1,3 +1,4 @@ +//go:build !arm64 // +build !arm64 package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm64_check.go b/vendor/github.com/moby/buildkit/util/archutil/arm64_check.go index 76600d070b..b6103f6434 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/arm64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm64_check.go @@ -1,7 +1,8 @@ +//go:build !arm64 // +build !arm64 package archutil -func arm64Supported() error { - return check(Binaryarm64) +func arm64Supported() (string, error) { + return check("arm64", Binaryarm64) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go b/vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go index 1b1c8c6ae0..ff134f9d12 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm64_check_arm64.go @@ -1,7 +1,8 @@ +//go:build arm64 // +build arm64 package archutil -func arm64Supported() error { - return nil +func arm64Supported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go b/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go index 15e6f25ac8..5fdaa577a6 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm_binary.go @@ -1,3 +1,4 @@ +//go:build !arm // +build !arm package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm_check.go b/vendor/github.com/moby/buildkit/util/archutil/arm_check.go index 745e705053..6ccd887533 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/arm_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm_check.go @@ -1,7 +1,8 @@ +//go:build !arm // +build !arm package archutil -func armSupported() error { - return check(Binaryarm) +func armSupported() (string, error) { + return check("arm", Binaryarm) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go b/vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go index d6dea6fa73..e0ab2241df 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go +++ b/vendor/github.com/moby/buildkit/util/archutil/arm_check_arm.go @@ -1,7 +1,8 @@ +//go:build arm // +build arm package archutil -func armSupported() error { - return nil +func armSupported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/check_unix.go b/vendor/github.com/moby/buildkit/util/archutil/check_unix.go index 236c96a5e9..8b558a3176 100644 --- 
a/vendor/github.com/moby/buildkit/util/archutil/check_unix.go +++ b/vendor/github.com/moby/buildkit/util/archutil/check_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package archutil @@ -11,6 +12,8 @@ import ( "os/exec" "path/filepath" "syscall" + + "github.com/pkg/errors" ) func withChroot(cmd *exec.Cmd, dir string) { @@ -19,33 +22,49 @@ func withChroot(cmd *exec.Cmd, dir string) { } } -func check(bin string) error { +func check(arch, bin string) (string, error) { tmpdir, err := ioutil.TempDir("", "qemu-check") if err != nil { - return err + return "", err } defer os.RemoveAll(tmpdir) pp := filepath.Join(tmpdir, "check") r, err := gzip.NewReader(bytes.NewReader([]byte(bin))) if err != nil { - return err + return "", err } defer r.Close() f, err := os.OpenFile(pp, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0700) if err != nil { - return err + return "", err } if _, err := io.Copy(f, r); err != nil { f.Close() - return err + return "", err } f.Close() cmd := exec.Command("/check") withChroot(cmd, tmpdir) err = cmd.Run() - return err + if arch != "amd64" { + return "", err + } + + // special handling for amd64. Exit code is 64 + amd64 variant + if err == nil { + return "", errors.Errorf("invalid zero exit code") + } + if exitError, ok := err.(*exec.ExitError); ok { + switch exitError.ExitCode() { + case 65: + return "v1", nil + case 66: + return "v2", nil + } + } + return "", err } diff --git a/vendor/github.com/moby/buildkit/util/archutil/check_windows.go b/vendor/github.com/moby/buildkit/util/archutil/check_windows.go index 18ec34dbae..6a65601140 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/check_windows.go +++ b/vendor/github.com/moby/buildkit/util/archutil/check_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package archutil @@ -10,6 +11,6 @@ import ( func withChroot(cmd *exec.Cmd, dir string) { } -func check(bin string) error { - return errors.New("binfmt is not supported on Windows") +func check(arch, bin string) (string, error) { + return "", errors.New("binfmt is not supported on Windows") } diff --git a/vendor/github.com/moby/buildkit/util/archutil/detect.go b/vendor/github.com/moby/buildkit/util/archutil/detect.go index 7cf6b514ad..44cb3133e1 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/detect.go +++ b/vendor/github.com/moby/buildkit/util/archutil/detect.go @@ -1,139 +1,141 @@ package archutil import ( + "sort" "strings" "sync" "github.com/containerd/containerd/platforms" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/sirupsen/logrus" ) var mu sync.Mutex -var arr []string +var arr []ocispecs.Platform -func SupportedPlatforms(noCache bool) []string { +func SupportedPlatforms(noCache bool) []ocispecs.Platform { mu.Lock() defer mu.Unlock() if !noCache && arr != nil { return arr } - def := defaultPlatform() - arr = append([]string{}, def) - if p := "linux/amd64"; def != p && amd64Supported() == nil { - arr = append(arr, p) + def := nativePlatform() + arr = append([]ocispecs.Platform{}, def) + + if def.OS != "linux" { + return arr } - if p := "linux/arm64"; def != p && arm64Supported() == nil { - arr = append(arr, p) + + if variant, err := amd64Supported(); err == nil { + p := "amd64" + if def.Architecture != p { + arr = append(arr, linux(p)) + } + for _, v := range amd64vector(variant) { + p := linux(p) + p.Variant = v + arr = append(arr, p) + } } - if p := "linux/riscv64"; def != p && riscv64Supported() == nil { - arr = append(arr, p) + + if p := 
"arm64"; def.Architecture != p { + if _, err := arm64Supported(); err == nil { + arr = append(arr, linux(p)) + } } - if p := "linux/ppc64le"; def != p && ppc64leSupported() == nil { - arr = append(arr, p) + if p := "riscv64"; def.Architecture != p { + if _, err := riscv64Supported(); err == nil { + arr = append(arr, linux(p)) + } } - if p := "linux/s390x"; def != p && s390xSupported() == nil { - arr = append(arr, p) + if p := "ppc64le"; def.Architecture != p { + if _, err := ppc64leSupported(); err == nil { + arr = append(arr, linux(p)) + } } - if p := "linux/386"; def != p && i386Supported() == nil { - arr = append(arr, p) + if p := "s390x"; def.Architecture != p { + if _, err := s390xSupported(); err == nil { + arr = append(arr, linux(p)) + } } - if p := "linux/mips64le"; def != p && mips64leSupported() == nil { - arr = append(arr, p) + if p := "386"; def.Architecture != p { + if _, err := i386Supported(); err == nil { + arr = append(arr, linux(p)) + } } - if p := "linux/mips64"; def != p && mips64Supported() == nil { - arr = append(arr, p) + if p := "mips64le"; def.Architecture != p { + if _, err := mips64leSupported(); err == nil { + arr = append(arr, linux(p)) + } } - if !strings.HasPrefix(def, "linux/arm/") && armSupported() == nil { - arr = append(arr, "linux/arm/v7", "linux/arm/v6") - } else if def == "linux/arm/v7" { - arr = append(arr, "linux/arm/v6") + if p := "mips64"; def.Architecture != p { + if _, err := mips64Supported(); err == nil { + arr = append(arr, linux(p)) + } + } + if p := "arm"; def.Architecture != p { + if _, err := armSupported(); err == nil { + p := linux("arm") + p.Variant = "v6" + arr = append(arr, linux("arm"), p) + } + } else if def.Variant == "" { + p := linux("arm") + p.Variant = "v6" + arr = append(arr, p) } return arr } -func Check(pp specs.Platform) bool { - p := platforms.Format(pp) - if p == "linux/amd64" && amd64Supported() == nil { - return true - } - if p == "linux/arm64" && arm64Supported() == nil { - return true - } - if p == "linux/riscv64" && riscv64Supported() == nil { - return true - } - if p == "linux/ppc64le" && ppc64leSupported() == nil { - return true - } - if p == "linux/s390x" && s390xSupported() == nil { - return true - } - if p == "linux/386" && i386Supported() == nil { - return true - } - if p == "linux/mips64le" && mips64leSupported() == nil { - return true - } - if p == "linux/mips64" && mips64Supported() == nil { - return true - } - if !strings.HasPrefix(p, "linux/arm/") && armSupported() == nil { - return true - } - - return false -} - //WarnIfUnsupported validates the platforms and show warning message if there is, //the end user could fix the issue based on those warning, and thus no need to drop //the platform from the candidates. 
-func WarnIfUnsupported(pfs []string) { - def := defaultPlatform() +func WarnIfUnsupported(pfs []ocispecs.Platform) { + def := nativePlatform() for _, p := range pfs { - if p != def { - if p == "linux/amd64" { - if err := amd64Supported(); err != nil { + if p.Architecture != def.Architecture { + if p.Architecture == "amd64" { + if _, err := amd64Supported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/arm64" { - if err := arm64Supported(); err != nil { + if p.Architecture == "arm64" { + if _, err := arm64Supported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/riscv64" { - if err := riscv64Supported(); err != nil { + if p.Architecture == "riscv64" { + if _, err := riscv64Supported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/ppc64le" { - if err := ppc64leSupported(); err != nil { + if p.Architecture == "ppc64le" { + if _, err := ppc64leSupported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/s390x" { - if err := s390xSupported(); err != nil { + if p.Architecture == "s390x" { + if _, err := s390xSupported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/386" { - if err := i386Supported(); err != nil { + if p.Architecture == "386" { + if _, err := i386Supported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/mips64le" { - if err := mips64leSupported(); err != nil { + if p.Architecture == "mips64le" { + if _, err := mips64leSupported(); err != nil { printPlatformWarning(p, err) } } - if p == "linux/mips64" { - if err := mips64Supported(); err != nil { + if p.Architecture == "mips64" { + if _, err := mips64Supported(); err != nil { printPlatformWarning(p, err) } } - if strings.HasPrefix(p, "linux/arm/v6") || strings.HasPrefix(p, "linux/arm/v7") { - if err := armSupported(); err != nil { + if p.Architecture == "arm" { + if _, err := armSupported(); err != nil { printPlatformWarning(p, err) } } @@ -141,16 +143,38 @@ func WarnIfUnsupported(pfs []string) { } } -func defaultPlatform() string { - return platforms.Format(platforms.Normalize(platforms.DefaultSpec())) +func nativePlatform() ocispecs.Platform { + return platforms.Normalize(platforms.DefaultSpec()) } -func printPlatformWarning(p string, err error) { - if strings.Contains(err.Error(), "exec format error") { - logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", p) - } else if strings.Contains(err.Error(), "no such file or directory") { - logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", p) - } else { - logrus.Warnf("platforms %s cannot pass the validation: %s", p, err.Error()) +func linux(arch string) ocispecs.Platform { + return ocispecs.Platform{ + OS: "linux", + Architecture: arch, + } +} + +func amd64vector(v string) (out []string) { + switch v { + case "v4": + out = append(out, "v4") + fallthrough + case "v3": + out = append(out, "v3") + fallthrough + case "v2": + out = append(out, "v2") + } + sort.Strings(out) + return +} + +func printPlatformWarning(p ocispecs.Platform, err error) { + if strings.Contains(err.Error(), "exec format error") { + logrus.Warnf("platform %s cannot pass the validation, kernel support for miscellaneous binary may have not enabled.", platforms.Format(p)) + } else if strings.Contains(err.Error(), "no such file or directory") { + logrus.Warnf("platforms %s cannot pass the validation, '-F' flag might have not set for 'archutil'.", platforms.Format(p)) + } else { + 
logrus.Warnf("platforms %s cannot pass the validation: %s", platforms.Format(p), err.Error()) } } diff --git a/vendor/github.com/moby/buildkit/util/archutil/mips64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/mips64_binary.go index ff6afda0b2..585afc3ff0 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/mips64_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/mips64_binary.go @@ -1,3 +1,4 @@ +//go:build !mips64 // +build !mips64 package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/mips64_check.go b/vendor/github.com/moby/buildkit/util/archutil/mips64_check.go index b6ee649570..ddd71de532 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/mips64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/mips64_check.go @@ -1,7 +1,8 @@ +//go:build !mips64 // +build !mips64 package archutil -func mips64Supported() error { - return check(Binarymips64) +func mips64Supported() (string, error) { + return check("mips64", Binarymips64) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/mips64_check_mips64.go b/vendor/github.com/moby/buildkit/util/archutil/mips64_check_mips64.go index c4907053ba..ae1c71c5f2 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/mips64_check_mips64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/mips64_check_mips64.go @@ -1,7 +1,8 @@ +//go:build mips64 // +build mips64 package archutil -func mips64Supported() error { - return nil +func mips64Supported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/mips64le_binary.go b/vendor/github.com/moby/buildkit/util/archutil/mips64le_binary.go index 21c49bce07..e56bdb07a5 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/mips64le_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/mips64le_binary.go @@ -1,3 +1,4 @@ +//go:build !mips64le // +build !mips64le package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/mips64le_check.go b/vendor/github.com/moby/buildkit/util/archutil/mips64le_check.go index b4d536f400..f97d335b2a 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/mips64le_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/mips64le_check.go @@ -1,7 +1,8 @@ +//go:build !mips64le // +build !mips64le package archutil -func mips64leSupported() error { - return check(Binarymips64le) +func mips64leSupported() (string, error) { + return check("mips64le", Binarymips64le) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/mips64le_check_mips64le.go b/vendor/github.com/moby/buildkit/util/archutil/mips64le_check_mips64le.go index 80061bcf28..4df415b172 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/mips64le_check_mips64le.go +++ b/vendor/github.com/moby/buildkit/util/archutil/mips64le_check_mips64le.go @@ -1,7 +1,8 @@ +//go:build mips64le // +build mips64le package archutil -func mips64leSupported() error { - return nil +func mips64leSupported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go index 3e4d4b38e9..1be429cc40 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_binary.go @@ -1,3 +1,4 @@ +//go:build !ppc64le // +build !ppc64le package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go index 
017258390b..4a846c5cfb 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check.go @@ -1,7 +1,8 @@ +//go:build !ppc64le // +build !ppc64le package archutil -func ppc64leSupported() error { - return check(Binaryppc64le) +func ppc64leSupported() (string, error) { + return check("ppc64le", Binaryppc64le) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go index 7da12d4b5c..54e05f0549 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go +++ b/vendor/github.com/moby/buildkit/util/archutil/ppc64le_check_ppc64le.go @@ -1,7 +1,8 @@ +//go:build ppc64le // +build ppc64le package archutil -func ppc64leSupported() error { - return nil +func ppc64leSupported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go b/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go index a5e115d6bd..69864eee32 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/riscv64_binary.go @@ -1,3 +1,4 @@ +//go:build !riscv64 // +build !riscv64 package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go index c1272d0a46..63dd5a5b46 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check.go @@ -1,7 +1,8 @@ +//go:build !riscv64 // +build !riscv64 package archutil -func riscv64Supported() error { - return check(Binaryriscv64) +func riscv64Supported() (string, error) { + return check("riscv64", Binaryriscv64) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go index ceeb56075b..959f6de3bc 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go +++ b/vendor/github.com/moby/buildkit/util/archutil/riscv64_check_riscv64.go @@ -1,7 +1,8 @@ +//go:build riscv64 // +build riscv64 package archutil -func riscv64Supported() error { - return nil +func riscv64Supported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go b/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go index 0f43a0b89f..17b7e8a257 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go +++ b/vendor/github.com/moby/buildkit/util/archutil/s390x_binary.go @@ -1,3 +1,4 @@ +//go:build !s390x // +build !s390x package archutil diff --git a/vendor/github.com/moby/buildkit/util/archutil/s390x_check.go b/vendor/github.com/moby/buildkit/util/archutil/s390x_check.go index 89f5cf4343..1c2dd9fbea 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/s390x_check.go +++ b/vendor/github.com/moby/buildkit/util/archutil/s390x_check.go @@ -1,7 +1,8 @@ +//go:build !s390x // +build !s390x package archutil -func s390xSupported() error { - return check(Binarys390x) +func s390xSupported() (string, error) { + return check("s390x", Binarys390x) } diff --git a/vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go b/vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go index 664bb15e18..d8e8c2e6bb 100644 --- a/vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go +++
b/vendor/github.com/moby/buildkit/util/archutil/s390x_check_s390x.go @@ -1,7 +1,8 @@ +//go:build s390x // +build s390x package archutil -func s390xSupported() error { - return nil +func s390xSupported() (string, error) { + return "", nil } diff --git a/vendor/github.com/moby/buildkit/util/bklog/log.go b/vendor/github.com/moby/buildkit/util/bklog/log.go new file mode 100644 index 0000000000..d7f202210d --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/bklog/log.go @@ -0,0 +1,63 @@ +package bklog + +import ( + "context" + + "github.com/containerd/containerd/log" + "github.com/sirupsen/logrus" + "go.opentelemetry.io/otel/trace" +) + +func init() { + // overwrites containerd/log + log.G = GetLogger + log.L = L +} + +var ( + G = GetLogger + L = logrus.NewEntry(logrus.StandardLogger()) +) + +var ( + logWithTraceID = false +) + +func EnableLogWithTraceID(b bool) { + logWithTraceID = b +} + +type ( + loggerKey struct{} +) + +// WithLogger returns a new context with the provided logger. Use in +// combination with logger.WithField(s) for great effect. +func WithLogger(ctx context.Context, logger *logrus.Entry) context.Context { + return context.WithValue(ctx, loggerKey{}, logger) +} + +// GetLogger retrieves the current logger from the context. If no logger is +// available, the default logger is returned. +func GetLogger(ctx context.Context) (l *logrus.Entry) { + logger := ctx.Value(loggerKey{}) + + if logger != nil { + l = logger.(*logrus.Entry) + } else if logger := log.GetLogger(ctx); logger != nil { + l = logger + } else { + l = L + } + + if logWithTraceID { + if spanContext := trace.SpanFromContext(ctx).SpanContext(); spanContext.IsValid() { + return l.WithFields(logrus.Fields{ + "traceID": spanContext.TraceID(), + "spanID": spanContext.SpanID(), + }) + } + } + + return l +} diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go b/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go new file mode 100644 index 0000000000..5adaa9b36f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/buildinfo/buildinfo.go @@ -0,0 +1,450 @@ +package buildinfo + +import ( + "context" + "encoding/base64" + "encoding/json" + "sort" + "strings" + + ctnref "github.com/containerd/containerd/reference" + "github.com/docker/distribution/reference" + "github.com/moby/buildkit/exporter/containerimage/exptypes" + "github.com/moby/buildkit/source" + binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/moby/buildkit/util/urlutil" + "github.com/pkg/errors" +) + +// Decode decodes a base64 encoded build info. +func Decode(enc string) (bi binfotypes.BuildInfo, _ error) { + dec, err := base64.StdEncoding.DecodeString(enc) + if err != nil { + return bi, err + } + err = json.Unmarshal(dec, &bi) + return bi, err +} + +// Encode encodes build info. +func Encode(ctx context.Context, metadata map[string][]byte, key string, llbSources map[string]string) ([]byte, error) { + var bi binfotypes.BuildInfo + if metadata == nil { + metadata = make(map[string][]byte) + } + if v, ok := metadata[key]; ok && v != nil { + if err := json.Unmarshal(v, &bi); err != nil { + return nil, err + } + } + if sources, err := mergeSources(llbSources, bi.Sources); err == nil { + bi.Sources = sources + } else { + return nil, err + } + bi.Sources = dedupSources(bi, allDepsSources(bi, nil)) + return json.Marshal(bi) +} + +// mergeSources combines and fixes build sources from frontend sources. 
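+// LLB sources carry resolved pins keyed by their source string, while +// frontend sources keep the ref as the user wrote it; when both describe the +// same docker-image, the frontend ref wins and the pin from LLB is attached to it.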
+func mergeSources(llbSources map[string]string, frontendSources []binfotypes.Source) ([]binfotypes.Source, error) { + if llbSources == nil { + llbSources = make(map[string]string) + } + // iterate and combine build sources + mbs := map[string]binfotypes.Source{} + for llbSource, pin := range llbSources { + src, err := source.FromString(llbSource) + if err != nil { + return nil, err + } + switch sourceID := src.(type) { + case *source.ImageIdentifier: + for i, fsrc := range frontendSources { + if fsrc.Type != binfotypes.SourceTypeDockerImage { + continue + } + // use original user input from frontend sources + if fsrc.Alias == sourceID.Reference.String() || fsrc.Pin == pin { + if fsrc.Alias == "" { + fsrc.Alias = sourceID.Reference.String() + } + parsed, err := reference.ParseNormalizedNamed(fsrc.Ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", fsrc.Ref) + } + mbs[fsrc.Alias] = binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: reference.TagNameOnly(parsed).String(), + Pin: pin, + } + frontendSources = append(frontendSources[:i], frontendSources[i+1:]...) + break + } + } + if _, ok := mbs[sourceID.Reference.String()]; !ok { + mbs[sourceID.Reference.String()] = binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: sourceID.Reference.String(), + Pin: pin, + } + } + case *source.GitIdentifier: + sref := sourceID.Remote + if len(sourceID.Ref) > 0 { + sref += "#" + sourceID.Ref + } + if len(sourceID.Subdir) > 0 { + sref += ":" + sourceID.Subdir + } + if _, ok := mbs[sref]; !ok { + mbs[sref] = binfotypes.Source{ + Type: binfotypes.SourceTypeGit, + Ref: urlutil.RedactCredentials(sref), + Pin: pin, + } + } + case *source.HTTPIdentifier: + if _, ok := mbs[sourceID.URL]; !ok { + mbs[sourceID.URL] = binfotypes.Source{ + Type: binfotypes.SourceTypeHTTP, + Ref: urlutil.RedactCredentials(sourceID.URL), + Pin: pin, + } + } + } + } + + // leftover sources in frontend. Mostly duplicated ones we don't need but + // there is an edge case if no instruction except sources one is defined + // (e.g. FROM ...) that can be valid so take it into account. + for _, fsrc := range frontendSources { + if fsrc.Type != binfotypes.SourceTypeDockerImage { + continue + } + if _, ok := mbs[fsrc.Alias]; !ok { + parsed, err := reference.ParseNormalizedNamed(fsrc.Ref) + if err != nil { + return nil, errors.Wrapf(err, "failed to parse %s", fsrc.Ref) + } + mbs[fsrc.Alias] = binfotypes.Source{ + Type: binfotypes.SourceTypeDockerImage, + Ref: reference.TagNameOnly(parsed).String(), + Pin: fsrc.Pin, + } + } + } + + srcs := make([]binfotypes.Source, 0, len(mbs)) + for _, bs := range mbs { + srcs = append(srcs, bs) + } + sort.Slice(srcs, func(i, j int) bool { + return srcs[i].Ref < srcs[j].Ref + }) + + return srcs, nil +} + +// decodeDeps decodes dependencies (buildinfo) added via the input context. 
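+// Attribute keys have the form "input-metadata:<name>" with an optional +// "::<platform>" suffix (e.g. "input-metadata:base::linux/amd64", where "base" +// is an illustrative name); an entry is only decoded when its platform +// qualifier matches the platform carried by the metadata key.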
+func decodeDeps(key string, attrs map[string]*string) (map[string]binfotypes.BuildInfo, error) { + var platform string + // extract platform from metadata key + if skey := strings.SplitN(key, "/", 2); len(skey) == 2 { + platform = skey[1] + } + + res := make(map[string]binfotypes.BuildInfo) + for k, v := range attrs { + // dependencies are only handled via the input context + if v == nil || !strings.HasPrefix(k, "input-metadata:") { + continue + } + + // if platform is defined in the key, only decode dependencies + // for that platform and vice versa + hasPlatform := len(strings.SplitN(k, "::", 2)) == 2 + if (platform != "" && !hasPlatform) || (platform == "" && hasPlatform) { + continue + } + + // decode input metadata + var inputresp map[string]string + if err := json.Unmarshal([]byte(*v), &inputresp); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal input-metadata") + } + + // check buildinfo key is present + if _, ok := inputresp[exptypes.ExporterBuildInfo]; !ok { + continue + } + + // decode buildinfo + bi, err := Decode(inputresp[exptypes.ExporterBuildInfo]) + if err != nil { + return nil, errors.Wrap(err, "failed to decode buildinfo from input-metadata") + } + + // set dep key + var depkey string + kl := strings.SplitN(k, ":", 2) + if len(kl) != 2 { + continue + } + depkey = strings.SplitN(kl[1], "::", 2)[0] + if platform != "" { + depkey = strings.TrimSuffix(depkey, "::"+platform) + } + + res[depkey] = bi + } + if len(res) == 0 { + return nil, nil + } + return res, nil +} + +// dedupSources deduplicates regular sources from dependencies ones. +func dedupSources(bi binfotypes.BuildInfo, depsSources []binfotypes.Source) (srcs []binfotypes.Source) { + // dedup sources from deps + for i, src := range bi.Sources { + for _, dsrc := range depsSources { + if src == dsrc { + bi.Sources = append(bi.Sources[:i], bi.Sources[i+1:]...) + } else if src.Type == binfotypes.SourceTypeDockerImage { + _, dgst := ctnref.SplitObject(src.Ref) + if dgst != "" && src.Pin == dsrc.Pin { + bi.Sources = append(bi.Sources[:i], bi.Sources[i+1:]...) + } + } + } + } + // dedup regular sources + msrc := make(map[binfotypes.Source]struct{}) + for _, src := range bi.Sources { + msrc[src] = struct{}{} + } + for src := range msrc { + srcs = append(srcs, src) + } + sort.Slice(srcs, func(i, j int) bool { + return srcs[i].Ref < srcs[j].Ref + }) + return srcs +} + +// allDepsSources gathers dependencies sources. +func allDepsSources(bi binfotypes.BuildInfo, visited map[binfotypes.Source]struct{}) (res []binfotypes.Source) { + if visited == nil { + visited = make(map[binfotypes.Source]struct{}) + } + if len(bi.Deps) == 0 { + return res + } + for _, dbi := range bi.Deps { + for _, dsrc := range dbi.Sources { + if _, ok := visited[dsrc]; ok { + continue + } + visited[dsrc] = struct{}{} + } + res = allDepsSources(dbi, visited) + } + for src := range visited { + res = append(res, src) + } + return res +} + +// FormatOpts holds build info format options. +type FormatOpts struct { + RemoveAttrs bool +} + +// Format formats build info. 
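+// The only rewrite currently applied is dropping the Attrs field when +// RemoveAttrs is set; for example, Format(dt, FormatOpts{RemoveAttrs: true}) +// returns the same payload re-marshalled with attrs stripped.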
+func Format(dt []byte, format FormatOpts) (_ []byte, err error) { + if len(dt) == 0 { + return dt, nil + } + var bi binfotypes.BuildInfo + if err := json.Unmarshal(dt, &bi); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal buildinfo for formatting") + } + if format.RemoveAttrs { + bi.Attrs = nil + } + if dt, err = json.Marshal(bi); err != nil { + return nil, err + } + return dt, nil +} + +var knownAttrs = []string{ + //"cmdline", + "context", + "filename", + "source", + + //"add-hosts", + //"cgroup-parent", + //"force-network-mode", + //"hostname", + //"image-resolve-mode", + //"platform", + "shm-size", + "target", + "ulimit", +} + +// filterAttrs filters frontend opt by picking only those that +// could effectively change the build result. +func filterAttrs(key string, attrs map[string]*string) map[string]*string { + var platform string + // extract platform from metadata key + skey := strings.SplitN(key, "/", 2) + if len(skey) == 2 { + platform = skey[1] + } + filtered := make(map[string]*string) + for k, v := range attrs { + if v == nil { + continue + } + // control args are filtered out + if isControlArg(k) { + continue + } + // always include + if strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") { + filtered[k] = v + continue + } + // input context key and value have to be cleaned up + // before being included + if strings.HasPrefix(k, "context:") { + ctxkey := strings.SplitN(k, "::", 2) + hasCtxPlatform := len(ctxkey) == 2 + // if platform is set and also defined in key, set context + // for the right one. + if hasCtxPlatform && platform != "" && platform != ctxkey[1] { + continue + } + if platform == "" && hasCtxPlatform { + ctxval := strings.TrimSuffix(*v, "::"+ctxkey[1]) + filtered[strings.TrimSuffix(k, "::"+ctxkey[1])] = &ctxval + continue + } + ctxival := strings.TrimSuffix(*v, "::"+platform) + filtered[strings.TrimSuffix(k, "::"+platform)] = &ctxival + continue + } + // filter only for known attributes + for _, knownAttr := range knownAttrs { + if knownAttr == k { + filtered[k] = v + break + } + } + } + return filtered +} + +var knownControlArgs = []string{ + "BUILDKIT_CACHE_MOUNT_NS", + "BUILDKIT_CONTEXT_KEEP_GIT_DIR", + "BUILDKIT_INLINE_BUILDINFO_ATTRS", + "BUILDKIT_INLINE_CACHE", + "BUILDKIT_MULTI_PLATFORM", + "BUILDKIT_SANDBOX_HOSTNAME", + "BUILDKIT_SYNTAX", +} + +// isControlArg checks whether a build attribute is a control arg +func isControlArg(attrKey string) bool { + for _, k := range knownControlArgs { + if strings.HasPrefix(attrKey, "build-arg:"+k) { + return true + } + } + return false +} + +// GetMetadata returns buildinfo metadata for the specified key. If the key +// is already there, the result will be merged.
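+// Request attributes are reduced through filterAttrs before being stored, and +// dependencies decoded from them are merged into any existing Deps map.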
+func GetMetadata(metadata map[string][]byte, key string, reqFrontend string, reqAttrs map[string]string) ([]byte, error) { + if metadata == nil { + metadata = make(map[string][]byte) + } + var dtbi []byte + if v, ok := metadata[key]; ok && v != nil { + var mbi binfotypes.BuildInfo + if errm := json.Unmarshal(v, &mbi); errm != nil { + return nil, errors.Wrapf(errm, "failed to unmarshal build info for %q", key) + } + if reqFrontend != "" { + mbi.Frontend = reqFrontend + } + if deps, err := decodeDeps(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))); err == nil { + mbi.Deps = reduceMapBuildInfo(deps, mbi.Deps) + } else { + return nil, err + } + mbi.Attrs = filterAttrs(key, convertMap(reduceMapString(reqAttrs, mbi.Attrs))) + var err error + dtbi, err = json.Marshal(mbi) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) + } + } else { + deps, err := decodeDeps(key, convertMap(reqAttrs)) + if err != nil { + return nil, err + } + dtbi, err = json.Marshal(binfotypes.BuildInfo{ + Frontend: reqFrontend, + Attrs: filterAttrs(key, convertMap(reqAttrs)), + Deps: deps, + }) + if err != nil { + return nil, errors.Wrapf(err, "failed to marshal build info for %q", key) + } + } + return dtbi, nil +} + +func reduceMapString(m1 map[string]string, m2 map[string]*string) map[string]string { + if m1 == nil && m2 == nil { + return nil + } + if m1 == nil { + m1 = map[string]string{} + } + for k, v := range m2 { + if v != nil { + m1[k] = *v + } + } + return m1 +} + +func reduceMapBuildInfo(m1 map[string]binfotypes.BuildInfo, m2 map[string]binfotypes.BuildInfo) map[string]binfotypes.BuildInfo { + if m1 == nil && m2 == nil { + return nil + } + if m1 == nil { + m1 = map[string]binfotypes.BuildInfo{} + } + for k, v := range m2 { + m1[k] = v + } + return m1 +} + +func convertMap(m map[string]string) map[string]*string { + res := make(map[string]*string) + for k, v := range m { + value := v + res[k] = &value + } + return res +} diff --git a/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go b/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go new file mode 100644 index 0000000000..93abcd1b4f --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/buildinfo/types/types.go @@ -0,0 +1,52 @@ +package binfotypes + +import ( + srctypes "github.com/moby/buildkit/source/types" +) + +// ImageConfigField defines the key of build dependencies. +const ImageConfigField = "moby.buildkit.buildinfo.v1" + +// ImageConfig defines the structure of build dependencies +// inside image config. +type ImageConfig struct { + BuildInfo string `json:"moby.buildkit.buildinfo.v1,omitempty"` +} + +// BuildInfo defines the main structure added to image config as +// ImageConfigField key and returned in solver ExporterResponse as +// exptypes.ExporterBuildInfo key. +type BuildInfo struct { + // Frontend defines the frontend used to build. + Frontend string `json:"frontend,omitempty"` + // Attrs defines build request attributes. + Attrs map[string]*string `json:"attrs,omitempty"` + // Sources defines build dependencies. + Sources []Source `json:"sources,omitempty"` + // Deps defines context dependencies. + Deps map[string]BuildInfo `json:"deps,omitempty"` +} + +// Source defines a build dependency. +type Source struct { + // Type defines the SourceType source type (docker-image, git, http). + Type SourceType `json:"type,omitempty"` + // Ref is the reference of the source. 
+ Ref string `json:"ref,omitempty"` + // Alias is a special field used to match with the actual source ref + // because frontend might have already transformed a string user typed + // before generating LLB. + Alias string `json:"alias,omitempty"` + // Pin is the source digest. + Pin string `json:"pin,omitempty"` +} + +// SourceType contains source type. +type SourceType string + +// List of source types. +const ( + SourceTypeDockerImage SourceType = srctypes.DockerImageScheme + SourceTypeGit SourceType = srctypes.GitScheme + SourceTypeHTTP SourceType = srctypes.HTTPScheme +) diff --git a/vendor/github.com/moby/buildkit/util/compression/compression.go b/vendor/github.com/moby/buildkit/util/compression/compression.go index 654b056335..ba44a9270b 100644 --- a/vendor/github.com/moby/buildkit/util/compression/compression.go +++ b/vendor/github.com/moby/buildkit/util/compression/compression.go @@ -7,8 +7,9 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" + "github.com/containerd/stargz-snapshotter/estargz" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) @@ -23,32 +24,118 @@ const ( // Gzip is used for blob data. Gzip + // EStargz is used for estargz data. + EStargz + + // Zstd is used for Zstandard data. + Zstd + // UnknownCompression means not supported yet. UnknownCompression Type = -1 ) +type Config struct { + Type Type + Force bool + Level *int +} + +func New(t Type) Config { + return Config{ + Type: t, + } +} + +func (c Config) SetForce(v bool) Config { + c.Force = v + return c +} + +func (c Config) SetLevel(l int) Config { + c.Level = &l + return c +} + +const ( + mediaTypeDockerSchema2LayerZstd = images.MediaTypeDockerSchema2Layer + ".zstd" + mediaTypeImageLayerZstd = ocispecs.MediaTypeImageLayer + "+zstd" // unreleased image-spec#790 +) + var Default = Gzip +func Parse(t string) Type { + switch t { + case "uncompressed": + return Uncompressed + case "gzip": + return Gzip + case "estargz": + return EStargz + case "zstd": + return Zstd + default: + return UnknownCompression + } +} + func (ct Type) String() string { switch ct { case Uncompressed: return "uncompressed" case Gzip: return "gzip" + case EStargz: + return "estargz" + case Zstd: + return "zstd" default: return "unknown" } } +func (ct Type) DefaultMediaType() string { + switch ct { + case Uncompressed: + return ocispecs.MediaTypeImageLayer + case Gzip, EStargz: + return ocispecs.MediaTypeImageLayerGzip + case Zstd: + return mediaTypeImageLayerZstd + default: + return ocispecs.MediaTypeImageLayer + "+unknown" + } +} + +func (ct Type) IsMediaType(mt string) bool { + mt, ok := toOCILayerType[mt] + if !ok { + return false + } + return mt == ct.DefaultMediaType() +} + +func FromMediaType(mediaType string) Type { + switch toOCILayerType[mediaType] { + case ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerNonDistributable: + return Uncompressed + case ocispecs.MediaTypeImageLayerGzip, ocispecs.MediaTypeImageLayerNonDistributableGzip: + return Gzip + case mediaTypeImageLayerZstd, ocispecs.MediaTypeImageLayerNonDistributableZstd: + return Zstd + default: + return UnknownCompression + } +} + // DetectLayerMediaType returns media type from existing blob data. 
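+// Detection reads the blob data itself: the leading magic bytes for gzip and +// zstd, and the footer for eStargz, so the content must already be present in +// the store.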
func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Digest, oci bool) (string, error) { - ra, err := cs.ReaderAt(ctx, ocispec.Descriptor{Digest: id}) + ra, err := cs.ReaderAt(ctx, ocispecs.Descriptor{Digest: id}) if err != nil { return "", err } defer ra.Close() - ct, err := detectCompressionType(content.NewReader(ra)) + ct, err := detectCompressionType(io.NewSectionReader(ra, 0, ra.Size())) if err != nil { return "", err } @@ -56,21 +143,22 @@ func DetectLayerMediaType(ctx context.Context, cs content.Store, id digest.Diges switch ct { case Uncompressed: if oci { - return ocispec.MediaTypeImageLayer, nil + return ocispecs.MediaTypeImageLayer, nil } return images.MediaTypeDockerSchema2Layer, nil - case Gzip: + case Gzip, EStargz: if oci { - return ocispec.MediaTypeImageLayerGzip, nil + return ocispecs.MediaTypeImageLayerGzip, nil } return images.MediaTypeDockerSchema2LayerGzip, nil + default: return "", errors.Errorf("failed to detect layer %v compression type", id) } } // detectCompressionType detects compression type from real blob data. -func detectCompressionType(cr io.Reader) (Type, error) { +func detectCompressionType(cr *io.SectionReader) (Type, error) { var buf [10]byte var n int var err error @@ -85,8 +173,13 @@ func detectCompressionType(cr io.Reader) (Type, error) { return UnknownCompression, err } + if _, _, err := estargz.OpenFooter(cr); err == nil { + return EStargz, nil + } + for c, m := range map[Type][]byte{ Gzip: {0x1F, 0x8B, 0x08}, + Zstd: {0x28, 0xB5, 0x2F, 0xFD}, } { if n < len(m) { continue @@ -95,25 +188,35 @@ func detectCompressionType(cr io.Reader) (Type, error) { return c, nil } } + return Uncompressed, nil } var toDockerLayerType = map[string]string{ - ocispec.MediaTypeImageLayer: images.MediaTypeDockerSchema2Layer, - images.MediaTypeDockerSchema2Layer: images.MediaTypeDockerSchema2Layer, - ocispec.MediaTypeImageLayerGzip: images.MediaTypeDockerSchema2LayerGzip, - images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip, - images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2Layer, - images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerGzip, + ocispecs.MediaTypeImageLayer: images.MediaTypeDockerSchema2Layer, + images.MediaTypeDockerSchema2Layer: images.MediaTypeDockerSchema2Layer, + ocispecs.MediaTypeImageLayerGzip: images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2LayerGzip: images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2LayerForeign: images.MediaTypeDockerSchema2LayerForeign, + images.MediaTypeDockerSchema2LayerForeignGzip: images.MediaTypeDockerSchema2LayerForeignGzip, + ocispecs.MediaTypeImageLayerNonDistributable: images.MediaTypeDockerSchema2LayerForeign, + ocispecs.MediaTypeImageLayerNonDistributableGzip: images.MediaTypeDockerSchema2LayerForeignGzip, + mediaTypeImageLayerZstd: mediaTypeDockerSchema2LayerZstd, + mediaTypeDockerSchema2LayerZstd: mediaTypeDockerSchema2LayerZstd, } var toOCILayerType = map[string]string{ - ocispec.MediaTypeImageLayer: ocispec.MediaTypeImageLayer, - images.MediaTypeDockerSchema2Layer: ocispec.MediaTypeImageLayer, - ocispec.MediaTypeImageLayerGzip: ocispec.MediaTypeImageLayerGzip, - images.MediaTypeDockerSchema2LayerGzip: ocispec.MediaTypeImageLayerGzip, - images.MediaTypeDockerSchema2LayerForeign: ocispec.MediaTypeImageLayer, - images.MediaTypeDockerSchema2LayerForeignGzip: ocispec.MediaTypeImageLayerGzip, + ocispecs.MediaTypeImageLayer: ocispecs.MediaTypeImageLayer, + 
ocispecs.MediaTypeImageLayerNonDistributable: ocispecs.MediaTypeImageLayerNonDistributable, + ocispecs.MediaTypeImageLayerNonDistributableGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, + ocispecs.MediaTypeImageLayerNonDistributableZstd: ocispecs.MediaTypeImageLayerNonDistributableZstd, + images.MediaTypeDockerSchema2Layer: ocispecs.MediaTypeImageLayer, + ocispecs.MediaTypeImageLayerGzip: ocispecs.MediaTypeImageLayerGzip, + images.MediaTypeDockerSchema2LayerGzip: ocispecs.MediaTypeImageLayerGzip, + images.MediaTypeDockerSchema2LayerForeign: ocispecs.MediaTypeImageLayerNonDistributable, + images.MediaTypeDockerSchema2LayerForeignGzip: ocispecs.MediaTypeImageLayerNonDistributableGzip, + mediaTypeImageLayerZstd: mediaTypeImageLayerZstd, + mediaTypeDockerSchema2LayerZstd: mediaTypeImageLayerZstd, } func convertLayerMediaType(mediaType string, oci bool) string { @@ -130,8 +233,8 @@ func convertLayerMediaType(mediaType string, oci bool) string { return converted } -func ConvertAllLayerMediaTypes(oci bool, descs ...ocispec.Descriptor) []ocispec.Descriptor { - var converted []ocispec.Descriptor +func ConvertAllLayerMediaTypes(oci bool, descs ...ocispecs.Descriptor) []ocispecs.Descriptor { + var converted []ocispecs.Descriptor for _, desc := range descs { desc.MediaType = convertLayerMediaType(desc.MediaType, oci) converted = append(converted, desc) diff --git a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go index ac8c8baff3..31d2be6867 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/buffer.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/buffer.go @@ -10,7 +10,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -59,7 +59,7 @@ func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content }, nil } -func (b *buffer) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { +func (b *buffer) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { r, err := b.getBytesReader(ctx, desc.Digest) if err != nil { return nil, err diff --git a/vendor/github.com/moby/buildkit/util/contentutil/copy.go b/vendor/github.com/moby/buildkit/util/contentutil/copy.go index b471d8b948..2509ce1a3b 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/copy.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/copy.go @@ -7,14 +7,14 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/images" - "github.com/containerd/containerd/remotes" + "github.com/moby/buildkit/util/resolver/limited" "github.com/moby/buildkit/util/resolver/retryhandler" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor, logger func([]byte)) error { - if _, err := retryhandler.New(remotes.FetchHandler(ingester, &localFetcher{provider}), logger)(ctx, desc); err != nil { +func Copy(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispecs.Descriptor, ref string, logger func([]byte)) error { + if _, err := retryhandler.New(limited.FetchHandler(ingester, 
&localFetcher{provider}, ref), logger)(ctx, desc); err != nil { return err } return nil @@ -24,7 +24,7 @@ type localFetcher struct { content.Provider } -func (f *localFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) { +func (f *localFetcher) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) { r, err := f.Provider.ReaderAt(ctx, desc) if err != nil { return nil, err @@ -34,26 +34,38 @@ func (f *localFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R type rc struct { content.ReaderAt - offset int + offset int64 } func (r *rc) Read(b []byte) (int, error) { - n, err := r.ReadAt(b, int64(r.offset)) - r.offset += n + n, err := r.ReadAt(b, r.offset) + r.offset += int64(n) if n > 0 && err == io.EOF { err = nil } return n, err } -func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispec.Descriptor) error { - var m sync.Mutex - manifestStack := []ocispec.Descriptor{} +func (r *rc) Seek(offset int64, whence int) (int64, error) { + switch whence { + case io.SeekStart: + r.offset = offset + case io.SeekCurrent: + r.offset += offset + case io.SeekEnd: + r.offset = r.Size() - offset + } + return r.offset, nil +} - filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { +func CopyChain(ctx context.Context, ingester content.Ingester, provider content.Provider, desc ocispecs.Descriptor) error { + var m sync.Mutex + manifestStack := []ocispecs.Descriptor{} + + filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest, - images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex: + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: m.Lock() manifestStack = append(manifestStack, desc) m.Unlock() @@ -65,7 +77,7 @@ func CopyChain(ctx context.Context, ingester content.Ingester, provider content. handlers := []images.Handler{ images.ChildrenHandler(provider), filterHandler, - retryhandler.New(remotes.FetchHandler(ingester, &localFetcher{provider}), func(_ []byte) {}), + retryhandler.New(limited.FetchHandler(ingester, &localFetcher{provider}, ""), func(_ []byte) {}), } if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { @@ -73,7 +85,7 @@ func CopyChain(ctx context.Context, ingester content.Ingester, provider content. 
} for i := len(manifestStack) - 1; i >= 0; i-- { - if err := Copy(ctx, ingester, provider, manifestStack[i], nil); err != nil { + if err := Copy(ctx, ingester, provider, manifestStack[i], "", nil); err != nil { return errors.WithStack(err) } } diff --git a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go index d55c101219..6414b12f7a 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/fetcher.go @@ -6,7 +6,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/remotes" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -20,7 +20,7 @@ type fetchedProvider struct { f remotes.Fetcher } -func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { +func (p *fetchedProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { rc, err := p.f.Fetch(ctx, desc) if err != nil { return nil, err diff --git a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go index 3dafed7dc2..469096d340 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go +++ b/vendor/github.com/moby/buildkit/util/contentutil/multiprovider.go @@ -7,7 +7,7 @@ import ( "github.com/containerd/containerd/content" "github.com/containerd/containerd/errdefs" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -26,8 +26,52 @@ type MultiProvider struct { sub map[digest.Digest]content.Provider } +func (mp *MultiProvider) SnapshotLabels(descs []ocispecs.Descriptor, index int) map[string]string { + if len(descs) < index { + return nil + } + desc := descs[index] + type snapshotLabels interface { + SnapshotLabels([]ocispecs.Descriptor, int) map[string]string + } + + mp.mu.RLock() + if p, ok := mp.sub[desc.Digest]; ok { + mp.mu.RUnlock() + if cd, ok := p.(snapshotLabels); ok { + return cd.SnapshotLabels(descs, index) + } + } else { + mp.mu.RUnlock() + } + if cd, ok := mp.base.(snapshotLabels); ok { + return cd.SnapshotLabels(descs, index) + } + return nil +} + +func (mp *MultiProvider) CheckDescriptor(ctx context.Context, desc ocispecs.Descriptor) error { + type checkDescriptor interface { + CheckDescriptor(context.Context, ocispecs.Descriptor) error + } + + mp.mu.RLock() + if p, ok := mp.sub[desc.Digest]; ok { + mp.mu.RUnlock() + if cd, ok := p.(checkDescriptor); ok { + return cd.CheckDescriptor(ctx, desc) + } + } else { + mp.mu.RUnlock() + } + if cd, ok := mp.base.(checkDescriptor); ok { + return cd.CheckDescriptor(ctx, desc) + } + return nil +} + // ReaderAt returns a content.ReaderAt -func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) { +func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) { mp.mu.RLock() if p, ok := mp.sub[desc.Digest]; ok { mp.mu.RUnlock() diff --git a/vendor/github.com/moby/buildkit/util/contentutil/refs.go b/vendor/github.com/moby/buildkit/util/contentutil/refs.go index 9e41ea07d2..16fb9aafa5 100644 --- a/vendor/github.com/moby/buildkit/util/contentutil/refs.go +++ 
b/vendor/github.com/moby/buildkit/util/contentutil/refs.go @@ -9,45 +9,56 @@ import ( "github.com/containerd/containerd/errdefs" "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/version" "github.com/moby/locker" digest "github.com/opencontainers/go-digest" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func ProviderFromRef(ref string) (ocispec.Descriptor, content.Provider, error) { +func ProviderFromRef(ref string) (ocispecs.Descriptor, content.Provider, error) { + headers := http.Header{} + headers.Set("User-Agent", version.UserAgent()) remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, + Client: http.DefaultClient, + Headers: headers, }) name, desc, err := remote.Resolve(context.TODO(), ref) if err != nil { - return ocispec.Descriptor{}, nil, err + return ocispecs.Descriptor{}, nil, err } fetcher, err := remote.Fetcher(context.TODO(), name) if err != nil { - return ocispec.Descriptor{}, nil, err + return ocispecs.Descriptor{}, nil, err } return desc, FromFetcher(fetcher), nil } func IngesterFromRef(ref string) (content.Ingester, error) { + headers := http.Header{} + headers.Set("User-Agent", version.UserAgent()) remote := docker.NewResolver(docker.ResolverOptions{ - Client: http.DefaultClient, + Client: http.DefaultClient, + Headers: headers, }) - pusher, err := remote.Pusher(context.TODO(), ref) + p, err := remote.Pusher(context.TODO(), ref) if err != nil { return nil, err } return &ingester{ locker: locker.New(), - pusher: pusher, + pusher: &pusher{p}, }, nil } +type pusher struct { + remotes.Pusher +} + type ingester struct { locker *locker.Locker pusher remotes.Pusher diff --git a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go index c79873e826..6e0557961c 100644 --- a/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go +++ b/vendor/github.com/moby/buildkit/util/entitlements/security/security_linux.go @@ -4,10 +4,12 @@ import ( "context" "fmt" "os" + "sync" "github.com/containerd/containerd/containers" "github.com/containerd/containerd/oci" - "github.com/containerd/containerd/sys" + "github.com/containerd/containerd/pkg/cap" + "github.com/containerd/containerd/pkg/userns" specs "github.com/opencontainers/runtime-spec/specs-go" "github.com/pkg/errors" "github.com/sirupsen/logrus" @@ -17,46 +19,11 @@ import ( // WithInsecureSpec sets spec with All capability. 
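+// The granted capability set is derived from the capabilities currently held +// by the calling process (see getAllCaps) rather than from a hardcoded list.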
func WithInsecureSpec() oci.SpecOpts { return func(_ context.Context, _ oci.Client, _ *containers.Container, s *specs.Spec) error { - addCaps := []string{ - "CAP_FSETID", - "CAP_KILL", - "CAP_FOWNER", - "CAP_MKNOD", - "CAP_CHOWN", - "CAP_DAC_OVERRIDE", - "CAP_NET_RAW", - "CAP_SETGID", - "CAP_SETUID", - "CAP_SETPCAP", - "CAP_SETFCAP", - "CAP_NET_BIND_SERVICE", - "CAP_SYS_CHROOT", - "CAP_AUDIT_WRITE", - "CAP_MAC_ADMIN", - "CAP_MAC_OVERRIDE", - "CAP_DAC_READ_SEARCH", - "CAP_SYS_PTRACE", - "CAP_SYS_MODULE", - "CAP_SYSLOG", - "CAP_SYS_RAWIO", - "CAP_SYS_ADMIN", - "CAP_LINUX_IMMUTABLE", - "CAP_SYS_BOOT", - "CAP_SYS_NICE", - "CAP_SYS_PACCT", - "CAP_SYS_TTY_CONFIG", - "CAP_SYS_TIME", - "CAP_WAKE_ALARM", - "CAP_AUDIT_READ", - "CAP_AUDIT_CONTROL", - "CAP_SYS_RESOURCE", - "CAP_BLOCK_SUSPEND", - "CAP_IPC_LOCK", - "CAP_IPC_OWNER", - "CAP_LEASE", - "CAP_NET_ADMIN", - "CAP_NET_BROADCAST", + addCaps, err := getAllCaps() + if err != nil { + return err } + s.Process.Capabilities.Bounding = append(s.Process.Capabilities.Bounding, addCaps...) s.Process.Capabilities.Ambient = append(s.Process.Capabilities.Ambient, addCaps...) s.Process.Capabilities.Effective = append(s.Process.Capabilities.Effective, addCaps...) @@ -80,7 +47,7 @@ func WithInsecureSpec() oci.SpecOpts { }, } - if !sys.RunningInUserNS() { + if !userns.RunningInUserNS() { // Devices automatically mounted on insecure mode s.Linux.Devices = append(s.Linux.Devices, []specs.LinuxDevice{ // Writes to this come out as printk's, reads export the buffered printk records. (dmesg) @@ -153,10 +120,83 @@ func getFreeLoopID() (int, error) { } defer fd.Close() - const _LOOP_CTL_GET_FREE = 0x4C82 //nolint:golint + const _LOOP_CTL_GET_FREE = 0x4C82 //nolint:revive r1, _, uerr := unix.Syscall(unix.SYS_IOCTL, fd.Fd(), _LOOP_CTL_GET_FREE, 0) if uerr == 0 { return int(r1), nil } return 0, errors.Errorf("error getting free loop device: %v", uerr) } + +var ( + currentCaps []string + currentCapsError error //nolint:errname + currentCapsOnce sync.Once +) + +func getCurrentCaps() ([]string, error) { + currentCapsOnce.Do(func() { + currentCaps, currentCapsError = cap.Current() + }) + + return currentCaps, currentCapsError +} + +func getAllCaps() ([]string, error) { + availableCaps, err := getCurrentCaps() + if err != nil { + return nil, fmt.Errorf("error getting current capabilities: %s", err) + } + + // see if any of the base linux35Caps are not available to be granted + // they are either not supported by the kernel or dropped at the process level + for _, cap := range availableCaps { + if _, exists := linux35Caps[cap]; !exists { + logrus.Warnf("capability %s could not be granted for insecure mode", cap) + } + } + + return availableCaps, nil +} + +// linux35Caps provides a list of capabilities available on Linux 3.5 kernel +var linux35Caps = map[string]struct{}{ + "CAP_FSETID": {}, + "CAP_KILL": {}, + "CAP_FOWNER": {}, + "CAP_MKNOD": {}, + "CAP_CHOWN": {}, + "CAP_DAC_OVERRIDE": {}, + "CAP_NET_RAW": {}, + "CAP_SETGID": {}, + "CAP_SETUID": {}, + "CAP_SETPCAP": {}, + "CAP_SETFCAP": {}, + "CAP_NET_BIND_SERVICE": {}, + "CAP_SYS_CHROOT": {}, + "CAP_AUDIT_WRITE": {}, + "CAP_MAC_ADMIN": {}, + "CAP_MAC_OVERRIDE": {}, + "CAP_DAC_READ_SEARCH": {}, + "CAP_SYS_PTRACE": {}, + "CAP_SYS_MODULE": {}, + "CAP_SYSLOG": {}, + "CAP_SYS_RAWIO": {}, + "CAP_SYS_ADMIN": {}, + "CAP_LINUX_IMMUTABLE": {}, + "CAP_SYS_BOOT": {}, + "CAP_SYS_NICE": {}, + "CAP_SYS_PACCT": {}, + "CAP_SYS_TTY_CONFIG": {}, + "CAP_SYS_TIME": {}, + "CAP_WAKE_ALARM": {}, + "CAP_AUDIT_READ": {}, + "CAP_AUDIT_CONTROL": {}, + 
"CAP_SYS_RESOURCE": {}, + "CAP_BLOCK_SUSPEND": {}, + "CAP_IPC_LOCK": {}, + "CAP_IPC_OWNER": {}, + "CAP_LEASE": {}, + "CAP_NET_ADMIN": {}, + "CAP_NET_BROADCAST": {}, +} diff --git a/vendor/github.com/moby/buildkit/util/estargz/labels.go b/vendor/github.com/moby/buildkit/util/estargz/labels.go new file mode 100644 index 0000000000..50f9bf53bb --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/estargz/labels.go @@ -0,0 +1,38 @@ +package estargz + +import ( + "fmt" + "strings" + + ctdlabels "github.com/containerd/containerd/labels" + "github.com/containerd/stargz-snapshotter/estargz" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" +) + +func SnapshotLabels(ref string, descs []ocispecs.Descriptor, targetIndex int) map[string]string { + if len(descs) < targetIndex { + return nil + } + desc := descs[targetIndex] + labels := make(map[string]string) + for _, k := range []string{estargz.TOCJSONDigestAnnotation, estargz.StoreUncompressedSizeAnnotation} { + labels[k] = desc.Annotations[k] + } + labels["containerd.io/snapshot/remote/stargz.reference"] = ref + labels["containerd.io/snapshot/remote/stargz.digest"] = desc.Digest.String() + var ( + layersKey = "containerd.io/snapshot/remote/stargz.layers" + layers string + ) + for _, l := range descs[targetIndex:] { + ls := fmt.Sprintf("%s,", l.Digest.String()) + // This avoids the label hits the size limitation. + // Skipping layers is allowed here and only affects performance. + if err := ctdlabels.Validate(layersKey, layers+ls); err != nil { + break + } + layers += ls + } + labels[layersKey] = strings.TrimSuffix(layers, ",") + return labels +} diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go index fc9f7272a4..3c1b673e15 100644 --- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go +++ b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go @@ -39,7 +39,7 @@ func (g *Group) Do(ctx context.Context, key string, fn func(ctx context.Context) return v, err } // backoff logic - if backoff >= 3*time.Second { + if backoff >= 15*time.Second { err = errors.Wrapf(errRetryTimeout, "flightcontrol") return v, err } @@ -132,8 +132,16 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { select { case <-c.ready: c.mu.Unlock() - <-c.cleaned - return nil, errRetry + if c.err != nil { // on error retry + <-c.cleaned + return nil, errRetry + } + pw, ok, _ := progress.NewFromContext(ctx) + if ok { + c.progressState.add(pw) + } + return c.result, nil + case <-c.ctx.done: // could return if no error c.mu.Unlock() <-c.cleaned @@ -141,7 +149,7 @@ func (c *call) wait(ctx context.Context) (v interface{}, err error) { default: } - pw, ok, ctx := progress.FromContext(ctx) + pw, ok, ctx := progress.NewFromContext(ctx) if ok { c.progressState.add(pw) } diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go index f12f10bc8a..f52f18673e 100644 --- a/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go +++ b/vendor/github.com/moby/buildkit/util/grpcerrors/grpcerrors.go @@ -6,7 +6,7 @@ import ( "github.com/containerd/typeurl" gogotypes "github.com/gogo/protobuf/types" - "github.com/golang/protobuf/proto" + "github.com/golang/protobuf/proto" // nolint:staticcheck "github.com/golang/protobuf/ptypes/any" "github.com/moby/buildkit/util/stack" "github.com/sirupsen/logrus" @@ -169,7 +169,7 @@ func FromGRPC(err error) error { } } - 
err = status.FromProto(n).Err() + err = &grpcStatusError{st: status.FromProto(n)} for _, s := range stacks { if s != nil { @@ -188,6 +188,21 @@ func FromGRPC(err error) error { return stack.Enable(err) } +type grpcStatusError struct { + st *status.Status +} + +func (e *grpcStatusError) Error() string { + if e.st.Code() == codes.OK || e.st.Code() == codes.Unknown { + return e.st.Message() + } + return e.st.Code().String() + ": " + e.st.Message() +} + +func (e *grpcStatusError) GRPCStatus() *status.Status { + return e.st +} + type withCode struct { code codes.Code error diff --git a/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go b/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go new file mode 100644 index 0000000000..2ef1e75cfc --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go @@ -0,0 +1,32 @@ +package imageutil + +import ( + "encoding/base64" + "encoding/json" + + binfotypes "github.com/moby/buildkit/util/buildinfo/types" + "github.com/pkg/errors" +) + +// BuildInfo returns build info from image config. +func BuildInfo(dt []byte) (*binfotypes.BuildInfo, error) { + if len(dt) == 0 { + return nil, nil + } + var config binfotypes.ImageConfig + if err := json.Unmarshal(dt, &config); err != nil { + return nil, errors.Wrap(err, "failed to unmarshal image config") + } + if len(config.BuildInfo) == 0 { + return nil, nil + } + dtbi, err := base64.StdEncoding.DecodeString(config.BuildInfo) + if err != nil { + return nil, err + } + var bi binfotypes.BuildInfo + if err = json.Unmarshal(dtbi, &bi); err != nil { + return nil, errors.Wrap(err, "failed to decode buildinfo from image config") + } + return &bi, nil +} diff --git a/vendor/github.com/moby/buildkit/util/imageutil/config.go b/vendor/github.com/moby/buildkit/util/imageutil/config.go index 0be587058a..cfb9d417b3 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/config.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/config.go @@ -14,9 +14,10 @@ import ( "github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes/docker" "github.com/moby/buildkit/util/leaseutil" + "github.com/moby/buildkit/util/resolver/limited" "github.com/moby/buildkit/util/resolver/retryhandler" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) @@ -43,7 +44,7 @@ func AddLease(f func(context.Context) error) { leasesMu.Unlock() } -func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *specs.Platform) (digest.Digest, []byte, error) { +func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform) (digest.Digest, []byte, error) { // TODO: fix buildkit to take interface instead of struct var platform platforms.MatchComparer if p != nil { @@ -68,7 +69,7 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co }() } - desc := specs.Descriptor{ + desc := ocispecs.Descriptor{ Digest: ref.Digest(), } if desc.Digest != "" { @@ -101,7 +102,7 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co children := childrenConfigHandler(cache, platform) handlers := []images.Handler{ - retryhandler.New(remotes.FetchHandler(cache, fetcher), func(_ []byte) {}), + retryhandler.New(limited.FetchHandler(cache, fetcher, str), func(_ []byte) {}), children, } if err := 
images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil { @@ -121,10 +122,10 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co } func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc { - return func(ctx context.Context, desc specs.Descriptor) ([]specs.Descriptor, error) { - var descs []specs.Descriptor + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + var descs []ocispecs.Descriptor switch desc.MediaType { - case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest: + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest: p, err := content.ReadBlob(ctx, provider, desc) if err != nil { return nil, err @@ -132,19 +133,19 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo // TODO(stevvooe): We just assume oci manifest, for now. There may be // subtle differences from the docker version. - var manifest specs.Manifest + var manifest ocispecs.Manifest if err := json.Unmarshal(p, &manifest); err != nil { return nil, err } descs = append(descs, manifest.Config) - case images.MediaTypeDockerSchema2ManifestList, specs.MediaTypeImageIndex: + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: p, err := content.ReadBlob(ctx, provider, desc) if err != nil { return nil, err } - var index specs.Index + var index ocispecs.Index if err := json.Unmarshal(p, &index); err != nil { return nil, err } @@ -158,7 +159,7 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo } else { descs = append(descs, index.Manifests...) } - case images.MediaTypeDockerSchema2Config, specs.MediaTypeImageConfig, docker.LegacyConfigMediaType: + case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType: // childless data types. 
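// (config blobs and legacy config blobs have no children, so the walk
// bottoms out here; childrenConfigHandler only descends through manifest
// lists/indexes and manifests on the way down to the image config)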
return nil, nil default: @@ -183,19 +184,39 @@ func DetectManifestMediaType(ra content.ReaderAt) (string, error) { func DetectManifestBlobMediaType(dt []byte) (string, error) { var mfst struct { - MediaType string `json:"mediaType"` + MediaType *string `json:"mediaType"` Config json.RawMessage `json:"config"` + Manifests json.RawMessage `json:"manifests"` + Layers json.RawMessage `json:"layers"` } if err := json.Unmarshal(dt, &mfst); err != nil { return "", err } - if mfst.MediaType != "" { - return mfst.MediaType, nil + mt := images.MediaTypeDockerSchema2ManifestList + + if mfst.Config != nil || mfst.Layers != nil { + mt = images.MediaTypeDockerSchema2Manifest + + if mfst.Manifests != nil { + return "", errors.Errorf("invalid ambiguous manifest and manifest list") + } } - if mfst.Config != nil { - return images.MediaTypeDockerSchema2Manifest, nil + + if mfst.MediaType != nil { + switch *mfst.MediaType { + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + if mt != images.MediaTypeDockerSchema2ManifestList { + return "", errors.Errorf("mediaType in manifest does not match manifest contents") + } + mt = *mfst.MediaType + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest: + if mt != images.MediaTypeDockerSchema2Manifest { + return "", errors.Errorf("mediaType in manifest does not match manifest contents") + } + mt = *mfst.MediaType + } } - return images.MediaTypeDockerSchema2ManifestList, nil + return mt, nil } diff --git a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go index 591676fffd..10838bf50d 100644 --- a/vendor/github.com/moby/buildkit/util/imageutil/schema1.go +++ b/vendor/github.com/moby/buildkit/util/imageutil/schema1.go @@ -9,11 +9,11 @@ import ( "github.com/containerd/containerd/remotes" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) -func readSchema1Config(ctx context.Context, ref string, desc specs.Descriptor, fetcher remotes.Fetcher, cache ContentCache) (digest.Digest, []byte, error) { +func readSchema1Config(ctx context.Context, ref string, desc ocispecs.Descriptor, fetcher remotes.Fetcher, cache ContentCache) (digest.Digest, []byte, error) { rc, err := fetcher.Fetch(ctx, desc) if err != nil { return "", nil, err @@ -44,22 +44,22 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) { return nil, errors.Errorf("invalid schema1 manifest") } - var img specs.Image + var img ocispecs.Image if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil { return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history") } - img.RootFS = specs.RootFS{ + img.RootFS = ocispecs.RootFS{ Type: "layers", // filled in by exporter } - img.History = make([]specs.History, len(m.History)) + img.History = make([]ocispecs.History, len(m.History)) for i := range m.History { var h v1History if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil { return nil, errors.Wrap(err, "failed to unmarshal history") } - img.History[len(m.History)-i-1] = specs.History{ + img.History[len(m.History)-i-1] = ocispecs.History{ Author: h.Author, Comment: h.Comment, Created: &h.Created, diff --git a/vendor/github.com/moby/buildkit/util/network/host.go b/vendor/github.com/moby/buildkit/util/network/host.go index 09aefdfc25..c50268d45f 100644 --- 
a/vendor/github.com/moby/buildkit/util/network/host.go
+++ b/vendor/github.com/moby/buildkit/util/network/host.go
@@ -1,3 +1,4 @@
+//go:build !windows
// +build !windows

package network
diff --git a/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go
new file mode 100644
index 0000000000..12f153f0b6
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/overlay/overlay_linux.go
@@ -0,0 +1,468 @@
+//go:build linux
+// +build linux
+
+package overlay
+
+import (
+ "bytes"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+ "sync"
+ "syscall"
+
+ "github.com/containerd/containerd/archive"
+ "github.com/containerd/containerd/mount"
+ "github.com/containerd/continuity/devices"
+ "github.com/containerd/continuity/fs"
+ "github.com/containerd/continuity/sysx"
+ "github.com/pkg/errors"
+ "golang.org/x/sys/unix"
+)
+
+// GetUpperdir parses the passed mounts and identifies the directory
+// that contains the diff between upper and lower.
+func GetUpperdir(lower, upper []mount.Mount) (string, error) {
+ var upperdir string
+ if len(lower) == 0 && len(upper) == 1 { // upper is the bottommost snapshot
+ // Get layer directories of upper snapshot
+ upperM := upper[0]
+ if upperM.Type != "bind" {
+ return "", errors.Errorf("bottommost upper must be a bind mount but %q", upperM.Type)
+ }
+ upperdir = upperM.Source
+ } else if len(lower) == 1 && len(upper) == 1 {
+ // Get layer directories of lower snapshot
+ var lowerlayers []string
+ lowerM := lower[0]
+ switch lowerM.Type {
+ case "bind":
+ // lower snapshot is a bind mount of one layer
+ lowerlayers = []string{lowerM.Source}
+ case "overlay":
+ // lower snapshot is an overlay mount of multiple layers
+ var err error
+ lowerlayers, err = GetOverlayLayers(lowerM)
+ if err != nil {
+ return "", err
+ }
+ default:
+ return "", errors.Errorf("cannot get layer information from mount option (type = %q)", lowerM.Type)
+ }
+
+ // Get layer directories of upper snapshot
+ upperM := upper[0]
+ if upperM.Type != "overlay" {
+ return "", errors.Errorf("upper snapshot isn't overlay mounted (type = %q)", upperM.Type)
+ }
+ upperlayers, err := GetOverlayLayers(upperM)
+ if err != nil {
+ return "", err
+ }
+
+ // Check if the diff directory can be determined
+ if len(upperlayers) != len(lowerlayers)+1 {
+ return "", errors.Errorf("cannot determine diff of more than one upper directory")
+ }
+ for i := 0; i < len(lowerlayers); i++ {
+ if upperlayers[i] != lowerlayers[i] {
+ return "", errors.Errorf("layer %d must be common between upper and lower snapshots", i)
+ }
+ }
+ upperdir = upperlayers[len(upperlayers)-1] // get the topmost layer that indicates diff
+ } else {
+ return "", errors.Errorf("multiple mount configurations are not supported")
+ }
+ if upperdir == "" {
+ return "", errors.Errorf("cannot determine upperdir from mount option")
+ }
+ return upperdir, nil
+}
+
+// GetOverlayLayers returns all layer directories of an overlayfs mount.
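+// For illustration (a hypothetical mount value, not part of this patch):
+//
+//	m := mount.Mount{Type: "overlay", Source: "overlay",
+//		Options: []string{"lowerdir=/l2:/l1", "upperdir=/u", "workdir=/w"}}
+//	layers, _ := GetOverlayLayers(m) // ["/l1", "/l2", "/u"]
+//
+// The lowerdir list is reversed so that index 0 is the bottommost layer, and
+// the upperdir, when present, is appended last; any unrecognized option makes
+// the helper return an error so callers can fall back to the walking differ.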
+func GetOverlayLayers(m mount.Mount) ([]string, error) {
+ var u string
+ var uFound bool
+ var l []string // l[0] = bottommost
+ for _, o := range m.Options {
+ if strings.HasPrefix(o, "upperdir=") {
+ u, uFound = strings.TrimPrefix(o, "upperdir="), true
+ } else if strings.HasPrefix(o, "lowerdir=") {
+ l = strings.Split(strings.TrimPrefix(o, "lowerdir="), ":")
+ for i, j := 0, len(l)-1; i < j; i, j = i+1, j-1 {
+ l[i], l[j] = l[j], l[i] // make l[0] = bottommost
+ }
+ } else if strings.HasPrefix(o, "workdir=") || o == "index=off" || o == "userxattr" || strings.HasPrefix(o, "redirect_dir=") {
+ // these options may be specified by the snapshotter but do not indicate dir locations.
+ continue
+ } else {
+ // encountering an unknown option. return an error and fall back to the walking differ
+ // to avoid an unexpected diff.
+ return nil, errors.Errorf("unknown option %q specified by snapshotter", o)
+ }
+ }
+ if uFound {
+ return append(l, u), nil
+ }
+ return l, nil
+}
+
+// WriteUpperdir writes a layer tar archive into the specified writer, based on
+// the diff information stored in the upperdir.
+func WriteUpperdir(ctx context.Context, w io.Writer, upperdir string, lower []mount.Mount) error {
+ emptyLower, err := ioutil.TempDir("", "buildkit") // empty directory used for the lower of diff view
+ if err != nil {
+ return errors.Wrapf(err, "failed to create temp dir")
+ }
+ defer os.Remove(emptyLower)
+ upperView := []mount.Mount{
+ {
+ Type: "overlay",
+ Source: "overlay",
+ Options: []string{fmt.Sprintf("lowerdir=%s", strings.Join([]string{upperdir, emptyLower}, ":"))},
+ },
+ }
+ return mount.WithTempMount(ctx, lower, func(lowerRoot string) error {
+ return mount.WithTempMount(ctx, upperView, func(upperViewRoot string) error {
+ cw := archive.NewChangeWriter(&cancellableWriter{ctx, w}, upperViewRoot)
+ if err := Changes(ctx, cw.HandleChange, upperdir, upperViewRoot, lowerRoot); err != nil {
+ if err2 := cw.Close(); err2 != nil {
+ return errors.Wrapf(err, "failed to record upperdir changes (close error: %v)", err2)
+ }
+ return errors.Wrapf(err, "failed to record upperdir changes")
+ }
+ return cw.Close()
+ })
+ })
+}
+
+type cancellableWriter struct {
+ ctx context.Context
+ w io.Writer
+}
+
+func (w *cancellableWriter) Write(p []byte) (int, error) {
+ if err := w.ctx.Err(); err != nil {
+ return 0, err
+ }
+ return w.w.Write(p)
+}
+
+// Changes is continuity's `fs.Changes`-like method but leverages overlayfs's
+// "upperdir" for computing the diff. "upperdirView" is an overlayfs-mounted view of
+// the upperdir that doesn't contain whiteouts. This is used for computing
+// changes under opaque directories.
+func Changes(ctx context.Context, changeFn fs.ChangeFunc, upperdir, upperdirView, base string) error {
+ return filepath.Walk(upperdir, func(path string, f os.FileInfo, err error) error {
+ if err != nil {
+ return err
+ }
+ if ctx.Err() != nil {
+ return ctx.Err()
+ }
+
+ // Rebase path
+ path, err = filepath.Rel(upperdir, path)
+ if err != nil {
+ return err
+ }
+ path = filepath.Join(string(os.PathSeparator), path)
+
+ // Skip root
+ if path == string(os.PathSeparator) {
+ return nil
+ }
+
+ // Check redirect
+ if redirect, err := checkRedirect(upperdir, path, f); err != nil {
+ return err
+ } else if redirect {
+ // Return an error when redirect_dir is enabled, which can result in a wrong diff.
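+ // (redirect_dir lets overlayfs relocate a renamed directory's contents by
+ // recording a trusted.overlay.redirect xattr instead of copying files up,
+ // so a plain walk of the upperdir would misattribute those entries.)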
+ // TODO: support redirect_dir
+ return fmt.Errorf("redirect_dir is used but it's not supported in overlayfs differ")
+ }
+
+ // Check if this is a deleted entry
+ isDelete, skip, err := checkDelete(upperdir, path, base, f)
+ if err != nil {
+ return err
+ } else if skip {
+ return nil
+ }
+
+ var kind fs.ChangeKind
+ var skipRecord bool
+ if isDelete {
+ // This is a deleted entry.
+ kind = fs.ChangeKindDelete
+ // Leave f set to the FileInfo for the whiteout device in case the caller wants it, e.g.
+ // the merge code uses it to hardlink in the whiteout device to merged snapshots
+ } else if baseF, err := os.Lstat(filepath.Join(base, path)); err == nil {
+ // File exists in the base layer. Thus this is modified.
+ kind = fs.ChangeKindModify
+ // Avoid including a directory that hasn't been modified. If /foo/bar/baz is modified,
+ // then /foo will appear here even if it hasn't been modified, because it's the parent of bar.
+ if same, err := sameDirent(baseF, f, filepath.Join(base, path), filepath.Join(upperdirView, path)); same {
+ skipRecord = true // Both are the same, don't record the change
+ } else if err != nil {
+ return err
+ }
+ } else if os.IsNotExist(err) || errors.Is(err, unix.ENOTDIR) {
+ // File doesn't exist in the base layer. Thus this is added.
+ kind = fs.ChangeKindAdd
+ } else if err != nil {
+ return errors.Wrap(err, "failed to stat base file during overlay diff")
+ }
+
+ if !skipRecord {
+ if err := changeFn(kind, path, f, nil); err != nil {
+ return err
+ }
+ }
+
+ if f != nil {
+ if isOpaque, err := checkOpaque(upperdir, path, base, f); err != nil {
+ return err
+ } else if isOpaque {
+ // This is an opaque directory. Start a new walking differ to get adds/deletes of
+ // this directory. We use "upperdirView" directory which doesn't contain whiteouts.
+ if err := fs.Changes(ctx, filepath.Join(base, path), filepath.Join(upperdirView, path),
+ func(k fs.ChangeKind, p string, f os.FileInfo, err error) error {
+ return changeFn(k, filepath.Join(path, p), f, err) // rebase path to be based on the opaque dir
+ },
+ ); err != nil {
+ return err
+ }
+ return filepath.SkipDir // We completed this directory. Do not walk files under this directory anymore.
+ }
+ }
+ return nil
+ })
+}
+
+// checkDelete checks if the specified file is a whiteout
+func checkDelete(upperdir string, path string, base string, f os.FileInfo) (delete, skip bool, _ error) {
+ if f.Mode()&os.ModeCharDevice != 0 {
+ if _, ok := f.Sys().(*syscall.Stat_t); ok {
+ maj, min, err := devices.DeviceInfo(f)
+ if err != nil {
+ return false, false, errors.Wrapf(err, "failed to get device info")
+ }
+ if maj == 0 && min == 0 {
+ // This file is a whiteout (char 0/0) that indicates this is deleted from the base
+ if _, err := os.Lstat(filepath.Join(base, path)); err != nil {
+ if !os.IsNotExist(err) {
+ return false, false, errors.Wrapf(err, "failed to lstat")
+ }
+ // This file doesn't exist even in the base dir.
+ // We don't need whiteout. Just skip this file.
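+ // (a whiteout here would only re-delete something the base never had,
+ // so dropping it keeps the generated layer minimal)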
+ return false, true, nil
+ }
+ return true, false, nil
+ }
+ }
+ }
+ return false, false, nil
+}
+
+// checkOpaque checks if the specified file is an opaque directory
+func checkOpaque(upperdir string, path string, base string, f os.FileInfo) (isOpaque bool, _ error) {
+ if f.IsDir() {
+ for _, oKey := range []string{"trusted.overlay.opaque", "user.overlay.opaque"} {
+ opaque, err := sysx.LGetxattr(filepath.Join(upperdir, path), oKey)
+ if err != nil && err != unix.ENODATA {
+ return false, errors.Wrapf(err, "failed to retrieve %s attr", oKey)
+ } else if len(opaque) == 1 && opaque[0] == 'y' {
+ // This is an opaque whiteout directory.
+ if _, err := os.Lstat(filepath.Join(base, path)); err != nil {
+ if !os.IsNotExist(err) {
+ return false, errors.Wrapf(err, "failed to lstat")
+ }
+ // This file doesn't exist even in the base dir. We don't need to treat this as opaque.
+ return false, nil
+ }
+ return true, nil
+ }
+ }
+ }
+ return false, nil
+}
+
+// checkRedirect checks if the specified path enables redirect_dir.
+func checkRedirect(upperdir string, path string, f os.FileInfo) (bool, error) {
+ if f.IsDir() {
+ rKey := "trusted.overlay.redirect"
+ redirect, err := sysx.LGetxattr(filepath.Join(upperdir, path), rKey)
+ if err != nil && err != unix.ENODATA {
+ return false, errors.Wrapf(err, "failed to retrieve %s attr", rKey)
+ }
+ return len(redirect) > 0, nil
+ }
+ return false, nil
+}
+
+// sameDirent performs continuity-compatible comparison of files and directories.
+// https://github.com/containerd/continuity/blob/v0.1.0/fs/path.go#L91-L133
+// This will only do a slow content comparison of two files if they have all the
+// same metadata and both have truncated nanosecond mtime timestamps. In practice,
+// this can only happen if both the base file in the lowerdirs has a truncated
+// timestamp (i.e. was unpacked from a tar) and the user did something like
+// "mv foo tmp && mv tmp foo" that results in the file being copied up to the
+// upperdir without making any changes to it. This is much rarer than similar
+// cases in the double-walking differ, where the slow content comparison will
+// be used whenever a file with a truncated timestamp is in the lowerdir at
+// all and left unmodified.
+func sameDirent(f1, f2 os.FileInfo, f1fullPath, f2fullPath string) (bool, error) {
+ if os.SameFile(f1, f2) {
+ return true, nil
+ }
+
+ equalStat, err := compareSysStat(f1.Sys(), f2.Sys())
+ if err != nil || !equalStat {
+ return equalStat, err
+ }
+
+ if eq, err := compareCapabilities(f1fullPath, f2fullPath); err != nil || !eq {
+ return eq, err
+ }
+
+ if !f1.IsDir() {
+ if f1.Size() != f2.Size() {
+ return false, nil
+ }
+ t1 := f1.ModTime()
+ t2 := f2.ModTime()
+
+ if t1.Unix() != t2.Unix() {
+ return false, nil
+ }
+
+ // If the timestamp may have been truncated in both of the
+ // files, check content of file to determine difference
+ if t1.Nanosecond() == 0 && t2.Nanosecond() == 0 {
+ if (f1.Mode() & os.ModeSymlink) == os.ModeSymlink {
+ return compareSymlinkTarget(f1fullPath, f2fullPath)
+ }
+ if f1.Size() == 0 {
+ return true, nil
+ }
+ return compareFileContent(f1fullPath, f2fullPath)
+ } else if t1.Nanosecond() != t2.Nanosecond() {
+ return false, nil
+ }
+ }
+
+ return true, nil
+}
+
+// Ported from continuity project
+// https://github.com/containerd/continuity/blob/v0.1.0/fs/diff_unix.go#L43-L54
+// Copyright The containerd Authors.
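+//
+// compareSysStat reports whether two files' unix metadata (mode, uid, gid,
+// rdev) match; a mismatch makes sameDirent treat the entry as modified
+// without falling through to the slower content comparison.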
+func compareSysStat(s1, s2 interface{}) (bool, error) { + ls1, ok := s1.(*syscall.Stat_t) + if !ok { + return false, nil + } + ls2, ok := s2.(*syscall.Stat_t) + if !ok { + return false, nil + } + + return ls1.Mode == ls2.Mode && ls1.Uid == ls2.Uid && ls1.Gid == ls2.Gid && ls1.Rdev == ls2.Rdev, nil +} + +// Ported from continuity project +// https://github.com/containerd/continuity/blob/v0.1.0/fs/diff_unix.go#L56-L66 +// Copyright The containerd Authors. +func compareCapabilities(p1, p2 string) (bool, error) { + c1, err := sysx.LGetxattr(p1, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p1) + } + c2, err := sysx.LGetxattr(p2, "security.capability") + if err != nil && err != sysx.ENODATA { + return false, errors.Wrapf(err, "failed to get xattr for %s", p2) + } + return bytes.Equal(c1, c2), nil +} + +// Ported from continuity project +// https://github.com/containerd/continuity/blob/bce1c3f9669b6f3e7f6656ee715b0b4d75fa64a6/fs/path.go#L135 +// Copyright The containerd Authors. +func compareSymlinkTarget(p1, p2 string) (bool, error) { + t1, err := os.Readlink(p1) + if err != nil { + return false, err + } + t2, err := os.Readlink(p2) + if err != nil { + return false, err + } + return t1 == t2, nil +} + +var bufPool = sync.Pool{ + New: func() interface{} { + b := make([]byte, 32*1024) + return &b + }, +} + +// Ported from continuity project +// https://github.com/containerd/continuity/blob/bce1c3f9669b6f3e7f6656ee715b0b4d75fa64a6/fs/path.go#L151 +// Copyright The containerd Authors. +func compareFileContent(p1, p2 string) (bool, error) { + f1, err := os.Open(p1) + if err != nil { + return false, err + } + defer f1.Close() + if stat, err := f1.Stat(); err != nil { + return false, err + } else if !stat.Mode().IsRegular() { + return false, errors.Errorf("%s is not a regular file", p1) + } + + f2, err := os.Open(p2) + if err != nil { + return false, err + } + defer f2.Close() + if stat, err := f2.Stat(); err != nil { + return false, err + } else if !stat.Mode().IsRegular() { + return false, errors.Errorf("%s is not a regular file", p2) + } + + b1 := bufPool.Get().(*[]byte) + defer bufPool.Put(b1) + b2 := bufPool.Get().(*[]byte) + defer bufPool.Put(b2) + for { + n1, err1 := io.ReadFull(f1, *b1) + if err1 == io.ErrUnexpectedEOF { + // it's expected to get EOF when file size isn't a multiple of chunk size, consolidate these error types + err1 = io.EOF + } + if err1 != nil && err1 != io.EOF { + return false, err1 + } + n2, err2 := io.ReadFull(f2, *b2) + if err2 == io.ErrUnexpectedEOF { + err2 = io.EOF + } + if err2 != nil && err2 != io.EOF { + return false, err2 + } + if n1 != n2 || !bytes.Equal((*b1)[:n1], (*b2)[:n2]) { + return false, nil + } + if err1 == io.EOF && err2 == io.EOF { + return true, nil + } + } +} diff --git a/vendor/github.com/moby/buildkit/util/progress/controller/controller.go b/vendor/github.com/moby/buildkit/util/progress/controller/controller.go new file mode 100644 index 0000000000..3d9693d44c --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/progress/controller/controller.go @@ -0,0 +1,96 @@ +package controller + +import ( + "context" + "sync" + "time" + + "github.com/moby/buildkit/client" + "github.com/moby/buildkit/identity" + "github.com/moby/buildkit/solver/pb" + "github.com/moby/buildkit/util/progress" + digest "github.com/opencontainers/go-digest" +) + +type Controller struct { + count int64 + started *time.Time + writer progress.Writer + mu sync.Mutex + id string + + Digest 
digest.Digest + Name string + WriterFactory progress.WriterFactory + ProgressGroup *pb.ProgressGroup +} + +var _ progress.Controller = &Controller{} + +func (c *Controller) Start(ctx context.Context) (context.Context, func(error)) { + c.mu.Lock() + defer c.mu.Unlock() + c.count++ + if c.count == 1 { + if c.started == nil { + now := time.Now() + c.started = &now + c.writer, _, _ = c.WriterFactory(ctx) + c.id = identity.NewID() + } + + if c.Digest != "" { + c.writer.Write(c.id, client.Vertex{ + Digest: c.Digest, + Name: c.Name, + Started: c.started, + ProgressGroup: c.ProgressGroup, + }) + } + } + return progress.WithProgress(ctx, c.writer), func(err error) { + c.mu.Lock() + defer c.mu.Unlock() + c.count-- + if c.count == 0 { + now := time.Now() + var errString string + if err != nil { + errString = err.Error() + } + if c.Digest != "" { + c.writer.Write(c.id, client.Vertex{ + Digest: c.Digest, + Name: c.Name, + Started: c.started, + Completed: &now, + Error: errString, + ProgressGroup: c.ProgressGroup, + }) + } + c.writer.Close() + c.started = nil + c.id = "" + } + } +} + +func (c *Controller) Status(id string, action string) func() { + start := time.Now() + if c.writer != nil { + c.writer.Write(id, progress.Status{ + Action: action, + Started: &start, + }) + } + return func() { + complete := time.Now() + if c.writer != nil { + c.writer.Write(id, progress.Status{ + Action: action, + Started: &start, + Completed: &complete, + }) + } + } +} diff --git a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go index 15944d8c62..a5c831c873 100644 --- a/vendor/github.com/moby/buildkit/util/progress/logs/logs.go +++ b/vendor/github.com/moby/buildkit/util/progress/logs/logs.go @@ -10,6 +10,7 @@ import ( "sync" "time" + "github.com/armon/circbuf" "github.com/moby/buildkit/client" "github.com/moby/buildkit/identity" "github.com/moby/buildkit/util/progress" @@ -27,12 +28,17 @@ const ( var configCheckOnce sync.Once -func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser) { - return newStreamWriter(ctx, stdout, printOutput), newStreamWriter(ctx, stderr, printOutput) +func NewLogStreams(ctx context.Context, printOutput bool) (io.WriteCloser, io.WriteCloser, func()) { + stdout := newStreamWriter(ctx, stdout, printOutput) + stderr := newStreamWriter(ctx, stderr, printOutput) + return stdout, stderr, func() { + stdout.flushBuffer() + stderr.flushBuffer() + } } -func newStreamWriter(ctx context.Context, stream int, printOutput bool) io.WriteCloser { - pw, _, _ := progress.FromContext(ctx) +func newStreamWriter(ctx context.Context, stream int, printOutput bool) *streamWriter { + pw, _, _ := progress.NewFromContext(ctx) return &streamWriter{ pw: pw, stream: stream, @@ -49,6 +55,7 @@ type streamWriter struct { size int clipping bool clipReasonSpeed bool + buf *circbuf.Buffer } func (sw *streamWriter) checkLimit(n int) int { @@ -97,7 +104,19 @@ func (sw *streamWriter) clipLimitMessage() string { func (sw *streamWriter) Write(dt []byte) (int, error) { oldSize := len(dt) - dt = append([]byte{}, dt[:sw.checkLimit(len(dt))]...) + limit := sw.checkLimit(len(dt)) + if sw.buf == nil && limit < len(dt) { + var err error + sw.buf, err = circbuf.NewBuffer(256 * 1024) + if err != nil { + return 0, err + } + } + if sw.buf != nil { + sw.buf.Write(dt) + } + + dt = append([]byte{}, dt[:limit]...) 
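+
+ // Once clipping has started, the full input above also lands in the 256KiB
+ // ring buffer, while only the first `limit` bytes reach the progress stream;
+ // the flush func returned by NewLogStreams replays the buffered tail via
+ // flushBuffer so the end of a clipped log can still be recovered.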
if sw.clipping && oldSize == len(dt) {
sw.clipping = false
@@ -107,32 +126,49 @@ func (sw *streamWriter) Write(dt []byte) (int, error) {
sw.clipping = true
}
- if len(dt) != 0 {
- sw.pw.Write(identity.NewID(), client.VertexLog{
- Stream: sw.stream,
- Data: dt,
- })
- if sw.printOutput {
- switch sw.stream {
- case 1:
- return os.Stdout.Write(dt)
- case 2:
- return os.Stderr.Write(dt)
- default:
- return 0, errors.Errorf("invalid stream %d", sw.stream)
- }
- }
+ _, err := sw.write(dt)
+ if err != nil {
+ return 0, err
}
return oldSize, nil
}
+func (sw *streamWriter) write(dt []byte) (int, error) {
+ if len(dt) == 0 {
+ return 0, nil
+ }
+ sw.pw.Write(identity.NewID(), client.VertexLog{
+ Stream: sw.stream,
+ Data: dt,
+ })
+ if sw.printOutput {
+ switch sw.stream {
+ case 1:
+ return os.Stdout.Write(dt)
+ case 2:
+ return os.Stderr.Write(dt)
+ default:
+ return 0, errors.Errorf("invalid stream %d", sw.stream)
+ }
+ }
+ return len(dt), nil
+}
+
+func (sw *streamWriter) flushBuffer() {
+ if sw.buf == nil {
+ return
+ }
+ _, _ = sw.write(sw.buf.Bytes())
+ sw.buf = nil
+}
+
func (sw *streamWriter) Close() error {
return sw.pw.Close()
}
func LoggerFromContext(ctx context.Context) func([]byte) {
return func(dt []byte) {
- pw, _, _ := progress.FromContext(ctx)
+ pw, _, _ := progress.NewFromContext(ctx)
defer pw.Close()
pw.Write(identity.NewID(), client.VertexLog{
Stream: stderr,
diff --git a/vendor/github.com/moby/buildkit/util/progress/multireader.go b/vendor/github.com/moby/buildkit/util/progress/multireader.go
index 2bd3f2ca86..8d8bbf54c5 100644
--- a/vendor/github.com/moby/buildkit/util/progress/multireader.go
+++ b/vendor/github.com/moby/buildkit/util/progress/multireader.go
@@ -28,7 +28,7 @@ func (mr *MultiReader) Reader(ctx context.Context) Reader {
defer mr.mu.Unlock()
pr, ctx, closeWriter := NewContext(ctx)
- pw, _, ctx := FromContext(ctx)
+ pw, _, ctx := NewFromContext(ctx)
w := pw.(*progressWriter)
mr.writers[w] = closeWriter
diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go
index 3ce212948c..83ca6672a8 100644
--- a/vendor/github.com/moby/buildkit/util/progress/progress.go
+++ b/vendor/github.com/moby/buildkit/util/progress/progress.go
@@ -18,22 +18,37 @@ type contextKeyT string
var contextKey = contextKeyT("buildkit/util/progress")
-// FromContext returns a progress writer from a context.
+// WriterFactory will generate a new progress Writer and return a new Context
+// with the new Writer stored. It is the caller's responsibility to Close the
+// returned Writer to avoid resource leaks.
+type WriterFactory func(ctx context.Context) (Writer, bool, context.Context)
+
+// FromContext returns a WriterFactory to generate new progress writers based
+// on a Writer previously stored in the Context.
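+// A minimal sketch of the intended pattern (illustrative):
+//
+//	factory := progress.FromContext(ctx) // capture the stored writer once
+//	// ... later, possibly per sub-task:
+//	pw, ok, ctx := factory(ctx)
+//	defer pw.Close()
+//
+// The factory closes over the Writer found when FromContext was called, so
+// writers can be materialized later (or repeatedly) without re-consulting
+// the original Context.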
+func FromContext(ctx context.Context, opts ...WriterOption) WriterFactory {
v := ctx.Value(contextKey)
- pw, ok := v.(*progressWriter)
- if !ok {
- if pw, ok := v.(*MultiWriter); ok {
- return pw, true, ctx
+ return func(ctx context.Context) (Writer, bool, context.Context) {
+ pw, ok := v.(*progressWriter)
+ if !ok {
+ if pw, ok := v.(*MultiWriter); ok {
+ return pw, true, ctx
+ }
+ return &noOpWriter{}, false, ctx
}
- return &noOpWriter{}, false, ctx
+ pw = newWriter(pw)
+ for _, o := range opts {
+ o(pw)
+ }
+ ctx = context.WithValue(ctx, contextKey, pw)
+ return pw, true, ctx
}
- pw = newWriter(pw)
- for _, o := range opts {
- o(pw)
- }
- ctx = context.WithValue(ctx, contextKey, pw)
- return pw, true, ctx
+}
+
+// NewFromContext creates a new Writer based on a Writer previously stored
+// in the Context and returns a new Context with the new Writer stored. It is
+// the caller's responsibility to Close the returned Writer to avoid resource leaks.
+func NewFromContext(ctx context.Context, opts ...WriterOption) (Writer, bool, context.Context) {
+ return FromContext(ctx, opts...)(ctx)
}
type WriterOption func(Writer)
diff --git a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go
index f16a06998d..b743706ed9 100644
--- a/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go
+++ b/vendor/github.com/moby/buildkit/util/pull/pullprogress/progress.go
@@ -9,7 +9,7 @@ import (
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes"
"github.com/moby/buildkit/util/progress"
- ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
@@ -24,7 +24,7 @@ type ProviderWithProgress struct {
Manager PullManager
}
-func (p *ProviderWithProgress) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
+func (p *ProviderWithProgress) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (content.ReaderAt, error) {
ra, err := p.Provider.ReaderAt(ctx, desc)
if err != nil {
return nil, err
@@ -57,7 +57,7 @@ type FetcherWithProgress struct {
Manager PullManager
}
-func (f *FetcherWithProgress) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
+func (f *FetcherWithProgress) Fetch(ctx context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) {
rc, err := f.Fetcher.Fetch(ctx, desc)
if err != nil {
return nil, err
@@ -85,8 +85,7 @@ func (r readerWithCancel) Close() error {
return r.ReadCloser.Close()
}
-func trackProgress(ctx context.Context, desc ocispec.Descriptor, manager PullManager, doneCh chan<- struct{}) {
-
+func trackProgress(ctx context.Context, desc ocispecs.Descriptor, manager PullManager, doneCh chan<- struct{}) {
defer close(doneCh)
ticker := time.NewTicker(150 * time.Millisecond)
@@ -96,7 +95,7 @@ func trackProgress(ctx context.Context, desc ocispec.Descriptor, manager PullMan
ticker.Stop()
}()
- pw, _, _ := progress.FromContext(ctx)
+ pw, _, _ := progress.NewFromContext(ctx)
defer pw.Close()
ingestRef := remotes.MakeRefKey(ctx, desc)
@@ -133,6 +132,5 @@ func trackProgress(ctx context.Context, desc ocispec.Descriptor, manager PullMan
})
return
}
-
}
}
diff --git a/vendor/github.com/moby/buildkit/util/push/push.go b/vendor/github.com/moby/buildkit/util/push/push.go
new file mode 100644
index 0000000000..ffa3d35f32
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/push/push.go
@@ -0,0 +1,347
@@
+package push
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "strings"
+ "sync"
+ "time"
+
+ "github.com/containerd/containerd/content"
+ "github.com/containerd/containerd/errdefs"
+ "github.com/containerd/containerd/images"
+ "github.com/containerd/containerd/log"
+ "github.com/containerd/containerd/remotes"
+ "github.com/containerd/containerd/remotes/docker"
+ "github.com/docker/distribution/reference"
+ "github.com/moby/buildkit/session"
+ "github.com/moby/buildkit/util/flightcontrol"
+ "github.com/moby/buildkit/util/imageutil"
+ "github.com/moby/buildkit/util/progress"
+ "github.com/moby/buildkit/util/progress/logs"
+ "github.com/moby/buildkit/util/resolver"
+ resolverconfig "github.com/moby/buildkit/util/resolver/config"
+ "github.com/moby/buildkit/util/resolver/limited"
+ "github.com/moby/buildkit/util/resolver/retryhandler"
+ digest "github.com/opencontainers/go-digest"
+ ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+ "github.com/sirupsen/logrus"
+)
+
+type pusher struct {
+ remotes.Pusher
+}
+
+// Pusher creates a new pusher instance for the resolver.
+// The containerd resolver.Pusher() method is broken and should not be called directly;
+// we need to wrap it to mask interface detection.
+func Pusher(ctx context.Context, resolver remotes.Resolver, ref string) (remotes.Pusher, error) {
+ p, err := resolver.Pusher(ctx, ref)
+ if err != nil {
+ return nil, err
+ }
+ return &pusher{Pusher: p}, nil
+}
+
+func Push(ctx context.Context, sm *session.Manager, sid string, provider content.Provider, manager content.Manager, dgst digest.Digest, ref string, insecure bool, hosts docker.RegistryHosts, byDigest bool, annotations map[digest.Digest]map[string]string) error {
+ desc := ocispecs.Descriptor{
+ Digest: dgst,
+ }
+ parsed, err := reference.ParseNormalizedNamed(ref)
+ if err != nil {
+ return err
+ }
+ if byDigest && !reference.IsNameOnly(parsed) {
+ return errors.Errorf("can't push tagged ref %s by digest", parsed.String())
+ }
+
+ if byDigest {
+ ref = parsed.Name()
+ } else {
+ // add digest to ref; this is what containerd uses to choose the root manifest from all manifests
+ r, err := reference.WithDigest(reference.TagNameOnly(parsed), dgst)
+ if err != nil {
+ return errors.Wrapf(err, "failed to combine ref %s with digest %s", ref, dgst)
+ }
+ ref = r.String()
+ }
+
+ scope := "push"
+ if insecure {
+ insecureTrue := true
+ httpTrue := true
+ hosts = resolver.NewRegistryConfig(map[string]resolverconfig.RegistryConfig{
+ reference.Domain(parsed): {
+ Insecure: &insecureTrue,
+ PlainHTTP: &httpTrue,
+ },
+ })
+ scope += ":insecure"
+ }
+
+ resolver := resolver.DefaultPool.GetResolver(hosts, ref, scope, sm, session.NewGroup(sid))
+
+ pusher, err := Pusher(ctx, resolver, ref)
+ if err != nil {
+ return err
+ }
+
+ var m sync.Mutex
+ manifestStack := []ocispecs.Descriptor{}
+
+ filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) {
+ switch desc.MediaType {
+ case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest,
+ images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
+ m.Lock()
+ manifestStack = append(manifestStack, desc)
+ m.Unlock()
+ return nil, images.ErrStopHandler
+ default:
+ return nil, nil
+ }
+ })
+
+ pushHandler := retryhandler.New(limited.PushHandler(pusher, provider, ref), logs.LoggerFromContext(ctx))
+ pushUpdateSourceHandler, err := updateDistributionSourceHandler(manager, pushHandler, ref)
+ if err != nil {
+ return err
+ }
+
+
handlers := append([]images.Handler{}, + images.HandlerFunc(annotateDistributionSourceHandler(manager, annotations, childrenHandler(provider))), + filterHandler, + dedupeHandler(pushUpdateSourceHandler), + ) + + ra, err := provider.ReaderAt(ctx, desc) + if err != nil { + return err + } + + mtype, err := imageutil.DetectManifestMediaType(ra) + if err != nil { + return err + } + + layersDone := oneOffProgress(ctx, "pushing layers") + err = images.Dispatch(ctx, skipNonDistributableBlobs(images.Handlers(handlers...)), nil, ocispecs.Descriptor{ + Digest: dgst, + Size: ra.Size(), + MediaType: mtype, + }) + if err := layersDone(err); err != nil { + return err + } + + mfstDone := oneOffProgress(ctx, fmt.Sprintf("pushing manifest for %s", ref)) + for i := len(manifestStack) - 1; i >= 0; i-- { + if _, err := pushHandler(ctx, manifestStack[i]); err != nil { + return mfstDone(err) + } + } + return mfstDone(nil) +} + +// TODO: the containerd function for this is filtering too much, that needs to be fixed. +// For now we just carry this. +func skipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + if images.IsNonDistributable(desc.MediaType) { + log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob") + return nil, images.ErrSkipDesc + } + return f(ctx, desc) + } +} + +func annotateDistributionSourceHandler(manager content.Manager, annotations map[digest.Digest]map[string]string, f images.HandlerFunc) func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + children, err := f(ctx, desc) + if err != nil { + return nil, err + } + + // only add distribution source for the config or blob data descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest, + images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + default: + return children, nil + } + + for i := range children { + child := children[i] + + if m, ok := annotations[child.Digest]; ok { + for k, v := range m { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + } + children[i] = child + + info, err := manager.Info(ctx, child.Digest) + if errors.Is(err, errdefs.ErrNotFound) { + continue + } else if err != nil { + return nil, err + } + + for k, v := range info.Labels { + if !strings.HasPrefix(k, "containerd.io/distribution.source.") { + continue + } + + if child.Annotations == nil { + child.Annotations = map[string]string{} + } + child.Annotations[k] = v + } + + children[i] = child + } + return children, nil + } +} + +func oneOffProgress(ctx context.Context, id string) func(err error) error { + pw, _, _ := progress.NewFromContext(ctx) + now := time.Now() + st := progress.Status{ + Started: &now, + } + pw.Write(id, st) + return func(err error) error { + // TODO: set error on status + now := time.Now() + st.Completed = &now + pw.Write(id, st) + pw.Close() + return err + } +} + +func childrenHandler(provider content.Provider) images.HandlerFunc { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + var descs []ocispecs.Descriptor + switch desc.MediaType { + case images.MediaTypeDockerSchema2Manifest, 
ocispecs.MediaTypeImageManifest: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + // TODO(stevvooe): We just assume oci manifest, for now. There may be + // subtle differences from the docker version. + var manifest ocispecs.Manifest + if err := json.Unmarshal(p, &manifest); err != nil { + return nil, err + } + + descs = append(descs, manifest.Config) + descs = append(descs, manifest.Layers...) + case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex: + p, err := content.ReadBlob(ctx, provider, desc) + if err != nil { + return nil, err + } + + var index ocispecs.Index + if err := json.Unmarshal(p, &index); err != nil { + return nil, err + } + + for _, m := range index.Manifests { + if m.Digest != "" { + descs = append(descs, m) + } + } + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, + ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: + // childless data types. + return nil, nil + default: + logrus.Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType) + } + + return descs, nil + } +} + +// updateDistributionSourceHandler will update distribution source label after +// pushing layer successfully. +// +// FIXME(fuweid): There is race condition for current design of distribution +// source label if there are pull/push jobs consuming same layer. +func updateDistributionSourceHandler(manager content.Manager, pushF images.HandlerFunc, ref string) (images.HandlerFunc, error) { + updateF, err := docker.AppendDistributionSourceLabel(manager, ref) + if err != nil { + return nil, err + } + + return images.HandlerFunc(func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + var islayer bool + + switch desc.MediaType { + case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip, + ocispecs.MediaTypeImageLayer, ocispecs.MediaTypeImageLayerGzip: + islayer = true + } + + children, err := pushF(ctx, desc) + if err != nil { + return nil, err + } + + // update distribution source to layer + if islayer { + if _, err := updateF(ctx, desc); err != nil { + logrus.Warnf("failed to update distribution source for layer %v: %v", desc.Digest, err) + } + } + return children, nil + }), nil +} + +func dedupeHandler(h images.HandlerFunc) images.HandlerFunc { + var g flightcontrol.Group + res := map[digest.Digest][]ocispecs.Descriptor{} + var mu sync.Mutex + + return images.HandlerFunc(func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + res, err := g.Do(ctx, desc.Digest.String(), func(ctx context.Context) (interface{}, error) { + mu.Lock() + if r, ok := res[desc.Digest]; ok { + mu.Unlock() + return r, nil + } + mu.Unlock() + + children, err := h(ctx, desc) + if err != nil { + return nil, err + } + + mu.Lock() + res[desc.Digest] = children + mu.Unlock() + return children, nil + }) + if err != nil { + return nil, err + } + if res == nil { + return nil, nil + } + return res.([]ocispecs.Descriptor), nil + }) +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go index 96755c362f..ed8034ccbc 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/authorizer.go +++ b/vendor/github.com/moby/buildkit/util/resolver/authorizer.go @@ -11,25 +11,29 @@ import ( "time" "github.com/containerd/containerd/errdefs" - "github.com/containerd/containerd/log" 
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker/auth" remoteserrors "github.com/containerd/containerd/remotes/errors" "github.com/moby/buildkit/session" sessionauth "github.com/moby/buildkit/session/auth" + log "github.com/moby/buildkit/util/bklog" "github.com/moby/buildkit/util/flightcontrol" + "github.com/moby/buildkit/version" "github.com/pkg/errors" "github.com/sirupsen/logrus" ) +const defaultExpiration = 60 + type authHandlerNS struct { counter int64 // needs to be 64bit aligned for 32bit systems - mu sync.Mutex - handlers map[string]*authHandler - hosts map[string][]docker.RegistryHost - sm *session.Manager - g flightcontrol.Group + handlers map[string]*authHandler + muHandlers sync.Mutex + hosts map[string][]docker.RegistryHost + muHosts sync.Mutex + sm *session.Manager + g flightcontrol.Group } func newAuthHandlerNS(sm *session.Manager) *authHandlerNS { @@ -118,8 +122,8 @@ func newDockerAuthorizer(client *http.Client, handlers *authHandlerNS, sm *sessi // Authorize handles auth request. func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error { - a.handlers.mu.Lock() - defer a.handlers.mu.Unlock() + a.handlers.muHandlers.Lock() + defer a.handlers.muHandlers.Unlock() // skip if there is no auth handler ah := a.handlers.get(ctx, req.URL.Host, a.sm, a.session) @@ -141,8 +145,8 @@ func (a *dockerAuthorizer) getCredentials(host string) (sessionID, username, sec } func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error { - a.handlers.mu.Lock() - defer a.handlers.mu.Unlock() + a.handlers.muHandlers.Lock() + defer a.handlers.muHandlers.Unlock() last := responses[len(responses)-1] host := last.Request.URL.Host @@ -349,11 +353,17 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se if err != nil { return nil, err } + if resp.ExpiresIn == 0 { + resp.ExpiresIn = defaultExpiration + } issuedAt, expires = time.Unix(resp.IssuedAt, 0), int(resp.ExpiresIn) token = resp.Token return nil, nil } + hdr := http.Header{} + hdr.Set("User-Agent", version.UserAgent()) + // fetch token for the resource scope if to.Secret != "" { defer func() { @@ -369,10 +379,13 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se // As of September 2017, GCR is known to return 404. // As of February 2018, JFrog Artifactory is known to return 401. 
if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 { - resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, nil, "buildkit-client", to) + resp, err := auth.FetchTokenWithOAuth(ctx, ah.client, hdr, "buildkit-client", to) if err != nil { return nil, err } + if resp.ExpiresIn == 0 { + resp.ExpiresIn = defaultExpiration + } issuedAt, expires = resp.IssuedAt, resp.ExpiresIn token = resp.AccessToken return nil, nil @@ -384,15 +397,21 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se } return nil, err } + if resp.ExpiresIn == 0 { + resp.ExpiresIn = defaultExpiration + } issuedAt, expires = resp.IssuedAt, resp.ExpiresIn token = resp.Token return nil, nil } // do request anonymously - resp, err := auth.FetchToken(ctx, ah.client, nil, to) + resp, err := auth.FetchToken(ctx, ah.client, hdr, to) if err != nil { return nil, errors.Wrap(err, "failed to fetch anonymous token") } + if resp.ExpiresIn == 0 { + resp.ExpiresIn = defaultExpiration + } issuedAt, expires = resp.IssuedAt, resp.ExpiresIn token = resp.Token @@ -400,11 +419,6 @@ func (ah *authHandler) fetchToken(ctx context.Context, sm *session.Manager, g se } func invalidAuthorization(c auth.Challenge, responses []*http.Response) error { - lastResponse := responses[len(responses)-1] - if lastResponse.StatusCode == http.StatusUnauthorized { - return errors.Wrapf(docker.ErrInvalidAuthorization, "authorization status: %v", lastResponse.StatusCode) - } - errStr := c.Parameters["error"] if errStr == "" { return nil @@ -433,25 +447,28 @@ type scopes map[string]map[string]struct{} func parseScopes(s []string) scopes { // https://docs.docker.com/registry/spec/auth/scope/ m := map[string]map[string]struct{}{} - for _, scope := range s { - parts := strings.SplitN(scope, ":", 3) - names := []string{parts[0]} - if len(parts) > 1 { - names = append(names, parts[1]) - } - var actions []string - if len(parts) == 3 { - actions = append(actions, strings.Split(parts[2], ",")...) - } - name := strings.Join(names, ":") - ma, ok := m[name] - if !ok { - ma = map[string]struct{}{} - m[name] = ma - } + for _, scopeStr := range s { + // The scopeStr may have strings that contain multiple scopes separated by a space. + for _, scope := range strings.Split(scopeStr, " ") { + parts := strings.SplitN(scope, ":", 3) + names := []string{parts[0]} + if len(parts) > 1 { + names = append(names, parts[1]) + } + var actions []string + if len(parts) == 3 { + actions = append(actions, strings.Split(parts[2], ",")...) 
+ } + name := strings.Join(names, ":") + ma, ok := m[name] + if !ok { + ma = map[string]struct{}{} + m[name] = ma + } - for _, a := range actions { - ma[a] = struct{}{} + for _, a := range actions { + ma[a] = struct{}{} + } } } return m diff --git a/vendor/github.com/moby/buildkit/util/resolver/config/config.go b/vendor/github.com/moby/buildkit/util/resolver/config/config.go new file mode 100644 index 0000000000..125255037b --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/config/config.go @@ -0,0 +1,15 @@ +package config + +type RegistryConfig struct { + Mirrors []string `toml:"mirrors"` + PlainHTTP *bool `toml:"http"` + Insecure *bool `toml:"insecure"` + RootCAs []string `toml:"ca"` + KeyPairs []TLSKeyPair `toml:"keypair"` + TLSConfigDir []string `toml:"tlsconfigdir"` +} + +type TLSKeyPair struct { + Key string `toml:"key"` + Certificate string `toml:"cert"` +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/limited/group.go b/vendor/github.com/moby/buildkit/util/resolver/limited/group.go new file mode 100644 index 0000000000..7fdd947a02 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/resolver/limited/group.go @@ -0,0 +1,175 @@ +package limited + +import ( + "context" + "io" + "runtime" + "strings" + "sync" + + "github.com/containerd/containerd/content" + "github.com/containerd/containerd/images" + "github.com/containerd/containerd/remotes" + "github.com/docker/distribution/reference" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/sirupsen/logrus" + "golang.org/x/sync/semaphore" +) + +type contextKeyT string + +var contextKey = contextKeyT("buildkit/util/resolver/limited") + +var Default = New(4) + +type Group struct { + mu sync.Mutex + size int + sem map[string][2]*semaphore.Weighted +} + +type req struct { + g *Group + ref string +} + +func (r *req) acquire(ctx context.Context, desc ocispecs.Descriptor) (context.Context, func(), error) { + if v := ctx.Value(contextKey); v != nil { + return ctx, func() {}, nil + } + + ctx = context.WithValue(ctx, contextKey, struct{}{}) + + // json request get one additional connection + highPriority := strings.HasSuffix(desc.MediaType, "+json") + + r.g.mu.Lock() + s, ok := r.g.sem[r.ref] + if !ok { + s = [2]*semaphore.Weighted{ + semaphore.NewWeighted(int64(r.g.size)), + semaphore.NewWeighted(int64(r.g.size + 1)), + } + r.g.sem[r.ref] = s + } + r.g.mu.Unlock() + if !highPriority { + if err := s[0].Acquire(ctx, 1); err != nil { + return ctx, nil, err + } + } + if err := s[1].Acquire(ctx, 1); err != nil { + if !highPriority { + s[0].Release(1) + } + return ctx, nil, err + } + return ctx, func() { + s[1].Release(1) + if !highPriority { + s[0].Release(1) + } + }, nil +} + +func New(size int) *Group { + return &Group{ + size: size, + sem: make(map[string][2]*semaphore.Weighted), + } +} + +func (g *Group) req(ref string) *req { + return &req{g: g, ref: domain(ref)} +} + +func (g *Group) WrapFetcher(f remotes.Fetcher, ref string) remotes.Fetcher { + return &fetcher{Fetcher: f, req: g.req(ref)} +} + +func (g *Group) PushHandler(pusher remotes.Pusher, provider content.Provider, ref string) images.HandlerFunc { + ph := remotes.PushHandler(pusher, provider) + req := g.req(ref) + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { + ctx, release, err := req.acquire(ctx, desc) + if err != nil { + return nil, err + } + defer release() + return ph(ctx, desc) + } +} + +type fetcher struct { + remotes.Fetcher + req *req +} + +func (f *fetcher) Fetch(ctx 
context.Context, desc ocispecs.Descriptor) (io.ReadCloser, error) { + ctx, release, err := f.req.acquire(ctx, desc) + if err != nil { + return nil, err + } + rc, err := f.Fetcher.Fetch(ctx, desc) + if err != nil { + release() + return nil, err + } + + rcw := &readCloser{ReadCloser: rc} + closer := func() { + if !rcw.closed { + logrus.Warnf("fetcher not closed cleanly: %s", desc.Digest) + } + release() + } + rcw.release = closer + runtime.SetFinalizer(rcw, func(rc *readCloser) { + rc.close() + }) + + if s, ok := rc.(io.Seeker); ok { + return &readCloserSeeker{rcw, s}, nil + } + + return rcw, nil +} + +type readCloserSeeker struct { + *readCloser + io.Seeker +} + +type readCloser struct { + io.ReadCloser + once sync.Once + closed bool + release func() +} + +func (r *readCloser) Close() error { + r.closed = true + r.close() + return r.ReadCloser.Close() +} + +func (r *readCloser) close() { + r.once.Do(r.release) +} + +func FetchHandler(ingester content.Ingester, fetcher remotes.Fetcher, ref string) images.HandlerFunc { + return remotes.FetchHandler(ingester, Default.WrapFetcher(fetcher, ref)) +} + +func PushHandler(pusher remotes.Pusher, provider content.Provider, ref string) images.HandlerFunc { + return Default.PushHandler(pusher, provider, ref) +} + +func domain(ref string) string { + if ref != "" { + if named, err := reference.ParseNormalizedNamed(ref); err == nil { + return reference.Domain(named) + } + } + return ref +} diff --git a/vendor/github.com/moby/buildkit/util/resolver/pool.go b/vendor/github.com/moby/buildkit/util/resolver/pool.go index 810e962049..292ca2e614 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/pool.go +++ b/vendor/github.com/moby/buildkit/util/resolver/pool.go @@ -3,6 +3,7 @@ package resolver import ( "context" "fmt" + "net/http" "strings" "sync" "sync/atomic" @@ -14,7 +15,8 @@ import ( distreference "github.com/docker/distribution/reference" "github.com/moby/buildkit/session" "github.com/moby/buildkit/source" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + "github.com/moby/buildkit/version" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) // DefaultPool is the default shared resolver pool instance @@ -40,7 +42,7 @@ func (p *Pool) gc() { defer p.mu.Unlock() for k, ns := range p.m { - ns.mu.Lock() + ns.muHandlers.Lock() for key, h := range ns.handlers { if time.Since(h.lastUsed) < 10*time.Minute { continue @@ -58,7 +60,7 @@ func (p *Pool) gc() { if len(ns.handlers) == 0 { delete(p.m, k) } - ns.mu.Unlock() + ns.muHandlers.Unlock() } time.AfterFunc(5*time.Minute, p.gc) @@ -104,8 +106,11 @@ func newResolver(hosts docker.RegistryHosts, handler *authHandlerNS, sm *session g: g, handler: handler, } + headers := http.Header{} + headers.Set("User-Agent", version.UserAgent()) r.Resolver = docker.NewResolver(docker.ResolverOptions{ - Hosts: r.HostsFunc, + Hosts: r.HostsFunc, + Headers: headers, }) return r } @@ -128,9 +133,9 @@ func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) { return func(domain string) ([]docker.RegistryHost, error) { v, err := r.handler.g.Do(context.TODO(), domain, func(ctx context.Context) (interface{}, error) { // long lock not needed because flightcontrol.Do - r.handler.mu.Lock() + r.handler.muHosts.Lock() v, ok := r.handler.hosts[domain] - r.handler.mu.Unlock() + r.handler.muHosts.Unlock() if ok { return v, nil } @@ -138,18 +143,21 @@ func (r *Resolver) HostsFunc(host string) ([]docker.RegistryHost, error) { if err != nil { return nil, err } - r.handler.mu.Lock() + 
r.handler.muHosts.Lock() r.handler.hosts[domain] = res - r.handler.mu.Unlock() + r.handler.muHosts.Unlock() return res, nil }) if err != nil || v == nil { return nil, err } - res := v.([]docker.RegistryHost) - if len(res) == 0 { + vv := v.([]docker.RegistryHost) + if len(vv) == 0 { return nil, nil } + // make a copy so authorizer is set on unique instance + res := make([]docker.RegistryHost, len(vv)) + copy(res, vv) auth := newDockerAuthorizer(res[0].Client, r.handler, r.sm, r.g) for i := range res { res[i].Authorizer = auth @@ -163,6 +171,9 @@ func (r *Resolver) WithSession(s session.Group) *Resolver { r2 := *r r2.auth = nil r2.g = s + r2.Resolver = docker.NewResolver(docker.ResolverOptions{ + Hosts: r2.HostsFunc, // this refers to the newly-configured session so we need to recreate the resolver. + }) return &r2 } @@ -184,7 +195,7 @@ func (r *Resolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, er } // Resolve attempts to resolve the reference into a name and descriptor. -func (r *Resolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) { +func (r *Resolver) Resolve(ctx context.Context, ref string) (string, ocispecs.Descriptor, error) { if r.mode == source.ResolveModePreferLocal && r.is != nil { if img, err := r.is.Get(ctx, ref); err == nil { return ref, img.Target, nil @@ -194,7 +205,7 @@ func (r *Resolver) Resolve(ctx context.Context, ref string) (string, ocispec.Des n, desc, err := r.Resolver.Resolve(ctx, ref) if err == nil { atomic.AddInt64(&r.handler.counter, 1) - return n, desc, err + return n, desc, nil } if r.mode == source.ResolveModeDefault && r.is != nil { @@ -203,5 +214,5 @@ func (r *Resolver) Resolve(ctx context.Context, ref string) (string, ocispec.Des } } - return "", ocispec.Descriptor{}, err + return "", ocispecs.Descriptor{}, err } diff --git a/vendor/github.com/moby/buildkit/util/resolver/resolver.go b/vendor/github.com/moby/buildkit/util/resolver/resolver.go index 64b895f73a..a23f4b15cf 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/resolver.go +++ b/vendor/github.com/moby/buildkit/util/resolver/resolver.go @@ -13,11 +13,12 @@ import ( "time" "github.com/containerd/containerd/remotes/docker" + "github.com/moby/buildkit/util/resolver/config" "github.com/moby/buildkit/util/tracing" "github.com/pkg/errors" ) -func fillInsecureOpts(host string, c RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) { +func fillInsecureOpts(host string, c config.RegistryConfig, h docker.RegistryHost) ([]docker.RegistryHost, error) { var hosts []docker.RegistryHost tc, err := loadTLSConfig(c) @@ -64,7 +65,7 @@ func fillInsecureOpts(host string, c RegistryConfig, h docker.RegistryHost) ([]d return hosts, nil } -func loadTLSConfig(c RegistryConfig) (*tls.Config, error) { +func loadTLSConfig(c config.RegistryConfig) (*tls.Config, error) { for _, d := range c.TLSConfigDir { fs, err := ioutil.ReadDir(d) if err != nil && !errors.Is(err, os.ErrNotExist) && !errors.Is(err, os.ErrPermission) { @@ -75,7 +76,7 @@ func loadTLSConfig(c RegistryConfig) (*tls.Config, error) { c.RootCAs = append(c.RootCAs, filepath.Join(d, f.Name())) } if strings.HasSuffix(f.Name(), ".cert") { - c.KeyPairs = append(c.KeyPairs, TLSKeyPair{ + c.KeyPairs = append(c.KeyPairs, config.TLSKeyPair{ Certificate: filepath.Join(d, f.Name()), Key: filepath.Join(d, strings.TrimSuffix(f.Name(), ".cert")+".key"), }) @@ -114,22 +115,8 @@ func loadTLSConfig(c RegistryConfig) (*tls.Config, error) { return tc, nil } -type RegistryConfig struct { - Mirrors []string 
`toml:"mirrors"` - PlainHTTP *bool `toml:"http"` - Insecure *bool `toml:"insecure"` - RootCAs []string `toml:"ca"` - KeyPairs []TLSKeyPair `toml:"keypair"` - TLSConfigDir []string `toml:"tlsconfigdir"` -} - -type TLSKeyPair struct { - Key string `toml:"key"` - Certificate string `toml:"cert"` -} - // NewRegistryConfig converts registry config to docker.RegistryHosts callback -func NewRegistryConfig(m map[string]RegistryConfig) docker.RegistryHosts { +func NewRegistryConfig(m map[string]config.RegistryConfig) docker.RegistryHosts { return docker.Registries( func(host string) ([]docker.RegistryHost, error) { c, ok := m[host] @@ -203,8 +190,9 @@ func newDefaultTransport() *http.Transport { Timeout: 30 * time.Second, KeepAlive: 60 * time.Second, }).DialContext, - MaxIdleConns: 10, - IdleConnTimeout: 30 * time.Second, + MaxIdleConns: 30, + IdleConnTimeout: 120 * time.Second, + MaxIdleConnsPerHost: 4, TLSHandshakeTimeout: 10 * time.Second, ExpectContinueTimeout: 5 * time.Second, TLSNextProto: make(map[string]func(authority string, c *tls.Conn) http.RoundTripper), diff --git a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go index e30f83d7f9..554076b07b 100644 --- a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go +++ b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go @@ -5,18 +5,17 @@ import ( "fmt" "io" "net" - "strings" "syscall" "time" "github.com/containerd/containerd/images" remoteserrors "github.com/containerd/containerd/remotes/errors" - ocispec "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" "github.com/pkg/errors" ) func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc { - return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) { + return func(ctx context.Context, desc ocispecs.Descriptor) ([]ocispecs.Descriptor, error) { backoff := time.Second for { descs, err := f(ctx, desc) @@ -57,7 +56,7 @@ func retryError(err error) bool { return true } - if errors.Is(err, io.EOF) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.EPIPE) { + if errors.Is(err, io.EOF) || errors.Is(err, syscall.ECONNRESET) || errors.Is(err, syscall.EPIPE) || errors.Is(err, net.ErrClosed) { return true } // catches TLS timeout or other network-related temporary errors @@ -69,10 +68,5 @@ func retryError(err error) bool { return true } - // net.ErrClosed exposed in go1.16 - if strings.Contains(err.Error(), "use of closed network connection") { - return true - } - return false } diff --git a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go index bb08ec1945..fbc607a975 100644 --- a/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go +++ b/vendor/github.com/moby/buildkit/util/rootless/specconv/specconv_nonlinux.go @@ -1,3 +1,4 @@ +//go:build !linux // +build !linux package specconv diff --git a/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go b/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go index b6f2367860..3c7583ffdd 100644 --- a/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go +++ b/vendor/github.com/moby/buildkit/util/sshutil/keyscan.go @@ -23,7 +23,7 @@ func addDefaultPort(hostport string, defaultPort int) string { return hostport } -// SshKeyScan scans a ssh server for the hostkey; server should be in the form 
hostname, or hostname:port +// SSHKeyScan scans a ssh server for the hostkey; server should be in the form hostname, or hostname:port func SSHKeyScan(server string) (string, error) { var key string KeyScanCallback := func(hostport string, remote net.Addr, pubKey ssh.PublicKey) error { @@ -45,7 +45,7 @@ func SSHKeyScan(server string) (string, error) { err = nil } if conn != nil { - conn.Close() + _ = conn.Close() } return key, err } diff --git a/vendor/github.com/moby/buildkit/util/stack/generate.go b/vendor/github.com/moby/buildkit/util/stack/generate.go index 97516baa99..f3e967787c 100644 --- a/vendor/github.com/moby/buildkit/util/stack/generate.go +++ b/vendor/github.com/moby/buildkit/util/stack/generate.go @@ -1,3 +1,3 @@ package stack -//go:generate protoc -I=. -I=../../vendor/ --go_out=. stack.proto +//go:generate protoc -I=. -I=../../vendor/ --go_out=. --go_opt=paths=source_relative --go_opt=Mstack.proto=/util/stack stack.proto diff --git a/vendor/github.com/moby/buildkit/util/suggest/error.go b/vendor/github.com/moby/buildkit/util/suggest/error.go new file mode 100644 index 0000000000..0ad7b5c867 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/suggest/error.go @@ -0,0 +1,71 @@ +package suggest + +import ( + "strings" + + "github.com/agext/levenshtein" +) + +// WrapError wraps error with a suggestion for fixing it +func WrapError(err error, val string, options []string, caseSensitive bool) error { + if err == nil { + return nil + } + orig := val + if !caseSensitive { + val = strings.ToLower(val) + } + var match string + mindist := 3 // same as hcl + for _, opt := range options { + if !caseSensitive { + opt = strings.ToLower(opt) + } + if val == opt { + // exact match means error was unrelated to the value + return err + } + dist := levenshtein.Distance(val, opt, nil) + if dist < mindist { + if !caseSensitive { + match = matchCase(opt, orig) + } else { + match = opt + } + mindist = dist + } + } + + if match == "" { + return err + } + + return &suggestError{ + err: err, + match: match, + } +} + +type suggestError struct { + err error + match string +} + +func (e *suggestError) Error() string { + return e.err.Error() + " (did you mean " + e.match + "?)" +} + +// Unwrap returns the underlying error. 
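For context, a minimal usage sketch of the suggest package added above (the caller, error text, and candidate list are hypothetical, not taken from this diff); the Unwrap method that follows keeps the wrapper transparent to errors.Is and errors.As:

package main

import (
	"errors"
	"fmt"

	"github.com/moby/buildkit/util/suggest"
)

func main() {
	base := errors.New("unknown instruction: RUNN")
	err := suggest.WrapError(base, "RUNN", []string{"RUN", "COPY", "ADD"}, true)
	fmt.Println(err)                  // unknown instruction: RUNN (did you mean RUN?)
	fmt.Println(errors.Is(err, base)) // true, via the Unwrap method below
}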
+func (e *suggestError) Unwrap() error { + return e.err +} + +func matchCase(val, orig string) string { + if orig == strings.ToLower(orig) { + return strings.ToLower(val) + } + if orig == strings.ToUpper(orig) { + return strings.ToUpper(val) + } + return val +} diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go index f3762e69d3..ff01143eef 100644 --- a/vendor/github.com/moby/buildkit/util/system/path_unix.go +++ b/vendor/github.com/moby/buildkit/util/system/path_unix.go @@ -1,3 +1,4 @@ +//go:build !windows // +build !windows package system diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go index 3fc4744948..8514166827 100644 --- a/vendor/github.com/moby/buildkit/util/system/path_windows.go +++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go @@ -1,3 +1,4 @@ +//go:build windows // +build windows package system diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go deleted file mode 100644 index 62afa03fef..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_linux.go +++ /dev/null @@ -1,29 +0,0 @@ -// +build linux,seccomp - -package system - -import ( - "sync" - - "golang.org/x/sys/unix" -) - -var seccompSupported bool -var seccompOnce sync.Once - -func SeccompSupported() bool { - seccompOnce.Do(func() { - seccompSupported = getSeccompSupported() - }) - return seccompSupported -} - -func getSeccompSupported() bool { - if err := unix.Prctl(unix.PR_GET_SECCOMP, 0, 0, 0, 0); err != unix.EINVAL { - // Make sure the kernel has CONFIG_SECCOMP_FILTER. - if err := unix.Prctl(unix.PR_SET_SECCOMP, unix.SECCOMP_MODE_FILTER, 0, 0, 0); err != unix.EINVAL { - return true - } - } - return false -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go b/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go deleted file mode 100644 index e348c379a9..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_nolinux.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !linux,seccomp - -package system - -func SeccompSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go b/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go deleted file mode 100644 index 84cfb7fa83..0000000000 --- a/vendor/github.com/moby/buildkit/util/system/seccomp_noseccomp.go +++ /dev/null @@ -1,7 +0,0 @@ -// +build !seccomp - -package system - -func SeccompSupported() bool { - return false -} diff --git a/vendor/github.com/moby/buildkit/util/tracing/exec/traceexec.go b/vendor/github.com/moby/buildkit/util/tracing/exec/traceexec.go new file mode 100644 index 0000000000..b0da7314c1 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/exec/traceexec.go @@ -0,0 +1,77 @@ +package detect + +import ( + "context" + + "go.opentelemetry.io/otel/propagation" +) + +const ( + traceparentHeader = "traceparent" + tracestateHeader = "tracestate" +) + +// Environ returns list of environment variables that need to be sent to the child process +// in order for it to pick up cross-process tracing from same state. 
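A hedged sketch of the intended call site for the Environ helper defined below (the child binary name is invented; the import path and the detect package name come from the file header above). The variables it returns let a forked process continue the caller's trace:

package main

import (
	"context"
	"os"
	"os/exec"

	detect "github.com/moby/buildkit/util/tracing/exec"
)

func main() {
	ctx := context.Background()
	cmd := exec.CommandContext(ctx, "buildkit-helper") // hypothetical child process
	// Appends TRACEPARENT/TRACESTATE (plus the deprecated OTEL_TRACE_* pair)
	// so the child can reconstruct the parent span context.
	cmd.Env = append(os.Environ(), detect.Environ(ctx)...)
	_ = cmd.Run()
}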
+func Environ(ctx context.Context) []string { + var tm textMap + tc := propagation.TraceContext{} + tc.Inject(ctx, &tm) + + var env []string + + // deprecated: removed in v0.11.0 + // previously defined in https://github.com/open-telemetry/opentelemetry-swift/blob/4ea467ed4b881d7329bf2254ca7ed7f2d9d6e1eb/Sources/OpenTelemetrySdk/Trace/Propagation/EnvironmentContextPropagator.swift#L14-L15 + if tm.parent != "" { + env = append(env, "OTEL_TRACE_PARENT="+tm.parent) + } + if tm.state != "" { + env = append(env, "OTEL_TRACE_STATE="+tm.state) + } + + // open-telemetry/opentelemetry-specification#740 + if tm.parent != "" { + env = append(env, "TRACEPARENT="+tm.parent) + } + if tm.state != "" { + env = append(env, "TRACESTATE="+tm.state) + } + + return env +} + +type textMap struct { + parent string + state string +} + +func (tm *textMap) Get(key string) string { + switch key { + case traceparentHeader: + return tm.parent + case tracestateHeader: + return tm.state + default: + return "" + } +} + +func (tm *textMap) Set(key string, value string) { + switch key { + case traceparentHeader: + tm.parent = value + case tracestateHeader: + tm.state = value + } +} + +func (tm *textMap) Keys() []string { + var k []string + if tm.parent != "" { + k = append(k, traceparentHeader) + } + if tm.state != "" { + k = append(k, tracestateHeader) + } + return k +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/multispan.go b/vendor/github.com/moby/buildkit/util/tracing/multispan.go index 2b157a1de3..0bca141fa1 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/multispan.go +++ b/vendor/github.com/moby/buildkit/util/tracing/multispan.go @@ -1,21 +1,21 @@ package tracing import ( - opentracing "github.com/opentracing/opentracing-go" + "go.opentelemetry.io/otel/trace" ) // MultiSpan allows shared tracing to multiple spans. // TODO: This is a temporary solution and doesn't really support shared tracing yet. Instead the first always wins. type MultiSpan struct { - opentracing.Span + trace.Span } func NewMultiSpan() *MultiSpan { return &MultiSpan{} } -func (ms *MultiSpan) Add(s opentracing.Span) { +func (ms *MultiSpan) Add(s trace.Span) { if ms.Span == nil { ms.Span = s } diff --git a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go new file mode 100644 index 0000000000..638b08ce90 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/client.go @@ -0,0 +1,101 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package otlptracegrpc + +import ( + "context" + "errors" + "fmt" + "sync" + "time" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + + "google.golang.org/grpc" + + coltracepb "go.opentelemetry.io/proto/otlp/collector/trace/v1" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +type client struct { + connection *Connection + + lock sync.Mutex + tracesClient coltracepb.TraceServiceClient +} + +var _ otlptrace.Client = (*client)(nil) + +var ( + errNoClient = errors.New("no client") +) + +// NewClient creates a new gRPC trace client. +func NewClient(cc *grpc.ClientConn) otlptrace.Client { + c := &client{} + c.connection = NewConnection(cc, c.handleNewConnection) + + return c +} + +func (c *client) handleNewConnection(cc *grpc.ClientConn) { + c.lock.Lock() + defer c.lock.Unlock() + if cc != nil { + c.tracesClient = coltracepb.NewTraceServiceClient(cc) + } else { + c.tracesClient = nil + } +} + +// Start establishes a connection to the collector. +func (c *client) Start(ctx context.Context) error { + return c.connection.StartConnection(ctx) +} + +// Stop shuts down the connection to the collector. +func (c *client) Stop(ctx context.Context) error { + return c.connection.Shutdown(ctx) +} + +// UploadTraces sends a batch of spans to the collector. +func (c *client) UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error { + if !c.connection.Connected() { + return fmt.Errorf("traces exporter is disconnected from the server: %w", c.connection.LastConnectError()) + } + + ctx, cancel := c.connection.ContextWithStop(ctx) + defer cancel() + ctx, tCancel := context.WithTimeout(ctx, 30*time.Second) + defer tCancel() + + ctx = c.connection.ContextWithMetadata(ctx) + err := func() error { + c.lock.Lock() + defer c.lock.Unlock() + if c.tracesClient == nil { + return errNoClient + } + + _, err := c.tracesClient.Export(ctx, &coltracepb.ExportTraceServiceRequest{ + ResourceSpans: protoSpans, + }) + return err + }() + if err != nil { + c.connection.SetStateDisconnected(err) + } + return err +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go new file mode 100644 index 0000000000..a244882197 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/otlptracegrpc/connection.go @@ -0,0 +1,219 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptracegrpc + +import ( + "context" + "math/rand" + "sync" + "sync/atomic" + "time" + "unsafe" + + "google.golang.org/grpc" + "google.golang.org/grpc/metadata" +) + +type Connection struct { + // Ensure pointer is 64-bit aligned for atomic operations on both 32 and 64 bit machines. 
+ lastConnectErrPtr unsafe.Pointer + + // mu protects the Connection as it is accessed by the + // exporter goroutines and background Connection goroutine + mu sync.Mutex + cc *grpc.ClientConn + + // these fields are read-only after constructor is finished + metadata metadata.MD + newConnectionHandler func(cc *grpc.ClientConn) + + // these channels are created once + disconnectedCh chan bool + backgroundConnectionDoneCh chan struct{} + stopCh chan struct{} + + // this is for tests, so they can replace the closing + // routine without a worry of modifying some global variable + // or changing it back to original after the test is done + closeBackgroundConnectionDoneCh func(ch chan struct{}) +} + +func NewConnection(cc *grpc.ClientConn, handler func(cc *grpc.ClientConn)) *Connection { + c := new(Connection) + c.newConnectionHandler = handler + c.cc = cc + c.closeBackgroundConnectionDoneCh = func(ch chan struct{}) { + close(ch) + } + return c +} + +func (c *Connection) StartConnection(ctx context.Context) error { + c.stopCh = make(chan struct{}) + c.disconnectedCh = make(chan bool, 1) + c.backgroundConnectionDoneCh = make(chan struct{}) + + if err := c.connect(ctx); err == nil { + c.setStateConnected() + } else { + c.SetStateDisconnected(err) + } + go c.indefiniteBackgroundConnection() + + // TODO: proper error handling when initializing connections. + // We can report permanent errors, e.g., invalid settings. + return nil +} + +func (c *Connection) LastConnectError() error { + errPtr := (*error)(atomic.LoadPointer(&c.lastConnectErrPtr)) + if errPtr == nil { + return nil + } + return *errPtr +} + +func (c *Connection) saveLastConnectError(err error) { + var errPtr *error + if err != nil { + errPtr = &err + } + atomic.StorePointer(&c.lastConnectErrPtr, unsafe.Pointer(errPtr)) +} + +func (c *Connection) SetStateDisconnected(err error) { + c.saveLastConnectError(err) + select { + case c.disconnectedCh <- true: + default: + } + c.newConnectionHandler(nil) +} + +func (c *Connection) setStateConnected() { + c.saveLastConnectError(nil) +} + +func (c *Connection) Connected() bool { + return c.LastConnectError() == nil +} + +const defaultConnReattemptPeriod = 10 * time.Second + +func (c *Connection) indefiniteBackgroundConnection() { + defer func() { + c.closeBackgroundConnectionDoneCh(c.backgroundConnectionDoneCh) + }() + + connReattemptPeriod := defaultConnReattemptPeriod + + // No strong seeding required, nano time can + // already help with pseudo uniqueness. + rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) + + // maxJitterNanos: 70% of the connectionReattemptPeriod + maxJitterNanos := int64(0.7 * float64(connReattemptPeriod)) + + for { + // Otherwise these will be the normal scenarios to enable + // reconnection if we trip out. + // 1. If we've stopped, return entirely + // 2. Otherwise block until we are disconnected, and + // then retry connecting + select { + case <-c.stopCh: + return + + case <-c.disconnectedCh: + // Quickly check if we haven't stopped at the + // same time. + select { + case <-c.stopCh: + return + + default: + } + + // Normal scenario that we'll wait for + } + + if err := c.connect(context.Background()); err == nil { + c.setStateConnected() + } else { + // this code is unreachable in most cases + // c.connect does not establish Connection + c.SetStateDisconnected(err) + } + + // Apply some jitter to avoid lockstep retrials of other + // collector-exporters. 
Lockstep retrials could result in an + // innocent DDOS, by clogging the machine's resources and network. + jitter := time.Duration(rng.Int63n(maxJitterNanos)) + select { + case <-c.stopCh: + return + case <-time.After(connReattemptPeriod + jitter): + } + } +} + +func (c *Connection) connect(ctx context.Context) error { + c.newConnectionHandler(c.cc) + return nil +} + +func (c *Connection) ContextWithMetadata(ctx context.Context) context.Context { + if c.metadata.Len() > 0 { + return metadata.NewOutgoingContext(ctx, c.metadata) + } + return ctx +} + +func (c *Connection) Shutdown(ctx context.Context) error { + close(c.stopCh) + // Ensure that the backgroundConnector returns + select { + case <-c.backgroundConnectionDoneCh: + case <-ctx.Done(): + return ctx.Err() + } + + c.mu.Lock() + cc := c.cc + c.cc = nil + c.mu.Unlock() + + if cc != nil { + return cc.Close() + } + + return nil +} + +func (c *Connection) ContextWithStop(ctx context.Context) (context.Context, context.CancelFunc) { + // Unify the parent context Done signal with the Connection's + // stop channel. + ctx, cancel := context.WithCancel(ctx) + go func(ctx context.Context, cancel context.CancelFunc) { + select { + case <-ctx.Done(): + // Nothing to do, either cancelled or deadline + // happened. + case <-c.stopCh: + cancel() + } + }(ctx, cancel) + return ctx, cancel +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go index 8f2d4dd962..fd7f0ba7d5 100644 --- a/vendor/github.com/moby/buildkit/util/tracing/tracing.go +++ b/vendor/github.com/moby/buildkit/util/tracing/tracing.go @@ -3,49 +3,44 @@ package tracing import ( "context" "fmt" - "io" "net/http" + "net/http/httptrace" - "github.com/opentracing-contrib/go-stdlib/nethttp" - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" + "github.com/moby/buildkit/util/bklog" + "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" + "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.opentelemetry.io/otel/trace" ) // StartSpan starts a new span as a child of the span in context. // If there is no span in context then this is a no-op. -// The difference from opentracing.StartSpanFromContext is that this method -// does not depend on global tracer. -func StartSpan(ctx context.Context, operationName string, opts ...opentracing.StartSpanOption) (opentracing.Span, context.Context) { - parent := opentracing.SpanFromContext(ctx) - tracer := opentracing.Tracer(&opentracing.NoopTracer{}) - if parent != nil { - tracer = parent.Tracer() - opts = append(opts, opentracing.ChildOf(parent.Context())) - } - span := tracer.StartSpan(operationName, opts...) - if parent != nil { - return span, opentracing.ContextWithSpan(ctx, span) +func StartSpan(ctx context.Context, operationName string, opts ...trace.SpanStartOption) (trace.Span, context.Context) { + parent := trace.SpanFromContext(ctx) + tracer := trace.NewNoopTracerProvider().Tracer("") + if parent != nil && parent.SpanContext().IsValid() { + tracer = parent.TracerProvider().Tracer("") } + ctx, span := tracer.Start(ctx, operationName, opts...) 
+ ctx = bklog.WithLogger(ctx, bklog.GetLogger(ctx).WithField("span", operationName)) return span, ctx } // FinishWithError finalizes the span and sets the error if one is passed -func FinishWithError(span opentracing.Span, err error) { +func FinishWithError(span trace.Span, err error) { if err != nil { - fields := []log.Field{ - log.String("event", "error"), - log.String("message", err.Error()), - } + span.RecordError(err) if _, ok := err.(interface { Cause() error }); ok { - fields = append(fields, log.String("stack", fmt.Sprintf("%+v", err))) + span.SetAttributes(attribute.String(string(semconv.ExceptionStacktraceKey), fmt.Sprintf("%+v", err))) } - span.LogFields(fields...) - ext.Error.Set(span, true) + span.SetStatus(codes.Error, err.Error()) } - span.Finish() + span.End() } // ContextWithSpanFromContext sets the tracing span of a context from other @@ -53,63 +48,26 @@ func FinishWithError(span opentracing.Span, err error) { // context.WithoutCancel() that would copy the context but reset ctx.Done func ContextWithSpanFromContext(ctx, ctx2 context.Context) context.Context { // if already is a span then noop - if span := opentracing.SpanFromContext(ctx); span != nil { + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { return ctx } - if span := opentracing.SpanFromContext(ctx2); span != nil { - return opentracing.ContextWithSpan(ctx, span) + if span := trace.SpanFromContext(ctx2); span != nil && span.SpanContext().IsValid() { + return trace.ContextWithSpan(ctx, span) } return ctx } -var DefaultTransport http.RoundTripper = &Transport{ - RoundTripper: &nethttp.Transport{RoundTripper: http.DefaultTransport}, -} +var DefaultTransport = NewTransport(http.DefaultTransport) var DefaultClient = &http.Client{ Transport: DefaultTransport, } -type Transport struct { - http.RoundTripper -} - func NewTransport(rt http.RoundTripper) http.RoundTripper { - return &Transport{ - RoundTripper: &nethttp.Transport{RoundTripper: rt}, - } -} - -func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - span := opentracing.SpanFromContext(req.Context()) - if span == nil { // no tracer connected with either request or transport - return t.RoundTripper.RoundTrip(req) - } - - req, tracer := nethttp.TraceRequest(span.Tracer(), req) - - resp, err := t.RoundTripper.RoundTrip(req) - if err != nil { - tracer.Finish() - return resp, err - } - - if req.Method == "HEAD" { - tracer.Finish() - } else { - resp.Body = closeTracker{resp.Body, tracer.Finish} - } - - return resp, err -} - -type closeTracker struct { - io.ReadCloser - finish func() -} - -func (c closeTracker) Close() error { - err := c.ReadCloser.Close() - c.finish() - return err + return otelhttp.NewTransport(rt, + otelhttp.WithPropagators(propagation.NewCompositeTextMapPropagator(propagation.TraceContext{}, propagation.Baggage{})), + otelhttp.WithClientTrace(func(ctx context.Context) *httptrace.ClientTrace { + return otelhttptrace.NewClientTrace(ctx, otelhttptrace.WithoutSubSpans()) + }), + ) } diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/attribute.go b/vendor/github.com/moby/buildkit/util/tracing/transform/attribute.go new file mode 100644 index 0000000000..2debe88359 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/attribute.go @@ -0,0 +1,91 @@ +package transform + +import ( + "go.opentelemetry.io/otel/attribute" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// Attributes transforms a slice of OTLP attribute key-values into a slice of KeyValues +func 
Attributes(attrs []*commonpb.KeyValue) []attribute.KeyValue { + if len(attrs) == 0 { + return nil + } + + out := make([]attribute.KeyValue, 0, len(attrs)) + for _, a := range attrs { + kv := attribute.KeyValue{ + Key: attribute.Key(a.Key), + Value: toValue(a.Value), + } + out = append(out, kv) + } + return out +} + +func toValue(v *commonpb.AnyValue) attribute.Value { + switch vv := v.Value.(type) { + case *commonpb.AnyValue_BoolValue: + return attribute.BoolValue(vv.BoolValue) + case *commonpb.AnyValue_IntValue: + return attribute.Int64Value(vv.IntValue) + case *commonpb.AnyValue_DoubleValue: + return attribute.Float64Value(vv.DoubleValue) + case *commonpb.AnyValue_StringValue: + return attribute.StringValue(vv.StringValue) + case *commonpb.AnyValue_ArrayValue: + return arrayValues(vv.ArrayValue.Values) + default: + return attribute.StringValue("INVALID") + } +} + +func boolArray(kv []*commonpb.AnyValue) attribute.Value { + arr := make([]bool, len(kv)) + for i, v := range kv { + arr[i] = v.GetBoolValue() + } + return attribute.BoolSliceValue(arr) +} + +func intArray(kv []*commonpb.AnyValue) attribute.Value { + arr := make([]int64, len(kv)) + for i, v := range kv { + arr[i] = v.GetIntValue() + } + return attribute.Int64SliceValue(arr) +} + +func doubleArray(kv []*commonpb.AnyValue) attribute.Value { + arr := make([]float64, len(kv)) + for i, v := range kv { + arr[i] = v.GetDoubleValue() + } + return attribute.Float64SliceValue(arr) +} + +func stringArray(kv []*commonpb.AnyValue) attribute.Value { + arr := make([]string, len(kv)) + for i, v := range kv { + arr[i] = v.GetStringValue() + } + return attribute.StringSliceValue(arr) +} + +func arrayValues(kv []*commonpb.AnyValue) attribute.Value { + if len(kv) == 0 { + return attribute.StringSliceValue([]string{}) + } + + switch kv[0].Value.(type) { + case *commonpb.AnyValue_BoolValue: + return boolArray(kv) + case *commonpb.AnyValue_IntValue: + return intArray(kv) + case *commonpb.AnyValue_DoubleValue: + return doubleArray(kv) + case *commonpb.AnyValue_StringValue: + return stringArray(kv) + default: + return attribute.StringSliceValue([]string{}) + } +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go b/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go new file mode 100644 index 0000000000..216a364c63 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/instrumentation.go @@ -0,0 +1,17 @@ +package transform + +import ( + commonpb "go.opentelemetry.io/proto/otlp/common/v1" + + "go.opentelemetry.io/otel/sdk/instrumentation" +) + +func instrumentationLibrary(il *commonpb.InstrumentationLibrary) instrumentation.Library { + if il == nil { + return instrumentation.Library{} + } + return instrumentation.Library{ + Name: il.Name, + Version: il.Version, + } +} diff --git a/vendor/github.com/moby/buildkit/util/tracing/transform/span.go b/vendor/github.com/moby/buildkit/util/tracing/transform/span.go new file mode 100644 index 0000000000..f07d0c98e9 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/tracing/transform/span.go @@ -0,0 +1,254 @@ +package transform + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" + "go.opentelemetry.io/otel/sdk/instrumentation" + "go.opentelemetry.io/otel/sdk/resource" + tracesdk "go.opentelemetry.io/otel/sdk/trace" + "go.opentelemetry.io/otel/trace" + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + tracepb 
"go.opentelemetry.io/proto/otlp/trace/v1" +) + +const ( + maxMessageEventsPerSpan = 128 +) + +// Spans transforms slice of OTLP ResourceSpan into a slice of SpanSnapshots. +func Spans(sdl []*tracepb.ResourceSpans) []tracesdk.ReadOnlySpan { + if len(sdl) == 0 { + return nil + } + + var out []tracesdk.ReadOnlySpan + + for _, sd := range sdl { + if sd == nil { + continue + } + + for _, sdi := range sd.InstrumentationLibrarySpans { + sda := make([]tracesdk.ReadOnlySpan, len(sdi.Spans)) + for i, s := range sdi.Spans { + sda[i] = &readOnlySpan{ + pb: s, + il: sdi.InstrumentationLibrary, + resource: sd.Resource, + schemaURL: sd.SchemaUrl, + } + } + out = append(out, sda...) + } + } + + return out +} + +type readOnlySpan struct { + // Embed the interface to implement the private method. + tracesdk.ReadOnlySpan + + pb *tracepb.Span + il *v11.InstrumentationLibrary + resource *v1.Resource + schemaURL string +} + +func (s *readOnlySpan) Name() string { + return s.pb.Name +} + +func (s *readOnlySpan) SpanContext() trace.SpanContext { + var tid trace.TraceID + copy(tid[:], s.pb.TraceId) + var sid trace.SpanID + copy(sid[:], s.pb.SpanId) + + st, _ := trace.ParseTraceState(s.pb.TraceState) + + return trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + TraceState: st, + }) +} + +func (s *readOnlySpan) Parent() trace.SpanContext { + if len(s.pb.ParentSpanId) == 0 { + return trace.SpanContext{} + } + var tid trace.TraceID + copy(tid[:], s.pb.TraceId) + var psid trace.SpanID + copy(psid[:], s.pb.ParentSpanId) + return trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: psid, + }) +} + +func (s *readOnlySpan) SpanKind() trace.SpanKind { + return spanKind(s.pb.Kind) +} + +func (s *readOnlySpan) StartTime() time.Time { + return time.Unix(0, int64(s.pb.StartTimeUnixNano)) +} + +func (s *readOnlySpan) EndTime() time.Time { + return time.Unix(0, int64(s.pb.EndTimeUnixNano)) +} + +func (s *readOnlySpan) Attributes() []attribute.KeyValue { + return Attributes(s.pb.Attributes) +} + +func (s *readOnlySpan) Links() []tracesdk.Link { + return links(s.pb.Links) +} + +func (s *readOnlySpan) Events() []tracesdk.Event { + return spanEvents(s.pb.Events) +} + +func (s *readOnlySpan) Status() tracesdk.Status { + return tracesdk.Status{ + Code: statusCode(s.pb.Status), + Description: s.pb.Status.GetMessage(), + } +} + +func (s *readOnlySpan) InstrumentationLibrary() instrumentation.Library { + return instrumentationLibrary(s.il) +} + +// Resource returns information about the entity that produced the span. +func (s *readOnlySpan) Resource() *resource.Resource { + if s.resource == nil { + return nil + } + if s.schemaURL != "" { + return resource.NewWithAttributes(s.schemaURL, Attributes(s.resource.Attributes)...) + } + return resource.NewSchemaless(Attributes(s.resource.Attributes)...) +} + +// DroppedAttributes returns the number of attributes dropped by the span +// due to limits being reached. +func (s *readOnlySpan) DroppedAttributes() int { + return int(s.pb.DroppedAttributesCount) +} + +// DroppedLinks returns the number of links dropped by the span due to +// limits being reached. +func (s *readOnlySpan) DroppedLinks() int { + return int(s.pb.DroppedLinksCount) +} + +// DroppedEvents returns the number of events dropped by the span due to +// limits being reached. +func (s *readOnlySpan) DroppedEvents() int { + return int(s.pb.DroppedEventsCount) +} + +// ChildSpanCount returns the count of spans that consider the span a +// direct parent. 
+func (s *readOnlySpan) ChildSpanCount() int { + return 0 +} + +var _ tracesdk.ReadOnlySpan = &readOnlySpan{} + +// status transform a OTLP span status into span code. +func statusCode(st *tracepb.Status) codes.Code { + switch st.Code { + case tracepb.Status_STATUS_CODE_ERROR: + return codes.Error + default: + return codes.Ok + } +} + +// links transforms OTLP span links to span Links. +func links(links []*tracepb.Span_Link) []tracesdk.Link { + if len(links) == 0 { + return nil + } + + sl := make([]tracesdk.Link, 0, len(links)) + for _, otLink := range links { + // This redefinition is necessary to prevent otLink.*ID[:] copies + // being reused -- in short we need a new otLink per iteration. + otLink := otLink + + var tid trace.TraceID + copy(tid[:], otLink.TraceId) + var sid trace.SpanID + copy(sid[:], otLink.SpanId) + + sctx := trace.NewSpanContext(trace.SpanContextConfig{ + TraceID: tid, + SpanID: sid, + }) + + sl = append(sl, tracesdk.Link{ + SpanContext: sctx, + Attributes: Attributes(otLink.Attributes), + }) + } + return sl +} + +// spanEvents transforms OTLP span events to span Events. +func spanEvents(es []*tracepb.Span_Event) []tracesdk.Event { + if len(es) == 0 { + return nil + } + + evCount := len(es) + if evCount > maxMessageEventsPerSpan { + evCount = maxMessageEventsPerSpan + } + events := make([]tracesdk.Event, 0, evCount) + messageEvents := 0 + + // Transform message events + for _, e := range es { + if messageEvents >= maxMessageEventsPerSpan { + break + } + messageEvents++ + events = append(events, + tracesdk.Event{ + Name: e.Name, + Time: time.Unix(0, int64(e.TimeUnixNano)), + Attributes: Attributes(e.Attributes), + DroppedAttributeCount: int(e.DroppedAttributesCount), + }, + ) + } + + return events +} + +// spanKind transforms a an OTLP span kind to SpanKind. +func spanKind(kind tracepb.Span_SpanKind) trace.SpanKind { + switch kind { + case tracepb.Span_SPAN_KIND_INTERNAL: + return trace.SpanKindInternal + case tracepb.Span_SPAN_KIND_CLIENT: + return trace.SpanKindClient + case tracepb.Span_SPAN_KIND_SERVER: + return trace.SpanKindServer + case tracepb.Span_SPAN_KIND_PRODUCER: + return trace.SpanKindProducer + case tracepb.Span_SPAN_KIND_CONSUMER: + return trace.SpanKindConsumer + default: + return trace.SpanKindUnspecified + } +} diff --git a/vendor/github.com/moby/buildkit/util/urlutil/redact.go b/vendor/github.com/moby/buildkit/util/urlutil/redact.go new file mode 100644 index 0000000000..385e6aef86 --- /dev/null +++ b/vendor/github.com/moby/buildkit/util/urlutil/redact.go @@ -0,0 +1,33 @@ +package urlutil + +import ( + "net/url" +) + +const mask = "xxxxx" + +// RedactCredentials takes a URL and redacts username and password from it. +// e.g. "https://user:password@host.tld/path.git" will be changed to +// "https://xxxxx:xxxxx@host.tld/path.git". 
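Expected behaviour of the function that follows, sketched from its implementation (inputs chosen for illustration only):

// RedactCredentials("https://user:secret@host.tld/repo.git") -> "https://xxxxx:xxxxx@host.tld/repo.git"
// RedactCredentials("ssh://git@host.tld/repo.git")           -> "ssh://xxxxx@host.tld/repo.git"
// RedactCredentials("host.tld/repo.git")                     -> unchanged (no userinfo present)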
+func RedactCredentials(s string) string {
+	ru, err := url.Parse(s)
+	if err != nil {
+		return s // string is not a URL, just return it
+	}
+	var (
+		hasUsername bool
+		hasPassword bool
+	)
+	if ru.User != nil {
+		hasUsername = len(ru.User.Username()) > 0
+		_, hasPassword = ru.User.Password()
+	}
+	if hasUsername && hasPassword {
+		ru.User = url.UserPassword(mask, mask)
+	} else if hasUsername {
+		ru.User = url.User(mask)
+	} else if hasPassword {
+		ru.User = url.UserPassword(ru.User.Username(), mask)
+	}
+	return ru.String()
+}
diff --git a/vendor/github.com/moby/buildkit/util/winlayers/applier.go b/vendor/github.com/moby/buildkit/util/winlayers/applier.go
index 91e415232a..c9c76b27df 100644
--- a/vendor/github.com/moby/buildkit/util/winlayers/applier.go
+++ b/vendor/github.com/moby/buildkit/util/winlayers/applier.go
@@ -17,7 +17,7 @@ import (
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/mount"
 	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 )
@@ -37,17 +37,17 @@ type winApplier struct {
 	a  diff.Applier
 }
-func (s *winApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispec.Descriptor, err error) {
+func (s *winApplier) Apply(ctx context.Context, desc ocispecs.Descriptor, mounts []mount.Mount, opts ...diff.ApplyOpt) (d ocispecs.Descriptor, err error) {
 	if !hasWindowsLayerMode(ctx) {
 		return s.a.Apply(ctx, desc, mounts, opts...)
 	}
 	compressed, err := images.DiffCompression(ctx, desc.MediaType)
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType)
+		return ocispecs.Descriptor{}, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", desc.MediaType)
 	}
-	var ocidesc ocispec.Descriptor
+	var ocidesc ocispecs.Descriptor
 	if err := mount.WithTempMount(ctx, mounts, func(root string) error {
 		ra, err := s.cs.ReaderAt(ctx, desc)
 		if err != nil {
@@ -92,15 +92,15 @@ func (s *winApplier) Apply(ctx context.Context, desc ocispec.Descriptor, mounts
 			return err
 		}
-		ocidesc = ocispec.Descriptor{
-			MediaType: ocispec.MediaTypeImageLayer,
+		ocidesc = ocispecs.Descriptor{
+			MediaType: ocispecs.MediaTypeImageLayer,
 			Size:      rc.c,
 			Digest:    digester.Digest(),
 		}
 		return nil
 	}); err != nil {
-		return ocispec.Descriptor{}, err
+		return ocispecs.Descriptor{}, err
 	}
 	return ocidesc, nil
 }
diff --git a/vendor/github.com/moby/buildkit/util/winlayers/differ.go b/vendor/github.com/moby/buildkit/util/winlayers/differ.go
index cdbc335d49..fc8ba7f7e7 100644
--- a/vendor/github.com/moby/buildkit/util/winlayers/differ.go
+++ b/vendor/github.com/moby/buildkit/util/winlayers/differ.go
@@ -14,12 +14,13 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/diff"
 	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/mount"
 	digest "github.com/opencontainers/go-digest"
-	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
+	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
+
+	log "github.com/moby/buildkit/util/bklog"
 )
 const (
@@ -35,7 +36,7 @@ func NewWalkingDiffWithWindows(store content.Store, d diff.Comparer) diff.Compar
 	}
 }
-var emptyDesc = ocispec.Descriptor{}
+var emptyDesc = ocispecs.Descriptor{}
 type winDiffer struct {
 	store
content.Store @@ -44,7 +45,7 @@ type winDiffer struct { // Compare creates a diff between the given mounts and uploads the result // to the content store. -func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispec.Descriptor, err error) { +func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opts ...diff.Opt) (d ocispecs.Descriptor, err error) { if !hasWindowsLayerMode(ctx) { return s.d.Compare(ctx, lower, upper, opts...) } @@ -57,19 +58,19 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt } if config.MediaType == "" { - config.MediaType = ocispec.MediaTypeImageLayerGzip + config.MediaType = ocispecs.MediaTypeImageLayerGzip } var isCompressed bool switch config.MediaType { - case ocispec.MediaTypeImageLayer: - case ocispec.MediaTypeImageLayerGzip: + case ocispecs.MediaTypeImageLayer: + case ocispecs.MediaTypeImageLayerGzip: isCompressed = true default: return emptyDesc, errors.Wrapf(errdefs.ErrNotImplemented, "unsupported diff media type: %v", config.MediaType) } - var ocidesc ocispec.Descriptor + var ocidesc ocispecs.Descriptor if err := mount.WithTempMount(ctx, lower, func(lowerRoot string) error { return mount.WithTempMount(ctx, upper, func(upperRoot string) error { var newReference bool @@ -80,7 +81,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt cw, err := s.store.Writer(ctx, content.WithRef(config.Reference), - content.WithDescriptor(ocispec.Descriptor{ + content.WithDescriptor(ocispecs.Descriptor{ MediaType: config.MediaType, // most contentstore implementations just ignore this })) if err != nil { @@ -108,8 +109,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt if err != nil { return errors.Wrap(err, "failed to get compressed stream") } - var w io.Writer = io.MultiWriter(compressed, dgstr.Hash()) - w, discard, done := makeWindowsLayer(w) + w, discard, done := makeWindowsLayer(io.MultiWriter(compressed, dgstr.Hash())) err = archive.WriteDiff(ctx, w, lowerRoot, upperRoot) if err != nil { discard(err) @@ -148,7 +148,7 @@ func (s *winDiffer) Compare(ctx context.Context, lower, upper []mount.Mount, opt return errors.Wrap(err, "failed to get info from content store") } - ocidesc = ocispec.Descriptor{ + ocidesc = ocispecs.Descriptor{ MediaType: config.MediaType, Size: info.Size, Digest: info.Digest, @@ -212,7 +212,6 @@ func makeWindowsLayer(w io.Writer) (io.Writer, func(error), chan error) { tarWriter := tar.NewWriter(w) err := func() error { - h := &tar.Header{ Name: "Hives", Typeflag: tar.TypeDir, diff --git a/vendor/github.com/moby/buildkit/version/version.go b/vendor/github.com/moby/buildkit/version/version.go new file mode 100644 index 0000000000..137c4a13fb --- /dev/null +++ b/vendor/github.com/moby/buildkit/version/version.go @@ -0,0 +1,62 @@ +/* + Copyright The BuildKit Authors. + Copyright The containerd Authors. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
+*/
+
+package version
+
+import (
+	"regexp"
+	"sync"
+)
+
+const (
+	defaultVersion = "0.0.0+unknown"
+)
+
+var (
+	// Package is filled at linking time
+	Package = "github.com/moby/buildkit"
+
+	// Version holds the complete version number. Filled in at linking time.
+	Version = defaultVersion
+
+	// Revision is filled with the VCS (e.g. git) revision being used to build
+	// the program at linking time.
+	Revision = ""
+)
+
+var (
+	reRelease *regexp.Regexp
+	reDev     *regexp.Regexp
+	reOnce    sync.Once
+)
+
+func UserAgent() string {
+	version := defaultVersion
+
+	reOnce.Do(func() {
+		reRelease = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+$`)
+		reDev = regexp.MustCompile(`^(v[0-9]+\.[0-9]+)\.[0-9]+`)
+	})
+
+	if matches := reRelease.FindAllStringSubmatch(Version, 1); len(matches) > 0 {
+		version = matches[0][1]
+	} else if matches := reDev.FindAllStringSubmatch(Version, 1); len(matches) > 0 {
+		version = matches[0][1] + "-dev"
+	}
+
+	return "buildkit/" + version
+}
diff --git a/vendor/github.com/moby/buildkit/worker/cacheresult.go b/vendor/github.com/moby/buildkit/worker/cacheresult.go
index 0e684e7d57..a635a53502 100644
--- a/vendor/github.com/moby/buildkit/worker/cacheresult.go
+++ b/vendor/github.com/moby/buildkit/worker/cacheresult.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 	"time"
-	"github.com/moby/buildkit/cache"
+	cacheconfig "github.com/moby/buildkit/cache/config"
 	"github.com/moby/buildkit/session"
 	"github.com/moby/buildkit/solver"
 	"github.com/moby/buildkit/util/compression"
 )
@@ -28,11 +28,10 @@ func (s *cacheResultStorage) Save(res solver.Result, createdAt time.Time) (solve
 		return solver.CacheResult{}, errors.Errorf("invalid result: %T", res.Sys())
 	}
 	if ref.ImmutableRef != nil {
-		if !cache.HasCachePolicyRetain(ref.ImmutableRef) {
-			if err := cache.CachePolicyRetain(ref.ImmutableRef); err != nil {
+		if !ref.ImmutableRef.HasCachePolicyRetain() {
+			if err := ref.ImmutableRef.SetCachePolicyRetain(); err != nil {
 				return solver.CacheResult{}, err
 			}
-			ref.ImmutableRef.Metadata().Commit()
 		}
 	}
 	return solver.CacheResult{ID: ref.ID(), CreatedAt: createdAt}, nil
@@ -68,7 +67,7 @@ func (s *cacheResultStorage) load(ctx context.Context, id string, hidden bool) (
 	return NewWorkerRefResult(ref, w), nil
 }
-func (s *cacheResultStorage) LoadRemote(ctx context.Context, res solver.CacheResult, g session.Group) (*solver.Remote, error) {
+func (s *cacheResultStorage) LoadRemotes(ctx context.Context, res solver.CacheResult, compressionopt *compression.Config, g session.Group) ([]*solver.Remote, error) {
 	w, refID, err := s.getWorkerRef(res.ID)
 	if err != nil {
 		return nil, err
 	}
@@ -77,13 +76,24 @@
 	if err != nil {
 		return nil, err
 	}
-	defer ref.Release(context.TODO())
+	if ref != nil {
+		defer ref.Release(context.TODO())
+	}
 	wref := WorkerRef{ref, w}
-	remote, err := wref.GetRemote(ctx, false, compression.Default, g)
+	all := true // load as many compression blobs as possible
+	if compressionopt == nil {
+		comp := compression.New(compression.Default)
+		compressionopt = &comp
+		all = false
+	}
+	refCfg := cacheconfig.RefConfig{
+		Compression: *compressionopt,
+	}
+	remotes, err := wref.GetRemotes(ctx, false, refCfg, all, g)
 	if err != nil {
 		return nil, nil // ignore error.
loadRemote is best effort } - return remote, nil + return remotes, nil } func (s *cacheResultStorage) Exists(id string) bool { ref, err := s.load(context.TODO(), id, true) diff --git a/vendor/github.com/moby/buildkit/worker/result.go b/vendor/github.com/moby/buildkit/worker/result.go index 9f4cdc5bfa..5691c630f6 100644 --- a/vendor/github.com/moby/buildkit/worker/result.go +++ b/vendor/github.com/moby/buildkit/worker/result.go @@ -4,9 +4,9 @@ import ( "context" "github.com/moby/buildkit/cache" + cacheconfig "github.com/moby/buildkit/cache/config" "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" - "github.com/moby/buildkit/util/compression" ) func NewWorkerRefResult(ref cache.ImmutableRef, worker Worker) solver.Result { @@ -26,16 +26,19 @@ func (wr *WorkerRef) ID() string { return wr.Worker.ID() + "::" + refID } -// GetRemote method abstracts ImmutableRef's GetRemote to allow a Worker to override. +// GetRemotes method abstracts ImmutableRef's GetRemotes to allow a Worker to override. // This is needed for moby integration. -// Use this method instead of calling ImmutableRef.GetRemote() directly. -func (wr *WorkerRef) GetRemote(ctx context.Context, createIfNeeded bool, compressionType compression.Type, g session.Group) (*solver.Remote, error) { +// Use this method instead of calling ImmutableRef.GetRemotes() directly. +func (wr *WorkerRef) GetRemotes(ctx context.Context, createIfNeeded bool, refCfg cacheconfig.RefConfig, all bool, g session.Group) ([]*solver.Remote, error) { if w, ok := wr.Worker.(interface { - GetRemote(context.Context, cache.ImmutableRef, bool, compression.Type, session.Group) (*solver.Remote, error) + GetRemotes(context.Context, cache.ImmutableRef, bool, cacheconfig.RefConfig, bool, session.Group) ([]*solver.Remote, error) }); ok { - return w.GetRemote(ctx, wr.ImmutableRef, createIfNeeded, compressionType, g) + return w.GetRemotes(ctx, wr.ImmutableRef, createIfNeeded, refCfg, all, g) } - return wr.ImmutableRef.GetRemote(ctx, createIfNeeded, compressionType, g) + if wr.ImmutableRef == nil { + return nil, nil + } + return wr.ImmutableRef.GetRemotes(ctx, createIfNeeded, refCfg, all, g) } type workerRefResult struct { diff --git a/vendor/github.com/moby/buildkit/worker/worker.go b/vendor/github.com/moby/buildkit/worker/worker.go index db42c245a2..743513bb0a 100644 --- a/vendor/github.com/moby/buildkit/worker/worker.go +++ b/vendor/github.com/moby/buildkit/worker/worker.go @@ -5,7 +5,6 @@ import ( "github.com/containerd/containerd/content" "github.com/moby/buildkit/cache" - "github.com/moby/buildkit/cache/metadata" "github.com/moby/buildkit/client" "github.com/moby/buildkit/client/llb" "github.com/moby/buildkit/executor" @@ -14,14 +13,14 @@ import ( "github.com/moby/buildkit/session" "github.com/moby/buildkit/solver" digest "github.com/opencontainers/go-digest" - specs "github.com/opencontainers/image-spec/specs-go/v1" + ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ) type Worker interface { // ID needs to be unique in the cluster ID() string Labels() map[string]string - Platforms(noCache bool) []specs.Platform + Platforms(noCache bool) []ocispecs.Platform GCPolicy() []client.PruneInfo LoadRef(ctx context.Context, id string, hidden bool) (cache.ImmutableRef, error) @@ -36,7 +35,6 @@ type Worker interface { ContentStore() content.Store Executor() executor.Executor CacheManager() cache.Manager - MetadataStore() *metadata.Store } type Infos interface { @@ -50,6 +48,9 @@ const ( LabelExecutor = labelPrefix + "executor" // "oci" or "containerd" 
LabelSnapshotter = labelPrefix + "snapshotter" // containerd snapshotter name ("overlay", "native", ...) LabelHostname = labelPrefix + "hostname" + LabelNetwork = labelPrefix + "network" // "cni" or "host" + LabelApparmorProfile = labelPrefix + "apparmor.profile" + LabelOCIProcessMode = labelPrefix + "oci.process-mode" // OCI worker: process mode ("sandbox", "no-sandbox") LabelContainerdUUID = labelPrefix + "containerd.uuid" // containerd worker: containerd UUID LabelContainerdNamespace = labelPrefix + "containerd.namespace" // containerd worker: containerd namespace ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go index 35d8108958..581cf7cdfa 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/annotations.go @@ -53,4 +53,10 @@ const ( // AnnotationDescription is the annotation key for the human-readable description of the software packaged in the image. AnnotationDescription = "org.opencontainers.image.description" + + // AnnotationBaseImageDigest is the annotation key for the digest of the image's base image. + AnnotationBaseImageDigest = "org.opencontainers.image.base.digest" + + // AnnotationBaseImageName is the annotation key for the image reference of the image's base image. + AnnotationBaseImageName = "org.opencontainers.image.base.name" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go index fe799bd698..ffff4b6d18 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/config.go @@ -89,9 +89,20 @@ type Image struct { // Architecture is the CPU architecture which the binaries in this image are built to run on. Architecture string `json:"architecture"` + // Variant is the variant of the specified CPU architecture which image binaries are intended to run on. + Variant string `json:"variant,omitempty"` + // OS is the name of the operating system which the image is built to run on. OS string `json:"os"` + // OSVersion is an optional field specifying the operating system + // version, for example on Windows `10.0.14393.1066`. + OSVersion string `json:"os.version,omitempty"` + + // OSFeatures is an optional field specifying an array of strings, + // each listing a required OS feature (for example on Windows `win32k`). + OSFeatures []string `json:"os.features,omitempty"` + // Config defines the execution parameters which should be used as a base when running a container using the image. Config ImageConfig `json:"config,omitempty"` diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go index bad7bb97f4..4f35ac134f 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/v1/mediatype.go @@ -34,6 +34,10 @@ const ( // referenced by the manifest. MediaTypeImageLayerGzip = "application/vnd.oci.image.layer.v1.tar+gzip" + // MediaTypeImageLayerZstd is the media type used for zstd compressed + // layers referenced by the manifest. + MediaTypeImageLayerZstd = "application/vnd.oci.image.layer.v1.tar+zstd" + // MediaTypeImageLayerNonDistributable is the media type for layers referenced by // the manifest but with distribution restrictions. 
MediaTypeImageLayerNonDistributable = "application/vnd.oci.image.layer.nondistributable.v1.tar" @@ -43,6 +47,11 @@ const ( // restrictions. MediaTypeImageLayerNonDistributableGzip = "application/vnd.oci.image.layer.nondistributable.v1.tar+gzip" + // MediaTypeImageLayerNonDistributableZstd is the media type for zstd + // compressed layers referenced by the manifest but with distribution + // restrictions. + MediaTypeImageLayerNonDistributableZstd = "application/vnd.oci.image.layer.nondistributable.v1.tar+zstd" + // MediaTypeImageConfig specifies the media type for the image configuration. MediaTypeImageConfig = "application/vnd.oci.image.config.v1+json" ) diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go index 0d9543f160..31f99cf645 100644 --- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go +++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go @@ -25,7 +25,7 @@ const ( VersionPatch = 2 // VersionDev indicates development branch. Releases will be empty string. - VersionDev = "" + VersionDev = "-dev" ) // Version is the specification version that the package types support. diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go deleted file mode 100644 index bfb305ffa4..0000000000 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/client.go +++ /dev/null @@ -1,354 +0,0 @@ -// +build go1.7 - -package nethttp - -import ( - "context" - "io" - "net/http" - "net/http/httptrace" - "net/url" - - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" - "github.com/opentracing/opentracing-go/log" -) - -type contextKey int - -const ( - keyTracer contextKey = iota -) - -const defaultComponentName = "net/http" - -// Transport wraps a RoundTripper. If a request is being traced with -// Tracer, Transport will inject the current span into the headers, -// and set HTTP related tags on the span. -type Transport struct { - // The actual RoundTripper to use for the request. A nil - // RoundTripper defaults to http.DefaultTransport. - http.RoundTripper -} - -type clientOptions struct { - operationName string - componentName string - urlTagFunc func(u *url.URL) string - disableClientTrace bool - disableInjectSpanContext bool - spanObserver func(span opentracing.Span, r *http.Request) -} - -// ClientOption contols the behavior of TraceRequest. -type ClientOption func(*clientOptions) - -// OperationName returns a ClientOption that sets the operation -// name for the client-side span. -func OperationName(operationName string) ClientOption { - return func(options *clientOptions) { - options.operationName = operationName - } -} - -// URLTagFunc returns a ClientOption that uses given function f -// to set the span's http.url tag. Can be used to change the default -// http.url tag, eg to redact sensitive information. -func URLTagFunc(f func(u *url.URL) string) ClientOption { - return func(options *clientOptions) { - options.urlTagFunc = f - } -} - -// ComponentName returns a ClientOption that sets the component -// name for the client-side span. -func ComponentName(componentName string) ClientOption { - return func(options *clientOptions) { - options.componentName = componentName - } -} - -// ClientTrace returns a ClientOption that turns on or off -// extra instrumentation via httptrace.WithClientTrace. 
-func ClientTrace(enabled bool) ClientOption { - return func(options *clientOptions) { - options.disableClientTrace = !enabled - } -} - -// InjectSpanContext returns a ClientOption that turns on or off -// injection of the Span context in the request HTTP headers. -// If this option is not used, the default behaviour is to -// inject the span context. -func InjectSpanContext(enabled bool) ClientOption { - return func(options *clientOptions) { - options.disableInjectSpanContext = !enabled - } -} - -// ClientSpanObserver returns a ClientOption that observes the span -// for the client-side span. -func ClientSpanObserver(f func(span opentracing.Span, r *http.Request)) ClientOption { - return func(options *clientOptions) { - options.spanObserver = f - } -} - -// TraceRequest adds a ClientTracer to req, tracing the request and -// all requests caused due to redirects. When tracing requests this -// way you must also use Transport. -// -// Example: -// -// func AskGoogle(ctx context.Context) error { -// client := &http.Client{Transport: &nethttp.Transport{}} -// req, err := http.NewRequest("GET", "http://google.com", nil) -// if err != nil { -// return err -// } -// req = req.WithContext(ctx) // extend existing trace, if any -// -// req, ht := nethttp.TraceRequest(tracer, req) -// defer ht.Finish() -// -// res, err := client.Do(req) -// if err != nil { -// return err -// } -// res.Body.Close() -// return nil -// } -func TraceRequest(tr opentracing.Tracer, req *http.Request, options ...ClientOption) (*http.Request, *Tracer) { - opts := &clientOptions{ - urlTagFunc: func(u *url.URL) string { - return u.String() - }, - spanObserver: func(_ opentracing.Span, _ *http.Request) {}, - } - for _, opt := range options { - opt(opts) - } - ht := &Tracer{tr: tr, opts: opts} - ctx := req.Context() - if !opts.disableClientTrace { - ctx = httptrace.WithClientTrace(ctx, ht.clientTrace()) - } - req = req.WithContext(context.WithValue(ctx, keyTracer, ht)) - return req, ht -} - -type closeTracker struct { - io.ReadCloser - sp opentracing.Span -} - -func (c closeTracker) Close() error { - err := c.ReadCloser.Close() - c.sp.LogFields(log.String("event", "ClosedBody")) - c.sp.Finish() - return err -} - -// TracerFromRequest retrieves the Tracer from the request. If the request does -// not have a Tracer it will return nil. -func TracerFromRequest(req *http.Request) *Tracer { - tr, ok := req.Context().Value(keyTracer).(*Tracer) - if !ok { - return nil - } - return tr -} - -// RoundTrip implements the RoundTripper interface. 
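TraceRequest and the ClientOption helpers above compose. Since this diff removes the vendored package, the following sketch only documents the API being dropped, assuming an opentracing.Tracer is already at hand:

```go
import (
	"context"
	"net/http"

	"github.com/opentracing-contrib/go-stdlib/nethttp"
	opentracing "github.com/opentracing/opentracing-go"
)

// tracedGet issues a GET with client-side tracing but without the
// fine-grained httptrace events, using the options defined above.
func tracedGet(ctx context.Context, tracer opentracing.Tracer, url string) error {
	client := &http.Client{Transport: &nethttp.Transport{}}
	req, err := http.NewRequest("GET", url, nil)
	if err != nil {
		return err
	}
	req = req.WithContext(ctx) // join an existing trace, if any
	req, ht := nethttp.TraceRequest(tracer, req, nethttp.ClientTrace(false))
	defer ht.Finish()
	resp, err := client.Do(req)
	if err != nil {
		return err
	}
	return resp.Body.Close()
}
```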
-func (t *Transport) RoundTrip(req *http.Request) (*http.Response, error) { - rt := t.RoundTripper - if rt == nil { - rt = http.DefaultTransport - } - tracer := TracerFromRequest(req) - if tracer == nil { - return rt.RoundTrip(req) - } - - tracer.start(req) - - ext.HTTPMethod.Set(tracer.sp, req.Method) - ext.HTTPUrl.Set(tracer.sp, tracer.opts.urlTagFunc(req.URL)) - tracer.opts.spanObserver(tracer.sp, req) - - if !tracer.opts.disableInjectSpanContext { - carrier := opentracing.HTTPHeadersCarrier(req.Header) - tracer.sp.Tracer().Inject(tracer.sp.Context(), opentracing.HTTPHeaders, carrier) - } - - resp, err := rt.RoundTrip(req) - - if err != nil { - tracer.sp.Finish() - return resp, err - } - ext.HTTPStatusCode.Set(tracer.sp, uint16(resp.StatusCode)) - if resp.StatusCode >= http.StatusInternalServerError { - ext.Error.Set(tracer.sp, true) - } - if req.Method == "HEAD" { - tracer.sp.Finish() - } else { - resp.Body = closeTracker{resp.Body, tracer.sp} - } - return resp, nil -} - -// Tracer holds tracing details for one HTTP request. -type Tracer struct { - tr opentracing.Tracer - root opentracing.Span - sp opentracing.Span - opts *clientOptions -} - -func (h *Tracer) start(req *http.Request) opentracing.Span { - if h.root == nil { - parent := opentracing.SpanFromContext(req.Context()) - var spanctx opentracing.SpanContext - if parent != nil { - spanctx = parent.Context() - } - operationName := h.opts.operationName - if operationName == "" { - operationName = "HTTP Client" - } - root := h.tr.StartSpan(operationName, opentracing.ChildOf(spanctx)) - h.root = root - } - - ctx := h.root.Context() - h.sp = h.tr.StartSpan("HTTP "+req.Method, opentracing.ChildOf(ctx)) - ext.SpanKindRPCClient.Set(h.sp) - - componentName := h.opts.componentName - if componentName == "" { - componentName = defaultComponentName - } - ext.Component.Set(h.sp, componentName) - - return h.sp -} - -// Finish finishes the span of the traced request. -func (h *Tracer) Finish() { - if h.root != nil { - h.root.Finish() - } -} - -// Span returns the root span of the traced request. This function -// should only be called after the request has been executed. 
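A consequence of the closeTracker wiring above: for non-HEAD requests the client span is finished only when the response body is closed, so a caller that drops the body leaks an unfinished span. A short sketch:

```go
resp, err := client.Do(req) // client uses nethttp.Transport
if err != nil {
	return err // RoundTrip already finished the span on error
}
// Closing the body logs a ClosedBody event and finishes the span.
defer resp.Body.Close()
```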
-func (h *Tracer) Span() opentracing.Span { - return h.root -} - -func (h *Tracer) clientTrace() *httptrace.ClientTrace { - return &httptrace.ClientTrace{ - GetConn: h.getConn, - GotConn: h.gotConn, - PutIdleConn: h.putIdleConn, - GotFirstResponseByte: h.gotFirstResponseByte, - Got100Continue: h.got100Continue, - DNSStart: h.dnsStart, - DNSDone: h.dnsDone, - ConnectStart: h.connectStart, - ConnectDone: h.connectDone, - WroteHeaders: h.wroteHeaders, - Wait100Continue: h.wait100Continue, - WroteRequest: h.wroteRequest, - } -} - -func (h *Tracer) getConn(hostPort string) { - ext.HTTPUrl.Set(h.sp, hostPort) - h.sp.LogFields(log.String("event", "GetConn")) -} - -func (h *Tracer) gotConn(info httptrace.GotConnInfo) { - h.sp.SetTag("net/http.reused", info.Reused) - h.sp.SetTag("net/http.was_idle", info.WasIdle) - h.sp.LogFields(log.String("event", "GotConn")) -} - -func (h *Tracer) putIdleConn(error) { - h.sp.LogFields(log.String("event", "PutIdleConn")) -} - -func (h *Tracer) gotFirstResponseByte() { - h.sp.LogFields(log.String("event", "GotFirstResponseByte")) -} - -func (h *Tracer) got100Continue() { - h.sp.LogFields(log.String("event", "Got100Continue")) -} - -func (h *Tracer) dnsStart(info httptrace.DNSStartInfo) { - h.sp.LogFields( - log.String("event", "DNSStart"), - log.String("host", info.Host), - ) -} - -func (h *Tracer) dnsDone(info httptrace.DNSDoneInfo) { - fields := []log.Field{log.String("event", "DNSDone")} - for _, addr := range info.Addrs { - fields = append(fields, log.String("addr", addr.String())) - } - if info.Err != nil { - fields = append(fields, log.Error(info.Err)) - } - h.sp.LogFields(fields...) -} - -func (h *Tracer) connectStart(network, addr string) { - h.sp.LogFields( - log.String("event", "ConnectStart"), - log.String("network", network), - log.String("addr", addr), - ) -} - -func (h *Tracer) connectDone(network, addr string, err error) { - if err != nil { - h.sp.LogFields( - log.String("message", "ConnectDone"), - log.String("network", network), - log.String("addr", addr), - log.String("event", "error"), - log.Error(err), - ) - } else { - h.sp.LogFields( - log.String("event", "ConnectDone"), - log.String("network", network), - log.String("addr", addr), - ) - } -} - -func (h *Tracer) wroteHeaders() { - h.sp.LogFields(log.String("event", "WroteHeaders")) -} - -func (h *Tracer) wait100Continue() { - h.sp.LogFields(log.String("event", "Wait100Continue")) -} - -func (h *Tracer) wroteRequest(info httptrace.WroteRequestInfo) { - if info.Err != nil { - h.sp.LogFields( - log.String("message", "WroteRequest"), - log.String("event", "error"), - log.Error(info.Err), - ) - ext.Error.Set(h.sp, true) - } else { - h.sp.LogFields(log.String("event", "WroteRequest")) - } -} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go deleted file mode 100644 index c853ca6437..0000000000 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/doc.go +++ /dev/null @@ -1,3 +0,0 @@ -// Package nethttp provides OpenTracing instrumentation for the -// net/http package. 
-package nethttp diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go deleted file mode 100644 index db2df66204..0000000000 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/server.go +++ /dev/null @@ -1,157 +0,0 @@ -// +build go1.7 - -package nethttp - -import ( - "net/http" - "net/url" - - opentracing "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/ext" -) - -type mwOptions struct { - opNameFunc func(r *http.Request) string - spanFilter func(r *http.Request) bool - spanObserver func(span opentracing.Span, r *http.Request) - urlTagFunc func(u *url.URL) string - componentName string -} - -// MWOption controls the behavior of the Middleware. -type MWOption func(*mwOptions) - -// OperationNameFunc returns a MWOption that uses given function f -// to generate operation name for each server-side span. -func OperationNameFunc(f func(r *http.Request) string) MWOption { - return func(options *mwOptions) { - options.opNameFunc = f - } -} - -// MWComponentName returns a MWOption that sets the component name -// for the server-side span. -func MWComponentName(componentName string) MWOption { - return func(options *mwOptions) { - options.componentName = componentName - } -} - -// MWSpanFilter returns a MWOption that filters requests from creating a span -// for the server-side span. -// Span won't be created if it returns false. -func MWSpanFilter(f func(r *http.Request) bool) MWOption { - return func(options *mwOptions) { - options.spanFilter = f - } -} - -// MWSpanObserver returns a MWOption that observe the span -// for the server-side span. -func MWSpanObserver(f func(span opentracing.Span, r *http.Request)) MWOption { - return func(options *mwOptions) { - options.spanObserver = f - } -} - -// MWURLTagFunc returns a MWOption that uses given function f -// to set the span's http.url tag. Can be used to change the default -// http.url tag, eg to redact sensitive information. -func MWURLTagFunc(f func(u *url.URL) string) MWOption { - return func(options *mwOptions) { - options.urlTagFunc = f - } -} - -// Middleware wraps an http.Handler and traces incoming requests. -// Additionally, it adds the span to the request's context. -// -// By default, the operation name of the spans is set to "HTTP {method}". -// This can be overriden with options. -// -// Example: -// http.ListenAndServe("localhost:80", nethttp.Middleware(tracer, http.DefaultServeMux)) -// -// The options allow fine tuning the behavior of the middleware. -// -// Example: -// mw := nethttp.Middleware( -// tracer, -// http.DefaultServeMux, -// nethttp.OperationNameFunc(func(r *http.Request) string { -// return "HTTP " + r.Method + ":/api/customers" -// }), -// nethttp.MWSpanObserver(func(sp opentracing.Span, r *http.Request) { -// sp.SetTag("http.uri", r.URL.EscapedPath()) -// }), -// ) -func Middleware(tr opentracing.Tracer, h http.Handler, options ...MWOption) http.Handler { - return MiddlewareFunc(tr, h.ServeHTTP, options...) -} - -// MiddlewareFunc wraps an http.HandlerFunc and traces incoming requests. -// It behaves identically to the Middleware function above. 
-// -// Example: -// http.ListenAndServe("localhost:80", nethttp.MiddlewareFunc(tracer, MyHandler)) -func MiddlewareFunc(tr opentracing.Tracer, h http.HandlerFunc, options ...MWOption) http.HandlerFunc { - opts := mwOptions{ - opNameFunc: func(r *http.Request) string { - return "HTTP " + r.Method - }, - spanFilter: func(r *http.Request) bool { return true }, - spanObserver: func(span opentracing.Span, r *http.Request) {}, - urlTagFunc: func(u *url.URL) string { - return u.String() - }, - } - for _, opt := range options { - opt(&opts) - } - // set component name, use "net/http" if caller does not specify - componentName := opts.componentName - if componentName == "" { - componentName = defaultComponentName - } - - fn := func(w http.ResponseWriter, r *http.Request) { - if !opts.spanFilter(r) { - h(w, r) - return - } - ctx, _ := tr.Extract(opentracing.HTTPHeaders, opentracing.HTTPHeadersCarrier(r.Header)) - sp := tr.StartSpan(opts.opNameFunc(r), ext.RPCServerOption(ctx)) - ext.HTTPMethod.Set(sp, r.Method) - ext.HTTPUrl.Set(sp, opts.urlTagFunc(r.URL)) - ext.Component.Set(sp, componentName) - opts.spanObserver(sp, r) - - sct := &statusCodeTracker{ResponseWriter: w} - r = r.WithContext(opentracing.ContextWithSpan(r.Context(), sp)) - - defer func() { - panicErr := recover() - didPanic := panicErr != nil - - if sct.status == 0 && !didPanic { - // Standard behavior of http.Server is to assume status code 200 if one was not written by a handler that returned successfully. - // https://github.com/golang/go/blob/fca286bed3ed0e12336532cc711875ae5b3cb02a/src/net/http/server.go#L120 - sct.status = 200 - } - if sct.status > 0 { - ext.HTTPStatusCode.Set(sp, uint16(sct.status)) - } - if sct.status >= http.StatusInternalServerError || didPanic { - ext.Error.Set(sp, true) - } - sp.Finish() - - if didPanic { - panic(panicErr) - } - }() - - h(sct.wrappedResponseWriter(), r) - } - return http.HandlerFunc(fn) -} diff --git a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go b/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go deleted file mode 100644 index 80a5ce0864..0000000000 --- a/vendor/github.com/opentracing-contrib/go-stdlib/nethttp/status-code-tracker.go +++ /dev/null @@ -1,251 +0,0 @@ -// +build go1.8 - -package nethttp - -import ( - "io" - "net/http" -) - -type statusCodeTracker struct { - http.ResponseWriter - status int -} - -func (w *statusCodeTracker) WriteHeader(status int) { - w.status = status - w.ResponseWriter.WriteHeader(status) -} - -func (w *statusCodeTracker) Write(b []byte) (int, error) { - return w.ResponseWriter.Write(b) -} - -// wrappedResponseWriter returns a wrapped version of the original -// ResponseWriter and only implements the same combination of additional -// interfaces as the original. This implementation is based on -// https://github.com/felixge/httpsnoop. 
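The exhaustive type switch that follows exists because a wrapper struct only exposes the methods it declares: plain embedding hides whether the underlying writer also implements optional interfaces such as http.Flusher or http.Hijacker. A minimal illustration of the problem being solved:

```go
// A bare wrapper drops optional interfaces: the assertion below fails
// even when the underlying ResponseWriter implements http.Flusher,
// because *statusCodeTracker only declares the ResponseWriter methods.
var rw http.ResponseWriter = &statusCodeTracker{ResponseWriter: w}
_, canFlush := rw.(http.Flusher) // false, even if w itself can flush
_ = canFlush
```

The switch reconstructs a value whose method set matches exactly the combination of optional interfaces the original writer supports.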
-func (w *statusCodeTracker) wrappedResponseWriter() http.ResponseWriter { - var ( - hj, i0 = w.ResponseWriter.(http.Hijacker) - cn, i1 = w.ResponseWriter.(http.CloseNotifier) - pu, i2 = w.ResponseWriter.(http.Pusher) - fl, i3 = w.ResponseWriter.(http.Flusher) - rf, i4 = w.ResponseWriter.(io.ReaderFrom) - ) - - switch { - case !i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - }{w} - case !i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - io.ReaderFrom - }{w, rf} - case !i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Flusher - }{w, fl} - case !i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Flusher - io.ReaderFrom - }{w, fl, rf} - case !i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - }{w, pu} - case !i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - io.ReaderFrom - }{w, pu, rf} - case !i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - }{w, pu, fl} - case !i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Pusher - http.Flusher - io.ReaderFrom - }{w, pu, fl, rf} - case !i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - }{w, cn} - case !i0 && i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - io.ReaderFrom - }{w, cn, rf} - case !i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - }{w, cn, fl} - case !i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{w, cn, fl, rf} - case !i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - }{w, cn, pu} - case !i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{w, cn, pu, rf} - case !i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - }{w, cn, pu, fl} - case !i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{w, cn, pu, fl, rf} - case i0 && !i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - }{w, hj} - case i0 && !i1 && !i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - io.ReaderFrom - }{w, hj, rf} - case i0 && !i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - }{w, hj, fl} - case i0 && !i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Flusher - io.ReaderFrom - }{w, hj, fl, rf} - case i0 && !i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - }{w, hj, pu} - case i0 && !i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - io.ReaderFrom - }{w, hj, pu, rf} - case i0 && !i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - }{w, hj, pu, fl} - case i0 && !i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.Pusher - http.Flusher - io.ReaderFrom - }{w, hj, pu, fl, rf} - case i0 && i1 && !i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - }{w, hj, cn} - case i0 && i1 && !i2 
&& !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - io.ReaderFrom - }{w, hj, cn, rf} - case i0 && i1 && !i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - }{w, hj, cn, fl} - case i0 && i1 && !i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Flusher - io.ReaderFrom - }{w, hj, cn, fl, rf} - case i0 && i1 && i2 && !i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - }{w, hj, cn, pu} - case i0 && i1 && i2 && !i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - io.ReaderFrom - }{w, hj, cn, pu, rf} - case i0 && i1 && i2 && i3 && !i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - }{w, hj, cn, pu, fl} - case i0 && i1 && i2 && i3 && i4: - return struct { - http.ResponseWriter - http.Hijacker - http.CloseNotifier - http.Pusher - http.Flusher - io.ReaderFrom - }{w, hj, cn, pu, fl, rf} - default: - return struct { - http.ResponseWriter - }{w} - } -} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore deleted file mode 100644 index c57100a595..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/.gitignore +++ /dev/null @@ -1 +0,0 @@ -coverage.txt diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml deleted file mode 100644 index b950e42965..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/.travis.yml +++ /dev/null @@ -1,20 +0,0 @@ -language: go - -matrix: - include: - - go: "1.13.x" - - go: "1.14.x" - - go: "tip" - env: - - LINT=true - - COVERAGE=true - -install: - - if [ "$LINT" == true ]; then go get -u golang.org/x/lint/golint/... ; else echo 'skipping lint'; fi - - go get -u github.com/stretchr/testify/... - -script: - - make test - - go build ./... 
- - if [ "$LINT" == true ]; then make lint ; else echo 'skipping lint'; fi - - if [ "$COVERAGE" == true ]; then make cover && bash <(curl -s https://codecov.io/bash) ; else echo 'skipping coverage'; fi diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md deleted file mode 100644 index d3bfcf6235..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md +++ /dev/null @@ -1,63 +0,0 @@ -Changes by Version -================== - - -1.2.0 (2020-07-01) -------------------- - -* Restore the ability to reset the current span in context to nil (#231) -- Yuri Shkuro -* Use error.object per OpenTracing Semantic Conventions (#179) -- Rahman Syed -* Convert nil pointer log field value to string "nil" (#230) -- Cyril Tovena -* Add Go module support (#215) -- Zaba505 -* Make SetTag helper types in ext public (#229) -- Blake Edwards -* Add log/fields helpers for keys from specification (#226) -- Dmitry Monakhov -* Improve noop impementation (#223) -- chanxuehong -* Add an extension to Tracer interface for custom go context creation (#220) -- Krzesimir Nowak -* Fix typo in comments (#222) -- meteorlxy -* Improve documentation for log.Object() to emphasize the requirement to pass immutable arguments (#219) -- 疯狂的小企鹅 -* [mock] Return ErrInvalidSpanContext if span context is not MockSpanContext (#216) -- Milad Irannejad - - -1.1.0 (2019-03-23) -------------------- - -Notable changes: -- The library is now released under Apache 2.0 license -- Use Set() instead of Add() in HTTPHeadersCarrier is functionally a breaking change (fixes issue [#159](https://github.com/opentracing/opentracing-go/issues/159)) -- 'golang.org/x/net/context' is replaced with 'context' from the standard library - -List of all changes: - -- Export StartSpanFromContextWithTracer (#214) -- Add IsGlobalTracerRegistered() to indicate if a tracer has been registered (#201) -- Use Set() instead of Add() in HTTPHeadersCarrier (#191) -- Update license to Apache 2.0 (#181) -- Replace 'golang.org/x/net/context' with 'context' (#176) -- Port of Python opentracing/harness/api_check.py to Go (#146) -- Fix race condition in MockSpan.Context() (#170) -- Add PeerHostIPv4.SetString() (#155) -- Add a Noop log field type to log to allow for optional fields (#150) - - -1.0.2 (2017-04-26) -------------------- - -- Add more semantic tags (#139) - - -1.0.1 (2017-02-06) -------------------- - -- Correct spelling in comments -- Address race in nextMockID() (#123) -- log: avoid panic marshaling nil error (#131) -- Deprecate InitGlobalTracer in favor of SetGlobalTracer (#128) -- Drop Go 1.5 that fails in Travis (#129) -- Add convenience methods Key() and Value() to log.Field -- Add convenience methods to log.Field (2 years, 6 months ago) - -1.0.0 (2016-09-26) -------------------- - -- This release implements OpenTracing Specification 1.0 (https://opentracing.io/spec) - diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile deleted file mode 100644 index 62abb63f58..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/Makefile +++ /dev/null @@ -1,20 +0,0 @@ -.DEFAULT_GOAL := test-and-lint - -.PHONY: test-and-lint -test-and-lint: test lint - -.PHONY: test -test: - go test -v -cover -race ./... - -.PHONY: cover -cover: - go test -v -coverprofile=coverage.txt -covermode=atomic -race ./... - -.PHONY: lint -lint: - go fmt ./... - golint ./... 
- @# Run again with magic to exit non-zero if golint outputs anything. - @! (golint ./... | read dummy) - go vet ./... diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md deleted file mode 100644 index 6ef1d7c9d2..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/README.md +++ /dev/null @@ -1,171 +0,0 @@ -[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) -[![Sourcegraph Badge](https://sourcegraph.com/github.com/opentracing/opentracing-go/-/badge.svg)](https://sourcegraph.com/github.com/opentracing/opentracing-go?badge) - -# OpenTracing API for Go - -This package is a Go platform API for OpenTracing. - -## Required Reading - -In order to understand the Go platform API, one must first be familiar with the -[OpenTracing project](https://opentracing.io) and -[terminology](https://opentracing.io/specification/) more specifically. - -## API overview for those adding instrumentation - -Everyday consumers of this `opentracing` package really only need to worry -about a couple of key abstractions: the `StartSpan` function, the `Span` -interface, and binding a `Tracer` at `main()`-time. Here are code snippets -demonstrating some important use cases. - -#### Singleton initialization - -The simplest starting point is `./default_tracer.go`. As early as possible, call - -```go - import "github.com/opentracing/opentracing-go" - import ".../some_tracing_impl" - - func main() { - opentracing.SetGlobalTracer( - // tracing impl specific: - some_tracing_impl.New(...), - ) - ... - } -``` - -#### Non-Singleton initialization - -If you prefer direct control to singletons, manage ownership of the -`opentracing.Tracer` implementation explicitly. - -#### Creating a Span given an existing Go `context.Context` - -If you use `context.Context` in your application, OpenTracing's Go library will -happily rely on it for `Span` propagation. To start a new (blocking child) -`Span`, you can use `StartSpanFromContext`. - -```go - func xyz(ctx context.Context, ...) { - ... - span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") - defer span.Finish() - span.LogFields( - log.String("event", "soft error"), - log.String("type", "cache timeout"), - log.Int("waited.millis", 1500)) - ... - } -``` - -#### Starting an empty trace by creating a "root span" - -It's always possible to create a "root" `Span` with no parent or other causal -reference. - -```go - func xyz() { - ... - sp := opentracing.StartSpan("operation_name") - defer sp.Finish() - ... - } -``` - -#### Creating a (child) Span given an existing (parent) Span - -```go - func xyz(parentSpan opentracing.Span, ...) { - ... - sp := opentracing.StartSpan( - "operation_name", - opentracing.ChildOf(parentSpan.Context())) - defer sp.Finish() - ... - } -``` - -#### Serializing to the wire - -```go - func makeSomeRequest(ctx context.Context) ... { - if span := opentracing.SpanFromContext(ctx); span != nil { - httpClient := &http.Client{} - httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) - - // Transmit the span's TraceContext as HTTP headers on our - // outbound request. 
- opentracing.GlobalTracer().Inject( - span.Context(), - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(httpReq.Header)) - - resp, err := httpClient.Do(httpReq) - ... - } - ... - } -``` - -#### Deserializing from the wire - -```go - http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { - var serverSpan opentracing.Span - appSpecificOperationName := ... - wireContext, err := opentracing.GlobalTracer().Extract( - opentracing.HTTPHeaders, - opentracing.HTTPHeadersCarrier(req.Header)) - if err != nil { - // Optionally record something about err here - } - - // Create the span referring to the RPC client if available. - // If wireContext == nil, a root span will be created. - serverSpan = opentracing.StartSpan( - appSpecificOperationName, - ext.RPCServerOption(wireContext)) - - defer serverSpan.Finish() - - ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) - ... - } -``` - -#### Conditionally capture a field using `log.Noop` - -In some situations, you may want to dynamically decide whether or not -to log a field. For example, you may want to capture additional data, -such as a customer ID, in non-production environments: - -```go - func Customer(order *Order) log.Field { - if os.Getenv("ENVIRONMENT") == "dev" { - return log.String("customer", order.Customer.ID) - } - return log.Noop() - } -``` - -#### Goroutine-safety - -The entire public API is goroutine-safe and does not require external -synchronization. - -## API pointers for those implementing a tracing system - -Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. - -## API compatibility - -For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. - -## Tracer test suite - -A test suite is available in the [harness](https://godoc.org/github.com/opentracing/opentracing-go/harness) package that can assist Tracer implementors to assert that their Tracer is working correctly. - -## Licensing - -[Apache 2.0 License](./LICENSE). diff --git a/vendor/github.com/opentracing/opentracing-go/ext.go b/vendor/github.com/opentracing/opentracing-go/ext.go deleted file mode 100644 index e11977ebe8..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext.go +++ /dev/null @@ -1,24 +0,0 @@ -package opentracing - -import ( - "context" -) - -// TracerContextWithSpanExtension is an extension interface that the -// implementation of the Tracer interface may want to implement. It -// allows to have some control over the go context when the -// ContextWithSpan is invoked. -// -// The primary purpose of this extension are adapters from opentracing -// API to some other tracing API. -type TracerContextWithSpanExtension interface { - // ContextWithSpanHook gets called by the ContextWithSpan - // function, when the Tracer implementation also implements - // this interface. It allows to put extra information into the - // context and make it available to the callers of the - // ContextWithSpan. - // - // This hook is invoked before the ContextWithSpan function - // actually puts the span into the context. 
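A tracer that bridges OpenTracing to another tracing API might implement this extension roughly as follows; otherAPIWithSpan is a hypothetical bridge helper, not a real function:

```go
type bridgeTracer struct {
	opentracing.Tracer
}

// ContextWithSpanHook runs before ContextWithSpan stores the span,
// letting the bridge mirror it into the second API's context as well.
func (t bridgeTracer) ContextWithSpanHook(ctx context.Context, span opentracing.Span) context.Context {
	return otherAPIWithSpan(ctx, span) // hypothetical helper
}
```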
- ContextWithSpanHook(ctx context.Context, span Span) context.Context -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/field.go b/vendor/github.com/opentracing/opentracing-go/ext/field.go deleted file mode 100644 index 8282bd7584..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/field.go +++ /dev/null @@ -1,17 +0,0 @@ -package ext - -import ( - "github.com/opentracing/opentracing-go" - "github.com/opentracing/opentracing-go/log" -) - -// LogError sets the error=true tag on the Span and logs err as an "error" event. -func LogError(span opentracing.Span, err error, fields ...log.Field) { - Error.Set(span, true) - ef := []log.Field{ - log.Event("error"), - log.Error(err), - } - ef = append(ef, fields...) - span.LogFields(ef...) -} diff --git a/vendor/github.com/opentracing/opentracing-go/ext/tags.go b/vendor/github.com/opentracing/opentracing-go/ext/tags.go deleted file mode 100644 index a414b5951f..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/ext/tags.go +++ /dev/null @@ -1,215 +0,0 @@ -package ext - -import "github.com/opentracing/opentracing-go" - -// These constants define common tag names recommended for better portability across -// tracing systems and languages/platforms. -// -// The tag names are defined as typed strings, so that in addition to the usual use -// -// span.setTag(TagName, value) -// -// they also support value type validation via this additional syntax: -// -// TagName.Set(span, value) -// -var ( - ////////////////////////////////////////////////////////////////////// - // SpanKind (client/server or producer/consumer) - ////////////////////////////////////////////////////////////////////// - - // SpanKind hints at relationship between spans, e.g. client/server - SpanKind = spanKindTagName("span.kind") - - // SpanKindRPCClient marks a span representing the client-side of an RPC - // or other remote call - SpanKindRPCClientEnum = SpanKindEnum("client") - SpanKindRPCClient = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCClientEnum} - - // SpanKindRPCServer marks a span representing the server-side of an RPC - // or other remote call - SpanKindRPCServerEnum = SpanKindEnum("server") - SpanKindRPCServer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindRPCServerEnum} - - // SpanKindProducer marks a span representing the producer-side of a - // message bus - SpanKindProducerEnum = SpanKindEnum("producer") - SpanKindProducer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindProducerEnum} - - // SpanKindConsumer marks a span representing the consumer-side of a - // message bus - SpanKindConsumerEnum = SpanKindEnum("consumer") - SpanKindConsumer = opentracing.Tag{Key: string(SpanKind), Value: SpanKindConsumerEnum} - - ////////////////////////////////////////////////////////////////////// - // Component name - ////////////////////////////////////////////////////////////////////// - - // Component is a low-cardinality identifier of the module, library, - // or package that is generating a span. - Component = StringTagName("component") - - ////////////////////////////////////////////////////////////////////// - // Sampling hint - ////////////////////////////////////////////////////////////////////// - - // SamplingPriority determines the priority of sampling this Span. - SamplingPriority = Uint16TagName("sampling.priority") - - ////////////////////////////////////////////////////////////////////// - // Peer tags. 
These tags can be emitted by either client-side or - // server-side to describe the other side/service in a peer-to-peer - // communications, like an RPC call. - ////////////////////////////////////////////////////////////////////// - - // PeerService records the service name of the peer. - PeerService = StringTagName("peer.service") - - // PeerAddress records the address name of the peer. This may be a "ip:port", - // a bare "hostname", a FQDN or even a database DSN substring - // like "mysql://username@127.0.0.1:3306/dbname" - PeerAddress = StringTagName("peer.address") - - // PeerHostname records the host name of the peer - PeerHostname = StringTagName("peer.hostname") - - // PeerHostIPv4 records IP v4 host address of the peer - PeerHostIPv4 = IPv4TagName("peer.ipv4") - - // PeerHostIPv6 records IP v6 host address of the peer - PeerHostIPv6 = StringTagName("peer.ipv6") - - // PeerPort records port number of the peer - PeerPort = Uint16TagName("peer.port") - - ////////////////////////////////////////////////////////////////////// - // HTTP Tags - ////////////////////////////////////////////////////////////////////// - - // HTTPUrl should be the URL of the request being handled in this segment - // of the trace, in standard URI format. The protocol is optional. - HTTPUrl = StringTagName("http.url") - - // HTTPMethod is the HTTP method of the request, and is case-insensitive. - HTTPMethod = StringTagName("http.method") - - // HTTPStatusCode is the numeric HTTP status code (200, 404, etc) of the - // HTTP response. - HTTPStatusCode = Uint16TagName("http.status_code") - - ////////////////////////////////////////////////////////////////////// - // DB Tags - ////////////////////////////////////////////////////////////////////// - - // DBInstance is database instance name. - DBInstance = StringTagName("db.instance") - - // DBStatement is a database statement for the given database type. - // It can be a query or a prepared statement (i.e., before substitution). - DBStatement = StringTagName("db.statement") - - // DBType is a database type. For any SQL database, "sql". - // For others, the lower-case database category, e.g. "redis" - DBType = StringTagName("db.type") - - // DBUser is a username for accessing database. - DBUser = StringTagName("db.user") - - ////////////////////////////////////////////////////////////////////// - // Message Bus Tag - ////////////////////////////////////////////////////////////////////// - - // MessageBusDestination is an address at which messages can be exchanged - MessageBusDestination = StringTagName("message_bus.destination") - - ////////////////////////////////////////////////////////////////////// - // Error Tag - ////////////////////////////////////////////////////////////////////// - - // Error indicates that operation represented by the span resulted in an error. 
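These typed tag names are meant to be used via their Set helpers, which add some compile-time type checking over raw span.SetTag calls. A short sketch tagging a database client span; the operation, service, and statement values are hypothetical:

```go
sp := opentracing.StartSpan("get-user")
ext.SpanKindRPCClient.Set(sp)
ext.PeerService.Set(sp, "users-db")
ext.PeerPort.Set(sp, 5432)
ext.DBType.Set(sp, "sql")
ext.DBStatement.Set(sp, "SELECT id FROM users WHERE name = ?")
sp.Finish()
```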
- Error = BoolTagName("error") -) - -// --- - -// SpanKindEnum represents common span types -type SpanKindEnum string - -type spanKindTagName string - -// Set adds a string tag to the `span` -func (tag spanKindTagName) Set(span opentracing.Span, value SpanKindEnum) { - span.SetTag(string(tag), value) -} - -type rpcServerOption struct { - clientContext opentracing.SpanContext -} - -func (r rpcServerOption) Apply(o *opentracing.StartSpanOptions) { - if r.clientContext != nil { - opentracing.ChildOf(r.clientContext).Apply(o) - } - SpanKindRPCServer.Apply(o) -} - -// RPCServerOption returns a StartSpanOption appropriate for an RPC server span -// with `client` representing the metadata for the remote peer Span if available. -// In case client == nil, due to the client not being instrumented, this RPC -// server span will be a root span. -func RPCServerOption(client opentracing.SpanContext) opentracing.StartSpanOption { - return rpcServerOption{client} -} - -// --- - -// StringTagName is a common tag name to be set to a string value -type StringTagName string - -// Set adds a string tag to the `span` -func (tag StringTagName) Set(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint32TagName is a common tag name to be set to a uint32 value -type Uint32TagName string - -// Set adds a uint32 tag to the `span` -func (tag Uint32TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// --- - -// Uint16TagName is a common tag name to be set to a uint16 value -type Uint16TagName string - -// Set adds a uint16 tag to the `span` -func (tag Uint16TagName) Set(span opentracing.Span, value uint16) { - span.SetTag(string(tag), value) -} - -// --- - -// BoolTagName is a common tag name to be set to a bool value -type BoolTagName string - -// Set adds a bool tag to the `span` -func (tag BoolTagName) Set(span opentracing.Span, value bool) { - span.SetTag(string(tag), value) -} - -// IPv4TagName is a common tag name to be set to an ipv4 value -type IPv4TagName string - -// Set adds IP v4 host address of the peer as an uint32 value to the `span`, keep this for backward and zipkin compatibility -func (tag IPv4TagName) Set(span opentracing.Span, value uint32) { - span.SetTag(string(tag), value) -} - -// SetString records IP v4 host address of the peer as a .-separated tuple to the `span`. E.g., "127.0.0.1" -func (tag IPv4TagName) SetString(span opentracing.Span, value string) { - span.SetTag(string(tag), value) -} diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go deleted file mode 100644 index 4f7066a925..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/globaltracer.go +++ /dev/null @@ -1,42 +0,0 @@ -package opentracing - -type registeredTracer struct { - tracer Tracer - isRegistered bool -} - -var ( - globalTracer = registeredTracer{NoopTracer{}, false} -) - -// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by -// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an -// opentracing.Tracer instance) should call SetGlobalTracer as early as -// possible in main(), prior to calling the `StartSpan` global func below. -// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` -// (etc) globals are noops. -func SetGlobalTracer(tracer Tracer) { - globalTracer = registeredTracer{tracer, true} -} - -// GlobalTracer returns the global singleton `Tracer` implementation. 
-// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop -// implementation that drops all data handed to it. -func GlobalTracer() Tracer { - return globalTracer.tracer -} - -// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. -func StartSpan(operationName string, opts ...StartSpanOption) Span { - return globalTracer.tracer.StartSpan(operationName, opts...) -} - -// InitGlobalTracer is deprecated. Please use SetGlobalTracer. -func InitGlobalTracer(tracer Tracer) { - SetGlobalTracer(tracer) -} - -// IsGlobalTracerRegistered returns a `bool` to indicate if a tracer has been globally registered -func IsGlobalTracerRegistered() bool { - return globalTracer.isRegistered -} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go deleted file mode 100644 index 1831bc9b26..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/gocontext.go +++ /dev/null @@ -1,65 +0,0 @@ -package opentracing - -import "context" - -type contextKey struct{} - -var activeSpanKey = contextKey{} - -// ContextWithSpan returns a new `context.Context` that holds a reference to -// the span. If span is nil, a new context without an active span is returned. -func ContextWithSpan(ctx context.Context, span Span) context.Context { - if span != nil { - if tracerWithHook, ok := span.Tracer().(TracerContextWithSpanExtension); ok { - ctx = tracerWithHook.ContextWithSpanHook(ctx, span) - } - } - return context.WithValue(ctx, activeSpanKey, span) -} - -// SpanFromContext returns the `Span` previously associated with `ctx`, or -// `nil` if no such `Span` could be found. -// -// NOTE: context.Context != SpanContext: the former is Go's intra-process -// context propagation mechanism, and the latter houses OpenTracing's per-Span -// identity and baggage information. -func SpanFromContext(ctx context.Context) Span { - val := ctx.Value(activeSpanKey) - if sp, ok := val.(Span); ok { - return sp - } - return nil -} - -// StartSpanFromContext starts and returns a Span with `operationName`, using -// any Span found within `ctx` as a ChildOfRef. If no such parent could be -// found, StartSpanFromContext creates a root (parentless) Span. -// -// The second return value is a context.Context object built around the -// returned Span. -// -// Example usage: -// -// SomeFunction(ctx context.Context, ...) { -// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") -// defer sp.Finish() -// ... -// } -func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { - return StartSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) -} - -// StartSpanFromContextWithTracer starts and returns a span with `operationName` -// using a span found within the context as a ChildOfRef. If that doesn't exist -// it creates a root span. It also returns a context.Context object built -// around the returned span. -// -// It's behavior is identical to StartSpanFromContext except that it takes an explicit -// tracer as opposed to using the global tracer. -func StartSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { - if parentSpan := SpanFromContext(ctx); parentSpan != nil { - opts = append(opts, ChildOf(parentSpan.Context())) - } - span := tracer.StartSpan(operationName, opts...) 
- return span, ContextWithSpan(ctx, span) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go deleted file mode 100644 index f222ded797..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/field.go +++ /dev/null @@ -1,282 +0,0 @@ -package log - -import ( - "fmt" - "math" -) - -type fieldType int - -const ( - stringType fieldType = iota - boolType - intType - int32Type - uint32Type - int64Type - uint64Type - float32Type - float64Type - errorType - objectType - lazyLoggerType - noopType -) - -// Field instances are constructed via LogBool, LogString, and so on. -// Tracing implementations may then handle them via the Field.Marshal -// method. -// -// "heavily influenced by" (i.e., partially stolen from) -// https://github.com/uber-go/zap -type Field struct { - key string - fieldType fieldType - numericVal int64 - stringVal string - interfaceVal interface{} -} - -// String adds a string-valued key:value pair to a Span.LogFields() record -func String(key, val string) Field { - return Field{ - key: key, - fieldType: stringType, - stringVal: val, - } -} - -// Bool adds a bool-valued key:value pair to a Span.LogFields() record -func Bool(key string, val bool) Field { - var numericVal int64 - if val { - numericVal = 1 - } - return Field{ - key: key, - fieldType: boolType, - numericVal: numericVal, - } -} - -// Int adds an int-valued key:value pair to a Span.LogFields() record -func Int(key string, val int) Field { - return Field{ - key: key, - fieldType: intType, - numericVal: int64(val), - } -} - -// Int32 adds an int32-valued key:value pair to a Span.LogFields() record -func Int32(key string, val int32) Field { - return Field{ - key: key, - fieldType: int32Type, - numericVal: int64(val), - } -} - -// Int64 adds an int64-valued key:value pair to a Span.LogFields() record -func Int64(key string, val int64) Field { - return Field{ - key: key, - fieldType: int64Type, - numericVal: val, - } -} - -// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record -func Uint32(key string, val uint32) Field { - return Field{ - key: key, - fieldType: uint32Type, - numericVal: int64(val), - } -} - -// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record -func Uint64(key string, val uint64) Field { - return Field{ - key: key, - fieldType: uint64Type, - numericVal: int64(val), - } -} - -// Float32 adds a float32-valued key:value pair to a Span.LogFields() record -func Float32(key string, val float32) Field { - return Field{ - key: key, - fieldType: float32Type, - numericVal: int64(math.Float32bits(val)), - } -} - -// Float64 adds a float64-valued key:value pair to a Span.LogFields() record -func Float64(key string, val float64) Field { - return Field{ - key: key, - fieldType: float64Type, - numericVal: int64(math.Float64bits(val)), - } -} - -// Error adds an error with the key "error.object" to a Span.LogFields() record -func Error(err error) Field { - return Field{ - key: "error.object", - fieldType: errorType, - interfaceVal: err, - } -} - -// Object adds an object-valued key:value pair to a Span.LogFields() record -// Please pass in an immutable object, otherwise there may be concurrency issues. -// Such as passing in the map, log.Object may result in "fatal error: concurrent map iteration and map write". -// Because span is sent asynchronously, it is possible that this map will also be modified. 
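Given the warning above about mutable arguments, a common pattern is to snapshot the value before logging it. A minimal sketch, where labels is a hypothetical map shared with other goroutines:

```go
// Copy the map before logging it: the span may marshal the field
// asynchronously, and logging the live map risks a concurrent
// map read/write with later mutations.
snapshot := make(map[string]string, len(labels))
for k, v := range labels {
	snapshot[k] = v
}
span.LogFields(log.Object("labels", snapshot))
```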
-func Object(key string, obj interface{}) Field { - return Field{ - key: key, - fieldType: objectType, - interfaceVal: obj, - } -} - -// Event creates a string-valued Field for span logs with key="event" and value=val. -func Event(val string) Field { - return String("event", val) -} - -// Message creates a string-valued Field for span logs with key="message" and value=val. -func Message(val string) Field { - return String("message", val) -} - -// LazyLogger allows for user-defined, late-bound logging of arbitrary data -type LazyLogger func(fv Encoder) - -// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing -// implementation will call the LazyLogger function at an indefinite time in -// the future (after Lazy() returns). -func Lazy(ll LazyLogger) Field { - return Field{ - fieldType: lazyLoggerType, - interfaceVal: ll, - } -} - -// Noop creates a no-op log field that should be ignored by the tracer. -// It can be used to capture optional fields, for example those that should -// only be logged in non-production environment: -// -// func customerField(order *Order) log.Field { -// if os.Getenv("ENVIRONMENT") == "dev" { -// return log.String("customer", order.Customer.ID) -// } -// return log.Noop() -// } -// -// span.LogFields(log.String("event", "purchase"), customerField(order)) -// -func Noop() Field { - return Field{ - fieldType: noopType, - } -} - -// Encoder allows access to the contents of a Field (via a call to -// Field.Marshal). -// -// Tracer implementations typically provide an implementation of Encoder; -// OpenTracing callers typically do not need to concern themselves with it. -type Encoder interface { - EmitString(key, value string) - EmitBool(key string, value bool) - EmitInt(key string, value int) - EmitInt32(key string, value int32) - EmitInt64(key string, value int64) - EmitUint32(key string, value uint32) - EmitUint64(key string, value uint64) - EmitFloat32(key string, value float32) - EmitFloat64(key string, value float64) - EmitObject(key string, value interface{}) - EmitLazyLogger(value LazyLogger) -} - -// Marshal passes a Field instance through to the appropriate -// field-type-specific method of an Encoder. -func (lf Field) Marshal(visitor Encoder) { - switch lf.fieldType { - case stringType: - visitor.EmitString(lf.key, lf.stringVal) - case boolType: - visitor.EmitBool(lf.key, lf.numericVal != 0) - case intType: - visitor.EmitInt(lf.key, int(lf.numericVal)) - case int32Type: - visitor.EmitInt32(lf.key, int32(lf.numericVal)) - case int64Type: - visitor.EmitInt64(lf.key, int64(lf.numericVal)) - case uint32Type: - visitor.EmitUint32(lf.key, uint32(lf.numericVal)) - case uint64Type: - visitor.EmitUint64(lf.key, uint64(lf.numericVal)) - case float32Type: - visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) - case float64Type: - visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) - case errorType: - if err, ok := lf.interfaceVal.(error); ok { - visitor.EmitString(lf.key, err.Error()) - } else { - visitor.EmitString(lf.key, "") - } - case objectType: - visitor.EmitObject(lf.key, lf.interfaceVal) - case lazyLoggerType: - visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) - case noopType: - // intentionally left blank - } -} - -// Key returns the field's key. -func (lf Field) Key() string { - return lf.key -} - -// Value returns the field's value as interface{}. 
-func (lf Field) Value() interface{} { - switch lf.fieldType { - case stringType: - return lf.stringVal - case boolType: - return lf.numericVal != 0 - case intType: - return int(lf.numericVal) - case int32Type: - return int32(lf.numericVal) - case int64Type: - return int64(lf.numericVal) - case uint32Type: - return uint32(lf.numericVal) - case uint64Type: - return uint64(lf.numericVal) - case float32Type: - return math.Float32frombits(uint32(lf.numericVal)) - case float64Type: - return math.Float64frombits(uint64(lf.numericVal)) - case errorType, objectType, lazyLoggerType: - return lf.interfaceVal - case noopType: - return nil - default: - return nil - } -} - -// String returns a string representation of the key and value. -func (lf Field) String() string { - return fmt.Sprint(lf.key, ":", lf.Value()) -} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go deleted file mode 100644 index d57e28aa57..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/log/util.go +++ /dev/null @@ -1,61 +0,0 @@ -package log - -import ( - "fmt" - "reflect" -) - -// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice -// a la Span.LogFields(). -func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { - if len(keyValues)%2 != 0 { - return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) - } - fields := make([]Field, len(keyValues)/2) - for i := 0; i*2 < len(keyValues); i++ { - key, ok := keyValues[i*2].(string) - if !ok { - return nil, fmt.Errorf( - "non-string key (pair #%d): %T", - i, keyValues[i*2]) - } - switch typedVal := keyValues[i*2+1].(type) { - case bool: - fields[i] = Bool(key, typedVal) - case string: - fields[i] = String(key, typedVal) - case int: - fields[i] = Int(key, typedVal) - case int8: - fields[i] = Int32(key, int32(typedVal)) - case int16: - fields[i] = Int32(key, int32(typedVal)) - case int32: - fields[i] = Int32(key, typedVal) - case int64: - fields[i] = Int64(key, typedVal) - case uint: - fields[i] = Uint64(key, uint64(typedVal)) - case uint64: - fields[i] = Uint64(key, typedVal) - case uint8: - fields[i] = Uint32(key, uint32(typedVal)) - case uint16: - fields[i] = Uint32(key, uint32(typedVal)) - case uint32: - fields[i] = Uint32(key, typedVal) - case float32: - fields[i] = Float32(key, typedVal) - case float64: - fields[i] = Float64(key, typedVal) - default: - if typedVal == nil || (reflect.ValueOf(typedVal).Kind() == reflect.Ptr && reflect.ValueOf(typedVal).IsNil()) { - fields[i] = String(key, "nil") - continue - } - // When in doubt, coerce to a string - fields[i] = String(key, fmt.Sprint(typedVal)) - } - } - return fields, nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go deleted file mode 100644 index f9b680a213..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/noop.go +++ /dev/null @@ -1,64 +0,0 @@ -package opentracing - -import "github.com/opentracing/opentracing-go/log" - -// A NoopTracer is a trivial, minimum overhead implementation of Tracer -// for which all operations are no-ops. -// -// The primary use of this implementation is in libraries, such as RPC -// frameworks, that make tracing an optional feature controlled by the -// end user. A no-op implementation allows said libraries to use it -// as the default Tracer and to write instrumentation that does -// not need to keep checking if the tracer instance is nil. 
-// -// For the same reason, the NoopTracer is the default "global" tracer -// (see GlobalTracer and SetGlobalTracer functions). -// -// WARNING: NoopTracer does not support baggage propagation. -type NoopTracer struct{} - -type noopSpan struct{} -type noopSpanContext struct{} - -var ( - defaultNoopSpanContext SpanContext = noopSpanContext{} - defaultNoopSpan Span = noopSpan{} - defaultNoopTracer Tracer = NoopTracer{} -) - -const ( - emptyString = "" -) - -// noopSpanContext: -func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} - -// noopSpan: -func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } -func (n noopSpan) SetBaggageItem(key, val string) Span { return n } -func (n noopSpan) BaggageItem(key string) string { return emptyString } -func (n noopSpan) SetTag(key string, value interface{}) Span { return n } -func (n noopSpan) LogFields(fields ...log.Field) {} -func (n noopSpan) LogKV(keyVals ...interface{}) {} -func (n noopSpan) Finish() {} -func (n noopSpan) FinishWithOptions(opts FinishOptions) {} -func (n noopSpan) SetOperationName(operationName string) Span { return n } -func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } -func (n noopSpan) LogEvent(event string) {} -func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} -func (n noopSpan) Log(data LogData) {} - -// StartSpan belongs to the Tracer interface. -func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { - return defaultNoopSpan -} - -// Inject belongs to the Tracer interface. -func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { - return nil -} - -// Extract belongs to the Tracer interface. -func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { - return nil, ErrSpanContextNotFound -} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go deleted file mode 100644 index b0c275eb05..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/propagation.go +++ /dev/null @@ -1,176 +0,0 @@ -package opentracing - -import ( - "errors" - "net/http" -) - -/////////////////////////////////////////////////////////////////////////////// -// CORE PROPAGATION INTERFACES: -/////////////////////////////////////////////////////////////////////////////// - -var ( - // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or - // Tracer.Extract() is not recognized by the Tracer implementation. - ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") - - // ErrSpanContextNotFound occurs when the `carrier` passed to - // Tracer.Extract() is valid and uncorrupted but has insufficient - // information to extract a SpanContext. - ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") - - // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to - // operate on a SpanContext which it is not prepared to handle (for - // example, since it was created by a different tracer implementation). - ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") - - // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() - // implementations expect a different type of `carrier` than they are - // given. 
- ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") - - // ErrSpanContextCorrupted occurs when the `carrier` passed to - // Tracer.Extract() is of the expected type but is corrupted. - ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") -) - -/////////////////////////////////////////////////////////////////////////////// -// BUILTIN PROPAGATION FORMATS: -/////////////////////////////////////////////////////////////////////////////// - -// BuiltinFormat is used to demarcate the values within package `opentracing` -// that are intended for use with the Tracer.Inject() and Tracer.Extract() -// methods. -type BuiltinFormat byte - -const ( - // Binary represents SpanContexts as opaque binary data. - // - // For Tracer.Inject(): the carrier must be an `io.Writer`. - // - // For Tracer.Extract(): the carrier must be an `io.Reader`. - Binary BuiltinFormat = iota - - // TextMap represents SpanContexts as key:value string pairs. - // - // Unlike HTTPHeaders, the TextMap format does not restrict the key or - // value character sets in any way. - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - TextMap - - // HTTPHeaders represents SpanContexts as HTTP header string pairs. - // - // Unlike TextMap, the HTTPHeaders format requires that the keys and values - // be valid as HTTP headers as-is (i.e., character casing may be unstable - // and special characters are disallowed in keys, values should be - // URL-escaped, etc). - // - // For Tracer.Inject(): the carrier must be a `TextMapWriter`. - // - // For Tracer.Extract(): the carrier must be a `TextMapReader`. - // - // See HTTPHeadersCarrier for an implementation of both TextMapWriter - // and TextMapReader that defers to an http.Header instance for storage. - // For example, Inject(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := span.Tracer().Inject( - // span.Context(), opentracing.HTTPHeaders, carrier) - // - // Or Extract(): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract( - // opentracing.HTTPHeaders, carrier) - // - HTTPHeaders -) - -// TextMapWriter is the Inject() carrier for the TextMap builtin format. With -// it, the caller can encode a SpanContext for propagation as entries in a map -// of unicode strings. -type TextMapWriter interface { - // Set a key:value pair to the carrier. Multiple calls to Set() for the - // same key leads to undefined behavior. - // - // NOTE: The backing store for the TextMapWriter may contain data unrelated - // to SpanContext. As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - Set(key, val string) -} - -// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, -// the caller can decode a propagated SpanContext as entries in a map of -// unicode strings. -type TextMapReader interface { - // ForeachKey returns TextMap contents via repeated calls to the `handler` - // function. If any call to `handler` returns a non-nil error, ForeachKey - // terminates and returns that error. - // - // NOTE: The backing store for the TextMapReader may contain data unrelated - // to SpanContext. 
As such, Inject() and Extract() implementations that - // call the TextMapWriter and TextMapReader interfaces must agree on a - // prefix or other convention to distinguish their own key:value pairs. - // - // The "foreach" callback pattern reduces unnecessary copying in some cases - // and also allows implementations to hold locks while the map is read. - ForeachKey(handler func(key, val string) error) error -} - -// TextMapCarrier allows the use of regular map[string]string -// as both TextMapWriter and TextMapReader. -type TextMapCarrier map[string]string - -// ForeachKey conforms to the TextMapReader interface. -func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { - for k, v := range c { - if err := handler(k, v); err != nil { - return err - } - } - return nil -} - -// Set implements Set() of opentracing.TextMapWriter -func (c TextMapCarrier) Set(key, val string) { - c[key] = val -} - -// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. -// -// Example usage for server side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) -// -// Example usage for client side: -// -// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) -// err := tracer.Inject( -// span.Context(), -// opentracing.HTTPHeaders, -// carrier) -// -type HTTPHeadersCarrier http.Header - -// Set conforms to the TextMapWriter interface. -func (c HTTPHeadersCarrier) Set(key, val string) { - h := http.Header(c) - h.Set(key, val) -} - -// ForeachKey conforms to the TextMapReader interface. -func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { - for k, vals := range c { - for _, v := range vals { - if err := handler(k, v); err != nil { - return err - } - } - } - return nil -} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go deleted file mode 100644 index 0d3fb53418..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/span.go +++ /dev/null @@ -1,189 +0,0 @@ -package opentracing - -import ( - "time" - - "github.com/opentracing/opentracing-go/log" -) - -// SpanContext represents Span state that must propagate to descendant Spans and across process -// boundaries (e.g., a tuple). -type SpanContext interface { - // ForeachBaggageItem grants access to all baggage items stored in the - // SpanContext. - // The handler function will be called for each baggage key/value pair. - // The ordering of items is not guaranteed. - // - // The bool return value indicates if the handler wants to continue iterating - // through the rest of the baggage items; for example if the handler is trying to - // find some baggage item by pattern matching the name, it can return false - // as soon as the item is found to stop further iterations. - ForeachBaggageItem(handler func(k, v string) bool) -} - -// Span represents an active, un-finished span in the OpenTracing system. -// -// Spans are created by the Tracer interface. -type Span interface { - // Sets the end timestamp and finalizes Span state. - // - // With the exception of calls to Context() (which are always allowed), - // Finish() must be the last call made to any span instance, and to do - // otherwise leads to undefined behavior. - Finish() - // FinishWithOptions is like Finish() but with explicit control over - // timestamps and log data. - FinishWithOptions(opts FinishOptions) - - // Context() yields the SpanContext for this Span. 
Note that the return - // value of Context() is still valid after a call to Span.Finish(), as is - // a call to Span.Context() after a call to Span.Finish(). - Context() SpanContext - - // Sets or changes the operation name. - // - // Returns a reference to this Span for chaining. - SetOperationName(operationName string) Span - - // Adds a tag to the span. - // - // If there is a pre-existing tag set for `key`, it is overwritten. - // - // Tag values can be numeric types, strings, or bools. The behavior of - // other tag value types is undefined at the OpenTracing level. If a - // tracing system does not know how to handle a particular value type, it - // may ignore the tag, but shall not panic. - // - // Returns a reference to this Span for chaining. - SetTag(key string, value interface{}) Span - - // LogFields is an efficient and type-checked way to record key:value - // logging data about a Span, though the programming interface is a little - // more verbose than LogKV(). Here's an example: - // - // span.LogFields( - // log.String("event", "soft error"), - // log.String("type", "cache timeout"), - // log.Int("waited.millis", 1500)) - // - // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. - LogFields(fields ...log.Field) - - // LogKV is a concise, readable way to record key:value logging data about - // a Span, though unfortunately this also makes it less efficient and less - // type-safe than LogFields(). Here's an example: - // - // span.LogKV( - // "event", "soft error", - // "type", "cache timeout", - // "waited.millis", 1500) - // - // For LogKV (as opposed to LogFields()), the parameters must appear as - // key-value pairs, like - // - // span.LogKV(key1, val1, key2, val2, key3, val3, ...) - // - // The keys must all be strings. The values may be strings, numeric types, - // bools, Go error instances, or arbitrary structs. - // - // (Note to implementors: consider the log.InterleavedKVToFields() helper) - LogKV(alternatingKeyValues ...interface{}) - - // SetBaggageItem sets a key:value pair on this Span and its SpanContext - // that also propagates to descendants of this Span. - // - // SetBaggageItem() enables powerful functionality given a full-stack - // opentracing integration (e.g., arbitrary application data from a mobile - // app can make it, transparently, all the way into the depths of a storage - // system), and with it some powerful costs: use this feature with care. - // - // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to - // *future* causal descendants of the associated Span. - // - // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and - // value is copied into every local *and remote* child of the associated - // Span, and that can add up to a lot of network and cpu overhead. - // - // Returns a reference to this Span for chaining. - SetBaggageItem(restrictedKey, value string) Span - - // Gets the value for a baggage item given its key. Returns the empty string - // if the value isn't found in this Span. - BaggageItem(restrictedKey string) string - - // Provides access to the Tracer that created this Span. - Tracer() Tracer - - // Deprecated: use LogFields or LogKV - LogEvent(event string) - // Deprecated: use LogFields or LogKV - LogEventWithPayload(event string, payload interface{}) - // Deprecated: use LogFields or LogKV - Log(data LogData) -} - -// LogRecord is data associated with a single Span log. Every LogRecord -// instance must specify at least one Field. 
-type LogRecord struct { - Timestamp time.Time - Fields []log.Field -} - -// FinishOptions allows Span.FinishWithOptions callers to override the finish -// timestamp and provide log data via a bulk interface. -type FinishOptions struct { - // FinishTime overrides the Span's finish time, or implicitly becomes - // time.Now() if FinishTime.IsZero(). - // - // FinishTime must resolve to a timestamp that's >= the Span's StartTime - // (per StartSpanOptions). - FinishTime time.Time - - // LogRecords allows the caller to specify the contents of many LogFields() - // calls with a single slice. May be nil. - // - // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must - // be set explicitly). Also, they must be >= the Span's start timestamp and - // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the - // behavior of FinishWithOptions() is undefined. - // - // If specified, the caller hands off ownership of LogRecords at - // FinishWithOptions() invocation time. - // - // If specified, the (deprecated) BulkLogData must be nil or empty. - LogRecords []LogRecord - - // BulkLogData is DEPRECATED. - BulkLogData []LogData -} - -// LogData is DEPRECATED -type LogData struct { - Timestamp time.Time - Event string - Payload interface{} -} - -// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord -func (ld *LogData) ToLogRecord() LogRecord { - var literalTimestamp time.Time - if ld.Timestamp.IsZero() { - literalTimestamp = time.Now() - } else { - literalTimestamp = ld.Timestamp - } - rval := LogRecord{ - Timestamp: literalTimestamp, - } - if ld.Payload == nil { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - } - } else { - rval.Fields = []log.Field{ - log.String("event", ld.Event), - log.Object("payload", ld.Payload), - } - } - return rval -} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go deleted file mode 100644 index 715f0cedfb..0000000000 --- a/vendor/github.com/opentracing/opentracing-go/tracer.go +++ /dev/null @@ -1,304 +0,0 @@ -package opentracing - -import "time" - -// Tracer is a simple, thin interface for Span creation and SpanContext -// propagation. -type Tracer interface { - - // Create, start, and return a new Span with the given `operationName` and - // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows - // from the "functional options" pattern, per - // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) - // - // A Span with no SpanReference options (e.g., opentracing.ChildOf() or - // opentracing.FollowsFrom()) becomes the root of its own trace. - // - // Examples: - // - // var tracer opentracing.Tracer = ... - // - // // The root-span case: - // sp := tracer.StartSpan("GetFeed") - // - // // The vanilla child span case: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context())) - // - // // All the bells and whistles: - // sp := tracer.StartSpan( - // "GetFeed", - // opentracing.ChildOf(parentSpan.Context()), - // opentracing.Tag{"user_agent", loggedReq.UserAgent}, - // opentracing.StartTime(loggedReq.Timestamp), - // ) - // - StartSpan(operationName string, opts ...StartSpanOption) Span - - // Inject() takes the `sm` SpanContext instance and injects it for - // propagation within `carrier`. The actual type of `carrier` depends on - // the value of `format`. 
- // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see https://godoc.org/context#WithValue). - // - // Example usage (sans error handling): - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // err := tracer.Inject( - // span.Context(), - // opentracing.HTTPHeaders, - // carrier) - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Implementations may return opentracing.ErrUnsupportedFormat if `format` - // is not supported by (or not known by) the implementation. - // - // Implementations may return opentracing.ErrInvalidCarrier or any other - // implementation-specific error if the format is supported but injection - // fails anyway. - // - // See Tracer.Extract(). - Inject(sm SpanContext, format interface{}, carrier interface{}) error - - // Extract() returns a SpanContext instance given `format` and `carrier`. - // - // OpenTracing defines a common set of `format` values (see BuiltinFormat), - // and each has an expected carrier type. - // - // Other packages may declare their own `format` values, much like the keys - // used by `context.Context` (see - // https://godoc.org/golang.org/x/net/context#WithValue). - // - // Example usage (with StartSpan): - // - // - // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) - // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) - // - // // ... assuming the ultimate goal here is to resume the trace with a - // // server-side Span: - // var serverSpan opentracing.Span - // if err == nil { - // span = tracer.StartSpan( - // rpcMethodName, ext.RPCServerOption(clientContext)) - // } else { - // span = tracer.StartSpan(rpcMethodName) - // } - // - // - // NOTE: All opentracing.Tracer implementations MUST support all - // BuiltinFormats. - // - // Return values: - // - A successful Extract returns a SpanContext instance and a nil error - // - If there was simply no SpanContext to extract in `carrier`, Extract() - // returns (nil, opentracing.ErrSpanContextNotFound) - // - If `format` is unsupported or unrecognized, Extract() returns (nil, - // opentracing.ErrUnsupportedFormat) - // - If there are more fundamental problems with the `carrier` object, - // Extract() may return opentracing.ErrInvalidCarrier, - // opentracing.ErrSpanContextCorrupted, or implementation-specific - // errors. - // - // See Tracer.Inject(). - Extract(format interface{}, carrier interface{}) (SpanContext, error) -} - -// StartSpanOptions allows Tracer.StartSpan() callers and implementors a -// mechanism to override the start timestamp, specify Span References, and make -// a single Tag or multiple Tags available at Span start time. -// -// StartSpan() callers should look at the StartSpanOption interface and -// implementations available in this package. -// -// Tracer implementations can convert a slice of `StartSpanOption` instances -// into a `StartSpanOptions` struct like so: -// -// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { -// sso := opentracing.StartSpanOptions{} -// for _, o := range opts { -// o.Apply(&sso) -// } -// ... -// } -// -type StartSpanOptions struct { - // Zero or more causal references to other Spans (via their SpanContext). - // If empty, start a "root" Span (i.e., start a new trace). 
- References []SpanReference - - // StartTime overrides the Span's start time, or implicitly becomes - // time.Now() if StartTime.IsZero(). - StartTime time.Time - - // Tags may have zero or more entries; the restrictions on map values are - // identical to those for Span.SetTag(). May be nil. - // - // If specified, the caller hands off ownership of Tags at - // StartSpan() invocation time. - Tags map[string]interface{} -} - -// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. -// -// StartSpanOption borrows from the "functional options" pattern, per -// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis -type StartSpanOption interface { - Apply(*StartSpanOptions) -} - -// SpanReferenceType is an enum type describing different categories of -// relationships between two Spans. If Span-2 refers to Span-1, the -// SpanReferenceType describes Span-1 from Span-2's perspective. For example, -// ChildOfRef means that Span-1 created Span-2. -// -// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for -// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, -// or Span-2 may be sitting in a distributed queue behind Span-1. -type SpanReferenceType int - -const ( - // ChildOfRef refers to a parent Span that caused *and* somehow depends - // upon the new child Span. Often (but not always), the parent Span cannot - // finish until the child Span does. - // - // An timing diagram for a ChildOfRef that's blocked on the new Span: - // - // [-Parent Span---------] - // [-Child Span----] - // - // See http://opentracing.io/spec/ - // - // See opentracing.ChildOf() - ChildOfRef SpanReferenceType = iota - - // FollowsFromRef refers to a parent Span that does not depend in any way - // on the result of the new child Span. For instance, one might use - // FollowsFromRefs to describe pipeline stages separated by queues, - // or a fire-and-forget cache insert at the tail end of a web request. - // - // A FollowsFromRef Span is part of the same logical trace as the new Span: - // i.e., the new Span is somehow caused by the work of its FollowsFromRef. - // - // All of the following could be valid timing diagrams for children that - // "FollowFrom" a parent. - // - // [-Parent Span-] [-Child Span-] - // - // - // [-Parent Span--] - // [-Child Span-] - // - // - // [-Parent Span-] - // [-Child Span-] - // - // See http://opentracing.io/spec/ - // - // See opentracing.FollowsFrom() - FollowsFromRef -) - -// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a -// referenced SpanContext. See the SpanReferenceType documentation for -// supported relationships. If SpanReference is created with -// ReferencedContext==nil, it has no effect. Thus it allows for a more concise -// syntax for starting spans: -// -// sc, _ := tracer.Extract(someFormat, someCarrier) -// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) -// -// The `ChildOf(sc)` option above will not panic if sc == nil, it will just -// not add the parent span reference to the options. -type SpanReference struct { - Type SpanReferenceType - ReferencedContext SpanContext -} - -// Apply satisfies the StartSpanOption interface. -func (r SpanReference) Apply(o *StartSpanOptions) { - if r.ReferencedContext != nil { - o.References = append(o.References, r) - } -} - -// ChildOf returns a StartSpanOption pointing to a dependent parent span. -// If sc == nil, the option has no effect. 
-// -// See ChildOfRef, SpanReference -func ChildOf(sc SpanContext) SpanReference { - return SpanReference{ - Type: ChildOfRef, - ReferencedContext: sc, - } -} - -// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused -// the child Span but does not directly depend on its result in any way. -// If sc == nil, the option has no effect. -// -// See FollowsFromRef, SpanReference -func FollowsFrom(sc SpanContext) SpanReference { - return SpanReference{ - Type: FollowsFromRef, - ReferencedContext: sc, - } -} - -// StartTime is a StartSpanOption that sets an explicit start timestamp for the -// new Span. -type StartTime time.Time - -// Apply satisfies the StartSpanOption interface. -func (t StartTime) Apply(o *StartSpanOptions) { - o.StartTime = time.Time(t) -} - -// Tags are a generic map from an arbitrary string key to an opaque value type. -// The underlying tracing system is responsible for interpreting and -// serializing the values. -type Tags map[string]interface{} - -// Apply satisfies the StartSpanOption interface. -func (t Tags) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - for k, v := range t { - o.Tags[k] = v - } -} - -// Tag may be passed as a StartSpanOption to add a tag to new spans, -// or its Set method may be used to apply the tag to an existing Span, -// for example: -// -// tracer.StartSpan("opName", Tag{"Key", value}) -// -// or -// -// Tag{"key", value}.Set(span) -type Tag struct { - Key string - Value interface{} -} - -// Apply satisfies the StartSpanOption interface. -func (t Tag) Apply(o *StartSpanOptions) { - if o.Tags == nil { - o.Tags = make(map[string]interface{}) - } - o.Tags[t.Key] = t.Value -} - -// Set applies the tag to an existing Span. -func (t Tag) Set(s Span) { - s.SetTag(t.Key, t.Value) -} diff --git a/vendor/github.com/tonistiigi/go-archvariant/.golangci.yml b/vendor/github.com/tonistiigi/go-archvariant/.golangci.yml new file mode 100644 index 0000000000..da23404f61 --- /dev/null +++ b/vendor/github.com/tonistiigi/go-archvariant/.golangci.yml @@ -0,0 +1,20 @@ +run: + timeout: 10m + skip-files: + - ".*\\.pb\\.go$" + +linters: + enable: + - gofmt + - govet + - deadcode + - goimports + - ineffassign + - misspell + - unused + - varcheck + - revive + - staticcheck + - typecheck + - structcheck + disable-all: true diff --git a/vendor/github.com/tonistiigi/go-archvariant/Dockerfile b/vendor/github.com/tonistiigi/go-archvariant/Dockerfile new file mode 100644 index 0000000000..7ea15cb73c --- /dev/null +++ b/vendor/github.com/tonistiigi/go-archvariant/Dockerfile @@ -0,0 +1,15 @@ +ARG GO_VERSION=1.17 + +FROM --platform=$BUILDPLATFORM tonistiigi/xx AS xx + +FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS build +COPY --from=xx / / +RUN apk add --no-cache git +WORKDIR /src +ARG TARGETPLATFORM +RUN --mount=target=. \ + TARGETPLATFORM=$TARGETPLATFORM xx-go build -o /out/amd64variant ./cmd/amd64variant && \ + xx-verify --static /out/amd64variant + +FROM scratch +COPY --from=build /out/amd64variant . 
\ No newline at end of file
diff --git a/vendor/github.com/tonistiigi/go-archvariant/LICENSE b/vendor/github.com/tonistiigi/go-archvariant/LICENSE
new file mode 100644
index 0000000000..2209c459ac
--- /dev/null
+++ b/vendor/github.com/tonistiigi/go-archvariant/LICENSE
@@ -0,0 +1,21 @@
+MIT License
+
+Copyright (c) 2022 Tõnis Tiigi
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
\ No newline at end of file
diff --git a/vendor/github.com/tonistiigi/go-archvariant/README.md b/vendor/github.com/tonistiigi/go-archvariant/README.md
new file mode 100644
index 0000000000..b154bad95c
--- /dev/null
+++ b/vendor/github.com/tonistiigi/go-archvariant/README.md
@@ -0,0 +1,16 @@
+# go-archvariant
+
+[![Go Reference](https://pkg.go.dev/badge/github.com/tonistiigi/go-archvariant.svg)](https://pkg.go.dev/github.com/tonistiigi/go-archvariant)
+[![Build Status](https://github.com/tonistiigi/go-archvariant/workflows/ci/badge.svg)](https://github.com/tonistiigi/go-archvariant/actions)
+
+Go package for determining the maximum compatibility version of the current system. The main use case is to use this value in container [platform definitions](https://github.com/containerd/containerd/blob/v1.5.9/platforms/platforms.go#L55).
+
+On x86-64 platforms this package returns the maximum current microarchitecture level as defined in https://en.wikipedia.org/wiki/X86-64#Microarchitecture_levels . This value can be used to configure the compiler in [LLVM since 12.0](https://github.com/llvm/llvm-project/commit/012dd42e027e2ff3d183cc9dcf27004cf9711720) and [GCC since 11.0](https://github.com/gcc-mirror/gcc/commit/324bec558e95584e8c1997575ae9d75978af59f1). [Go 1.18+](https://tip.golang.org/doc/go1.18#amd64) uses the `GOAMD64` environment variable to configure the Go compiler with this value.
+
+#### Scope
+
+The goal of this repository is to only provide the variant with minimal external dependencies. If you need more specific CPU feature detection, you should look at [`golang.org/x/sys/cpu`](https://pkg.go.dev/golang.org/x/sys/cpu) or [`github.com/klauspost/cpuid`](https://pkg.go.dev/github.com/klauspost/cpuid/v2) instead.
+
+#### Credits
+
+The checks in this repository are based on the checks the Go runtime does [on startup](https://github.com/golang/go/blob/go1.18beta1/src/runtime/asm_amd64.s#L95-L96).
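A minimal usage sketch (illustrative, not part of the vendored files; the main package and the import alias here are assumptions) of the package's single exported entry point, AMD64Variant(), defined in the files that follow:

package main

import (
	"fmt"

	archvariant "github.com/tonistiigi/go-archvariant"
)

func main() {
	// On amd64 this reports "v1".."v4" per the x86-64 microarchitecture
	// levels; on every other architecture it returns "v1"
	// (see amd64variant_unsupported.go below).
	fmt.Println("variant:", archvariant.AMD64Variant())
}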
diff --git a/vendor/github.com/tonistiigi/go-archvariant/amd64variant.go b/vendor/github.com/tonistiigi/go-archvariant/amd64variant.go
new file mode 100644
index 0000000000..78690c4021
--- /dev/null
+++ b/vendor/github.com/tonistiigi/go-archvariant/amd64variant.go
@@ -0,0 +1,140 @@
+//go:build amd64
+// +build amd64
+
+package archvariant
+
+import (
+	"fmt"
+	"sync"
+)
+
+var cacheOnce sync.Once
+var amdVariantCache string
+
+func cpuid(ax, cx uint32) (eax, ebx, ecx, edx uint32)
+func xgetbv() (eax uint32)
+
+const (
+	// CPUID.(EAX=1):ECX feature bits
+	sse3    = 0
+	ssse3   = 9
+	cx16    = 13
+	sse4_1  = 19
+	sse4_2  = 20
+	popcnt  = 23
+	fma     = 12
+	movbe   = 22
+	xsave   = 26
+	osxsave = 27
+	avx     = 28
+	f16c    = 29
+
+	v2Features = 1<<sse3 | 1<<ssse3 | 1<<cx16 | 1<<sse4_1 | 1<<sse4_2 | 1<<popcnt
+	v3Features = v2Features | 1<<fma | 1<<movbe | 1<<xsave | 1<<osxsave | 1<<avx | 1<<f16c
+
+	// CPUID.(EAX=7,ECX=0):EBX feature bits
+	bmi1     = 3
+	avx2     = 5
+	bmi2     = 8
+	avx512f  = 16
+	avx512bw = 30
+	avx512vl = 31
+
+	v3ExtFeatures = 1<<bmi1 | 1<<avx2 | 1<<bmi2
+	v4ExtFeatures = v3ExtFeatures | 1<<avx512f | 1<<avx512bw | 1<<avx512vl
+
+	// CPUID.(EAX=0x80000001):ECX feature bits
+	lahfLM = 0
+	lzcnt  = 5
+
+	v2ExtFeatureCX = 1 << lahfLM
+	v3ExtFeatureCX = v2ExtFeatureCX | 1<<lzcnt
+
+	// XCR0 state-component bits as reported by XGETBV
+	xmmOS      = 1
+	ymmOS      = 2
+	opmaskOS   = 5
+	zmmHi256OS = 6
+	hi16ZmmOS  = 7
+
+	v3OSSupport = 1<<xmmOS | 1<<ymmOS
+	v4OSSupport = v3OSSupport | 1<<opmaskOS | 1<<zmmHi256OS | 1<<hi16ZmmOS
+)
+
+func detectVersion() int {
+	maxID, _, _, _ := cpuid(0, 0)
+	if maxID < 7 {
+		return 1
+	}
+
+	version := 1
+
+	_, _, cx, _ := cpuid(1, 0)
+	if cx&v2Features != v2Features {
+		return version
+	}
+	version = 2
+	if cx&v3Features == v3Features {
+		version = 3
+	}
+
+	if version >= 3 {
+		_, bx, _, _ := cpuid(7, 0)
+		if bx&v4ExtFeatures == v4ExtFeatures {
+			version = 4
+		} else if bx&v3ExtFeatures != v3ExtFeatures {
+			version = 2
+		}
+	}
+
+	var ax uint32
+	_, _, cx, _ = cpuid(0x80000001, 0)
+
+	if version >= 3 {
+		if cx&v3ExtFeatureCX != v3ExtFeatureCX {
+			version = 2
+		}
+	}
+
+	if version == 2 {
+		if cx&v2ExtFeatureCX != v2ExtFeatureCX {
+			return 1
+		}
+	}
+
+	if version >= 3 {
+		ax = xgetbv()
+		if version == 4 {
+			if !osAVX512Supported(ax) {
+				version = 3
+			}
+		}
+		if ax&v3OSSupport != v3OSSupport {
+			version = 2
+		}
+	}
+
+	return version
+}
+
+func AMD64Variant() string {
+	cacheOnce.Do(func() {
+		amdVariantCache = "v" + fmt.Sprintf("%d", detectVersion())
+	})
+	return amdVariantCache
+}
diff --git a/vendor/github.com/tonistiigi/go-archvariant/amd64variant.s b/vendor/github.com/tonistiigi/go-archvariant/amd64variant.s
new file mode 100644
index 0000000000..f811341500
--- /dev/null
+++ b/vendor/github.com/tonistiigi/go-archvariant/amd64variant.s
@@ -0,0 +1,21 @@
+//go:build amd64
+// +build amd64
+
+#include "textflag.h"
+
+
+TEXT ·cpuid(SB),NOSPLIT,$0-24
+	MOVL ax+0(FP),AX
+	MOVL cx+4(FP), CX
+	CPUID
+	MOVL AX,eax+8(FP)
+	MOVL BX,ebx+12(FP)
+	MOVL CX,ecx+16(FP)
+	MOVL DX,edx+20(FP)
+	RET
+
+TEXT ·xgetbv(SB),NOSPLIT,$0-8
+	XORL CX, CX
+	XGETBV
+	MOVL AX, eax+0(FP)
+	RET
diff --git a/vendor/github.com/tonistiigi/go-archvariant/amd64variant_darwin.go b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_darwin.go
new file mode 100644
index 0000000000..dd558ddef1
--- /dev/null
+++ b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_darwin.go
@@ -0,0 +1,10 @@
+//go:build amd64 && darwin
+// +build amd64,darwin
+
+package archvariant
+
+func darwinSupportsAVX512() bool
+
+func osAVX512Supported(ax uint32) bool {
+	return ax&v3OSSupport == v3OSSupport && darwinSupportsAVX512()
+}
diff --git a/vendor/github.com/tonistiigi/go-archvariant/amd64variant_darwin.s b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_darwin.s
new file mode 100644
index 0000000000..a6eb59ac1a
--- /dev/null
+++ b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_darwin.s
@@ -0,0 +1,30 @@
+//go:build amd64 && darwin
+// +build amd64,darwin
+
+#include "textflag.h"
+
+// Based on https://github.com/golang/sys/blob/ae416a5f93c7892a9dce2d607fc2479eabbacd70/cpu/cpu_x86.s#L31
+
+// Note that Go currently has problems with AVX512 on darwin due to signal handling conflict in kernel and
+// has been disabled in some libraries for runtime detection. We keep it on as this is unlikely to be used
+// in runtime checks.
golang/go#49233 + +// func darwinSupportsAVX512() bool +TEXT ·darwinSupportsAVX512(SB), NOSPLIT, $0-1 + MOVB $0, ret+0(FP) // default to false +// These values from: +// https://github.com/apple/darwin-xnu/blob/xnu-4570.1.46/osfmk/i386/cpu_capabilities.h +#define commpage64_base_address 0x00007fffffe00000 +#define commpage64_cpu_capabilities64 (commpage64_base_address+0x010) +#define commpage64_version (commpage64_base_address+0x01E) +#define hasAVX512F 0x0000004000000000 + MOVQ $commpage64_version, BX + CMPW (BX), $13 // cpu_capabilities64 undefined in versions < 13 + JL no_avx512 + MOVQ $commpage64_cpu_capabilities64, BX + MOVQ $hasAVX512F, CX + TESTQ (BX), CX + JZ no_avx512 + MOVB $1, ret+0(FP) +no_avx512: + RET diff --git a/vendor/github.com/tonistiigi/go-archvariant/amd64variant_nodarwin.go b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_nodarwin.go new file mode 100644 index 0000000000..b0a80e0890 --- /dev/null +++ b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_nodarwin.go @@ -0,0 +1,8 @@ +//go:build amd64 && !darwin +// +build amd64,!darwin + +package archvariant + +func osAVX512Supported(ax uint32) bool { + return ax&v4OSSupport == v4OSSupport +} diff --git a/vendor/github.com/tonistiigi/go-archvariant/amd64variant_unsupported.go b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_unsupported.go new file mode 100644 index 0000000000..7188f25b1d --- /dev/null +++ b/vendor/github.com/tonistiigi/go-archvariant/amd64variant_unsupported.go @@ -0,0 +1,8 @@ +//go:build !amd64 +// +build !amd64 + +package archvariant + +func AMD64Variant() string { + return "v1" +} diff --git a/vendor/github.com/tonistiigi/go-archvariant/docker-bake.hcl b/vendor/github.com/tonistiigi/go-archvariant/docker-bake.hcl new file mode 100644 index 0000000000..c5cb846689 --- /dev/null +++ b/vendor/github.com/tonistiigi/go-archvariant/docker-bake.hcl @@ -0,0 +1,30 @@ +variable GO_VERSION { + default = "1.17" +} + +target "_base" { + args = { + GO_VERSION = GO_VERSION + } +} + +target "binary" { + inherits = ["_base"] + platforms = ["local"] + output = ["bin"] +} + +target "all-arch" { + inherits = ["_base"] + platforms = [ + "linux/amd64", + "linux/arm64", + "linux/arm", + "linux/riscv64", + "linux/386", + "windows/amd64", + "windows/arm64", + "darwin/amd64", + "darwin/arm64", + ] +} \ No newline at end of file diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. 
For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. 
This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go new file mode 100644 index 0000000000..aaeb19e30d --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/grpctrace.go @@ -0,0 +1,134 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc + +import ( + "context" + + "google.golang.org/grpc/metadata" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + // instrumentationName is the name of this instrumentation package. + instrumentationName = "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc" + // GRPCStatusCodeKey is convention for numeric status code of a gRPC request. + GRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +// config is a group of options for this instrumentation. +type config struct { + Propagators propagation.TextMapPropagator + TracerProvider trace.TracerProvider +} + +// Option applies an option value for a config. +type Option interface { + apply(*config) +} + +// newConfig returns a config configured with all the passed Options. +func newConfig(opts []Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + TracerProvider: otel.GetTracerProvider(), + } + for _, o := range opts { + o.apply(c) + } + return c +} + +type propagatorsOption struct{ p propagation.TextMapPropagator } + +func (o propagatorsOption) apply(c *config) { + if o.p != nil { + c.Propagators = o.p + } +} + +// WithPropagators returns an Option to use the Propagators when extracting +// and injecting trace context from requests. +func WithPropagators(p propagation.TextMapPropagator) Option { + return propagatorsOption{p: p} +} + +type tracerProviderOption struct{ tp trace.TracerProvider } + +func (o tracerProviderOption) apply(c *config) { + if o.tp != nil { + c.TracerProvider = o.tp + } +} + +// WithTracerProvider returns an Option to use the TracerProvider when +// creating a Tracer. 
+func WithTracerProvider(tp trace.TracerProvider) Option { + return tracerProviderOption{tp: tp} +} + +type metadataSupplier struct { + metadata *metadata.MD +} + +// assert that metadataSupplier implements the TextMapCarrier interface +var _ propagation.TextMapCarrier = &metadataSupplier{} + +func (s *metadataSupplier) Get(key string) string { + values := s.metadata.Get(key) + if len(values) == 0 { + return "" + } + return values[0] +} + +func (s *metadataSupplier) Set(key string, value string) { + s.metadata.Set(key, value) +} + +func (s *metadataSupplier) Keys() []string { + out := make([]string, 0, len(*s.metadata)) + for key := range *s.metadata { + out = append(out, key) + } + return out +} + +// Inject injects correlation context and span context into the gRPC +// metadata object. This function is meant to be used on outgoing +// requests. +func Inject(ctx context.Context, metadata *metadata.MD, opts ...Option) { + c := newConfig(opts) + c.Propagators.Inject(ctx, &metadataSupplier{ + metadata: metadata, + }) +} + +// Extract returns the correlation context and span context that +// another service encoded in the gRPC metadata object with Inject. +// This function is meant to be used on incoming requests. +func Extract(ctx context.Context, metadata *metadata.MD, opts ...Option) (baggage.Baggage, trace.SpanContext) { + c := newConfig(opts) + ctx = c.Propagators.Extract(ctx, &metadataSupplier{ + metadata: metadata, + }) + + return baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go new file mode 100644 index 0000000000..b1d9f643d8 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/interceptor.go @@ -0,0 +1,465 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc + +// gRPC tracing middleware +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/semantic_conventions/rpc.md +import ( + "context" + "io" + "net" + + "github.com/golang/protobuf/proto" // nolint:staticcheck + + "google.golang.org/grpc" + grpc_codes "google.golang.org/grpc/codes" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/peer" + "google.golang.org/grpc/status" + + "go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/codes" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.opentelemetry.io/otel/trace" +) + +type messageType attribute.KeyValue + +// Event adds an event of the messageType to the span associated with the +// passed context with id and size (if message is a proto message). 
+func (m messageType) Event(ctx context.Context, id int, message interface{}) { + span := trace.SpanFromContext(ctx) + if p, ok := message.(proto.Message); ok { + span.AddEvent("message", trace.WithAttributes( + attribute.KeyValue(m), + RPCMessageIDKey.Int(id), + RPCMessageUncompressedSizeKey.Int(proto.Size(p)), + )) + } else { + span.AddEvent("message", trace.WithAttributes( + attribute.KeyValue(m), + RPCMessageIDKey.Int(id), + )) + } +} + +var ( + messageSent = messageType(RPCMessageTypeSent) + messageReceived = messageType(RPCMessageTypeReceived) +) + +// UnaryClientInterceptor returns a grpc.UnaryClientInterceptor suitable +// for use in a grpc.Dial call. +func UnaryClientInterceptor(opts ...Option) grpc.UnaryClientInterceptor { + return func( + ctx context.Context, + method string, + req, reply interface{}, + cc *grpc.ClientConn, + invoker grpc.UnaryInvoker, + callOpts ...grpc.CallOption, + ) error { + requestMetadata, _ := metadata.FromOutgoingContext(ctx) + metadataCopy := requestMetadata.Copy() + + tracer := newConfig(opts).TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + name, attr := spanInfo(method, cc.Target()) + var span trace.Span + ctx, span = tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + ) + defer span.End() + + Inject(ctx, &metadataCopy, opts...) + ctx = metadata.NewOutgoingContext(ctx, metadataCopy) + + messageSent.Event(ctx, 1, req) + + err := invoker(ctx, method, req, reply, cc, callOpts...) + + messageReceived.Event(ctx, 1, reply) + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +type streamEventType int + +type streamEvent struct { + Type streamEventType + Err error +} + +const ( + receiveEndEvent streamEventType = iota + errorEvent +) + +// clientStream wraps around the embedded grpc.ClientStream, and intercepts the RecvMsg and +// SendMsg method call. 
+type clientStream struct { + grpc.ClientStream + + desc *grpc.StreamDesc + events chan streamEvent + eventsDone chan struct{} + finished chan error + + receivedMessageID int + sentMessageID int +} + +var _ = proto.Marshal + +func (w *clientStream) RecvMsg(m interface{}) error { + err := w.ClientStream.RecvMsg(m) + + if err == nil && !w.desc.ServerStreams { + w.sendStreamEvent(receiveEndEvent, nil) + } else if err == io.EOF { + w.sendStreamEvent(receiveEndEvent, nil) + } else if err != nil { + w.sendStreamEvent(errorEvent, err) + } else { + w.receivedMessageID++ + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + + return err +} + +func (w *clientStream) SendMsg(m interface{}) error { + err := w.ClientStream.SendMsg(m) + + w.sentMessageID++ + messageSent.Event(w.Context(), w.sentMessageID, m) + + if err != nil { + w.sendStreamEvent(errorEvent, err) + } + + return err +} + +func (w *clientStream) Header() (metadata.MD, error) { + md, err := w.ClientStream.Header() + + if err != nil { + w.sendStreamEvent(errorEvent, err) + } + + return md, err +} + +func (w *clientStream) CloseSend() error { + err := w.ClientStream.CloseSend() + + if err != nil { + w.sendStreamEvent(errorEvent, err) + } + + return err +} + +func wrapClientStream(ctx context.Context, s grpc.ClientStream, desc *grpc.StreamDesc) *clientStream { + events := make(chan streamEvent) + eventsDone := make(chan struct{}) + finished := make(chan error) + + go func() { + defer close(eventsDone) + + for { + select { + case event := <-events: + switch event.Type { + case receiveEndEvent: + finished <- nil + return + case errorEvent: + finished <- event.Err + return + } + case <-ctx.Done(): + finished <- ctx.Err() + return + } + } + }() + + return &clientStream{ + ClientStream: s, + desc: desc, + events: events, + eventsDone: eventsDone, + finished: finished, + } +} + +func (w *clientStream) sendStreamEvent(eventType streamEventType, err error) { + select { + case <-w.eventsDone: + case w.events <- streamEvent{Type: eventType, Err: err}: + } +} + +// StreamClientInterceptor returns a grpc.StreamClientInterceptor suitable +// for use in a grpc.Dial call. +func StreamClientInterceptor(opts ...Option) grpc.StreamClientInterceptor { + return func( + ctx context.Context, + desc *grpc.StreamDesc, + cc *grpc.ClientConn, + method string, + streamer grpc.Streamer, + callOpts ...grpc.CallOption, + ) (grpc.ClientStream, error) { + requestMetadata, _ := metadata.FromOutgoingContext(ctx) + metadataCopy := requestMetadata.Copy() + + tracer := newConfig(opts).TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + name, attr := spanInfo(method, cc.Target()) + var span trace.Span + ctx, span = tracer.Start( + ctx, + name, + trace.WithSpanKind(trace.SpanKindClient), + trace.WithAttributes(attr...), + ) + + Inject(ctx, &metadataCopy, opts...) + ctx = metadata.NewOutgoingContext(ctx, metadataCopy) + + s, err := streamer(ctx, desc, cc, method, callOpts...) 
+ if err != nil { + grpcStatus, _ := status.FromError(err) + span.SetStatus(codes.Error, grpcStatus.Message()) + span.SetAttributes(statusCodeAttr(grpcStatus.Code())) + span.End() + return s, err + } + stream := wrapClientStream(ctx, s, desc) + + go func() { + err := <-stream.finished + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + span.End() + }() + + return stream, nil + } +} + +// UnaryServerInterceptor returns a grpc.UnaryServerInterceptor suitable +// for use in a grpc.NewServer call. +func UnaryServerInterceptor(opts ...Option) grpc.UnaryServerInterceptor { + return func( + ctx context.Context, + req interface{}, + info *grpc.UnaryServerInfo, + handler grpc.UnaryHandler, + ) (interface{}, error) { + requestMetadata, _ := metadata.FromIncomingContext(ctx) + metadataCopy := requestMetadata.Copy() + + bags, spanCtx := Extract(ctx, &metadataCopy, opts...) + ctx = baggage.ContextWithBaggage(ctx, bags) + + tracer := newConfig(opts).TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, spanCtx), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + ) + defer span.End() + + messageReceived.Event(ctx, 1, req) + + resp, err := handler(ctx, req) + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + messageSent.Event(ctx, 1, s.Proto()) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + messageSent.Event(ctx, 1, resp) + } + + return resp, err + } +} + +// serverStream wraps around the embedded grpc.ServerStream, and intercepts the RecvMsg and +// SendMsg method call. +type serverStream struct { + grpc.ServerStream + ctx context.Context + + receivedMessageID int + sentMessageID int +} + +func (w *serverStream) Context() context.Context { + return w.ctx +} + +func (w *serverStream) RecvMsg(m interface{}) error { + err := w.ServerStream.RecvMsg(m) + + if err == nil { + w.receivedMessageID++ + messageReceived.Event(w.Context(), w.receivedMessageID, m) + } + + return err +} + +func (w *serverStream) SendMsg(m interface{}) error { + err := w.ServerStream.SendMsg(m) + + w.sentMessageID++ + messageSent.Event(w.Context(), w.sentMessageID, m) + + return err +} + +func wrapServerStream(ctx context.Context, ss grpc.ServerStream) *serverStream { + return &serverStream{ + ServerStream: ss, + ctx: ctx, + } +} + +// StreamServerInterceptor returns a grpc.StreamServerInterceptor suitable +// for use in a grpc.NewServer call. +func StreamServerInterceptor(opts ...Option) grpc.StreamServerInterceptor { + return func( + srv interface{}, + ss grpc.ServerStream, + info *grpc.StreamServerInfo, + handler grpc.StreamHandler, + ) error { + ctx := ss.Context() + + requestMetadata, _ := metadata.FromIncomingContext(ctx) + metadataCopy := requestMetadata.Copy() + + bags, spanCtx := Extract(ctx, &metadataCopy, opts...) 
+ ctx = baggage.ContextWithBaggage(ctx, bags) + + tracer := newConfig(opts).TracerProvider.Tracer( + instrumentationName, + trace.WithInstrumentationVersion(SemVersion()), + ) + + name, attr := spanInfo(info.FullMethod, peerFromCtx(ctx)) + ctx, span := tracer.Start( + trace.ContextWithRemoteSpanContext(ctx, spanCtx), + name, + trace.WithSpanKind(trace.SpanKindServer), + trace.WithAttributes(attr...), + ) + defer span.End() + + err := handler(srv, wrapServerStream(ctx, ss)) + + if err != nil { + s, _ := status.FromError(err) + span.SetStatus(codes.Error, s.Message()) + span.SetAttributes(statusCodeAttr(s.Code())) + } else { + span.SetAttributes(statusCodeAttr(grpc_codes.OK)) + } + + return err + } +} + +// spanInfo returns a span name and all appropriate attributes from the gRPC +// method and peer address. +func spanInfo(fullMethod, peerAddress string) (string, []attribute.KeyValue) { + attrs := []attribute.KeyValue{RPCSystemGRPC} + name, mAttrs := internal.ParseFullMethod(fullMethod) + attrs = append(attrs, mAttrs...) + attrs = append(attrs, peerAttr(peerAddress)...) + return name, attrs +} + +// peerAttr returns attributes about the peer address. +func peerAttr(addr string) []attribute.KeyValue { + host, port, err := net.SplitHostPort(addr) + if err != nil { + return []attribute.KeyValue(nil) + } + + if host == "" { + host = "127.0.0.1" + } + + return []attribute.KeyValue{ + semconv.NetPeerIPKey.String(host), + semconv.NetPeerPortKey.String(port), + } +} + +// peerFromCtx returns a peer address from a context, if one exists. +func peerFromCtx(ctx context.Context) string { + p, ok := peer.FromContext(ctx) + if !ok { + return "" + } + return p.Addr.String() +} + +// statusCodeAttr returns status code attribute based on given gRPC code +func statusCodeAttr(c grpc_codes.Code) attribute.KeyValue { + return GRPCStatusCodeKey.Int64(int64(c)) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go new file mode 100644 index 0000000000..56a682102b --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal/parse.go @@ -0,0 +1,43 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal + +import ( + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" +) + +// ParseFullMethod returns a span name following the OpenTelemetry semantic +// conventions as well as all applicable span attribute.KeyValue attributes based +// on a gRPC's FullMethod. +func ParseFullMethod(fullMethod string) (string, []attribute.KeyValue) { + name := strings.TrimLeft(fullMethod, "/") + parts := strings.SplitN(name, "/", 2) + if len(parts) != 2 { + // Invalid format, does not follow `/package.service/method`. 
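+		// A bare method name with no service component falls through to
+		// here and yields no rpc.service/rpc.method attributes.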
+ return name, []attribute.KeyValue(nil) + } + + var attrs []attribute.KeyValue + if service := parts[0]; service != "" { + attrs = append(attrs, semconv.RPCServiceKey.String(service)) + } + if method := parts[1]; method != "" { + attrs = append(attrs, semconv.RPCMethodKey.String(method)) + } + return name, attrs +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go new file mode 100644 index 0000000000..73f14a458e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/semconv.go @@ -0,0 +1,52 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelgrpc + +import ( + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" +) + +// Semantic conventions for attribute keys for gRPC. +const ( + // Name of message transmitted or received. + RPCNameKey = attribute.Key("name") + + // Type of message transmitted or received. + RPCMessageTypeKey = attribute.Key("message.type") + + // Identifier of message transmitted or received. + RPCMessageIDKey = attribute.Key("message.id") + + // The compressed size of the message transmitted or received in bytes. + RPCMessageCompressedSizeKey = attribute.Key("message.compressed_size") + + // The uncompressed size of the message transmitted or received in + // bytes. + RPCMessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +// Semantic conventions for common RPC attributes. +var ( + // Semantic convention for gRPC as the remoting system. + RPCSystemGRPC = semconv.RPCSystemKey.String("grpc") + + // Semantic convention for a message named message. + RPCNameMessage = RPCNameKey.String("message") + + // Semantic conventions for RPC message types. + RPCMessageTypeSent = RPCMessageTypeKey.String("SENT") + RPCMessageTypeReceived = RPCMessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go new file mode 100644 index 0000000000..f7c0e09079 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
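+
+// A minimal wiring sketch for the interceptors added by this package
+// (hypothetical caller code using standard grpc-go options; the target
+// address and the client/server setup are assumptions, not part of this diff):
+//
+//	conn, err := grpc.Dial(address,
+//		grpc.WithStreamInterceptor(otelgrpc.StreamClientInterceptor()),
+//	)
+//	server := grpc.NewServer(
+//		grpc.UnaryInterceptor(otelgrpc.UnaryServerInterceptor()),
+//		grpc.StreamInterceptor(otelgrpc.StreamServerInterceptor()),
+//	)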
+ +package otelgrpc + +// Version is the current release version of the gRPC instrumentation. +func Version() string { + return "0.29.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. +func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go new file mode 100644 index 0000000000..491061583e --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/api.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttptrace + +import ( + "context" + "net/http" + "net/http/httptrace" +) + +// W3C client. +func W3C(ctx context.Context, req *http.Request) (context.Context, *http.Request) { + ctx = httptrace.WithClientTrace(ctx, NewClientTrace(ctx)) + req = req.WithContext(ctx) + return ctx, req +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go new file mode 100644 index 0000000000..a9c20429e0 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/clienttrace.go @@ -0,0 +1,408 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
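+
+// A minimal usage sketch for the client trace defined in this file
+// (hypothetical caller code; the tracer and request URL are assumed).
+// This is the same pattern the W3C helper in api.go applies:
+//
+//	ctx, span := tracer.Start(ctx, "fetch")
+//	defer span.End()
+//	ctx = httptrace.WithClientTrace(ctx, otelhttptrace.NewClientTrace(ctx))
+//	req, _ := http.NewRequestWithContext(ctx, "GET", url, nil)
+//	resp, err := http.DefaultClient.Do(req)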
+
+package otelhttptrace
+
+import (
+	"context"
+	"crypto/tls"
+	"net/http/httptrace"
+	"net/textproto"
+	"strings"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// HTTP attributes.
+var (
+	HTTPStatus                 = attribute.Key("http.status")
+	HTTPHeaderMIME             = attribute.Key("http.mime")
+	HTTPRemoteAddr             = attribute.Key("http.remote")
+	HTTPLocalAddr              = attribute.Key("http.local")
+	HTTPConnectionReused       = attribute.Key("http.conn.reused")
+	HTTPConnectionWasIdle      = attribute.Key("http.conn.wasidle")
+	HTTPConnectionIdleTime     = attribute.Key("http.conn.idletime")
+	HTTPConnectionStartNetwork = attribute.Key("http.conn.start.network")
+	HTTPConnectionDoneNetwork  = attribute.Key("http.conn.done.network")
+	HTTPConnectionDoneAddr     = attribute.Key("http.conn.done.addr")
+	HTTPDNSAddrs               = attribute.Key("http.dns.addrs")
+)
+
+var (
+	hookMap = map[string]string{
+		"http.dns":     "http.getconn",
+		"http.connect": "http.getconn",
+		"http.tls":     "http.getconn",
+	}
+)
+
+func parentHook(hook string) string {
+	if strings.HasPrefix(hook, "http.connect") {
+		return hookMap["http.connect"]
+	}
+	return hookMap[hook]
+}
+
+// ClientTraceOption allows customizations to how the returned
+// httptrace.ClientTrace collects information.
+type ClientTraceOption interface {
+	apply(*clientTracer)
+}
+
+type clientTraceOptionFunc func(*clientTracer)
+
+func (fn clientTraceOptionFunc) apply(c *clientTracer) {
+	fn(c)
+}
+
+// WithoutSubSpans will modify the httptrace.ClientTrace to only collect data
+// as Events and Attributes on a span found in the context. By default
+// sub-spans will be generated.
+func WithoutSubSpans() ClientTraceOption {
+	return clientTraceOptionFunc(func(ct *clientTracer) {
+		ct.useSpans = false
+	})
+}
+
+// WithRedactedHeaders adds the provided header names to the set of headers
+// whose values are replaced by a fixed '****' value. These are in addition
+// to the sensitive headers already redacted by default: Authorization,
+// WWW-Authenticate, Proxy-Authenticate, Proxy-Authorization, Cookie,
+// Set-Cookie.
+func WithRedactedHeaders(headers ...string) ClientTraceOption {
+	return clientTraceOptionFunc(func(ct *clientTracer) {
+		for _, header := range headers {
+			ct.redactedHeaders[strings.ToLower(header)] = struct{}{}
+		}
+	})
+}
+
+// WithoutHeaders will disable adding span attributes for the http headers
+// and values.
+func WithoutHeaders() ClientTraceOption {
+	return clientTraceOptionFunc(func(ct *clientTracer) {
+		ct.addHeaders = false
+	})
+}
+
+// WithInsecureHeaders will add span attributes for all http headers *INCLUDING*
+// the sensitive headers that are redacted by default. The attribute values
+// will include the raw un-redacted text. This might be useful for
+// debugging authentication-related issues, but should not be used for
+// production deployments.
+func WithInsecureHeaders() ClientTraceOption {
+	return clientTraceOptionFunc(func(ct *clientTracer) {
+		ct.addHeaders = true
+		ct.redactedHeaders = nil
+	})
+}
+
+// WithTracerProvider specifies a tracer provider for creating a tracer.
+// The global provider is used if none is specified.
+func WithTracerProvider(provider trace.TracerProvider) ClientTraceOption { + return clientTraceOptionFunc(func(ct *clientTracer) { + if provider != nil { + ct.tracerProvider = provider + } + }) +} + +type clientTracer struct { + context.Context + + tracerProvider trace.TracerProvider + + tr trace.Tracer + + activeHooks map[string]context.Context + root trace.Span + mtx sync.Mutex + redactedHeaders map[string]struct{} + addHeaders bool + useSpans bool +} + +// NewClientTrace returns an httptrace.ClientTrace implementation that will +// record OpenTelemetry spans for requests made by an http.Client. By default +// several spans will be added to the trace for various stages of a request +// (dns, connection, tls, etc). Also by default, all HTTP headers will be +// added as attributes to spans, although several headers will be automatically +// redacted: Authorization, WWW-Authenticate, Proxy-Authenticate, +// Proxy-Authorization, Cookie, and Set-Cookie. +func NewClientTrace(ctx context.Context, opts ...ClientTraceOption) *httptrace.ClientTrace { + ct := &clientTracer{ + Context: ctx, + activeHooks: make(map[string]context.Context), + redactedHeaders: map[string]struct{}{ + "authorization": {}, + "www-authenticate": {}, + "proxy-authenticate": {}, + "proxy-authorization": {}, + "cookie": {}, + "set-cookie": {}, + }, + addHeaders: true, + useSpans: true, + } + + if span := trace.SpanFromContext(ctx); span.SpanContext().IsValid() { + ct.tracerProvider = span.TracerProvider() + } else { + ct.tracerProvider = otel.GetTracerProvider() + } + + for _, opt := range opts { + opt.apply(ct) + } + + ct.tr = ct.tracerProvider.Tracer( + "go.opentelemetry.io/otel/instrumentation/httptrace", + trace.WithInstrumentationVersion(SemVersion()), + ) + + return &httptrace.ClientTrace{ + GetConn: ct.getConn, + GotConn: ct.gotConn, + PutIdleConn: ct.putIdleConn, + GotFirstResponseByte: ct.gotFirstResponseByte, + Got100Continue: ct.got100Continue, + Got1xxResponse: ct.got1xxResponse, + DNSStart: ct.dnsStart, + DNSDone: ct.dnsDone, + ConnectStart: ct.connectStart, + ConnectDone: ct.connectDone, + TLSHandshakeStart: ct.tlsHandshakeStart, + TLSHandshakeDone: ct.tlsHandshakeDone, + WroteHeaderField: ct.wroteHeaderField, + WroteHeaders: ct.wroteHeaders, + Wait100Continue: ct.wait100Continue, + WroteRequest: ct.wroteRequest, + } +} + +func (ct *clientTracer) start(hook, spanName string, attrs ...attribute.KeyValue) { + if !ct.useSpans { + if ct.root == nil { + ct.root = trace.SpanFromContext(ct.Context) + } + ct.root.AddEvent(hook+".start", trace.WithAttributes(attrs...)) + return + } + + ct.mtx.Lock() + defer ct.mtx.Unlock() + + if hookCtx, found := ct.activeHooks[hook]; !found { + var sp trace.Span + ct.activeHooks[hook], sp = ct.tr.Start(ct.getParentContext(hook), spanName, trace.WithAttributes(attrs...), trace.WithSpanKind(trace.SpanKindClient)) + if ct.root == nil { + ct.root = sp + } + } else { + // end was called before start finished, add the start attributes and end the span here + span := trace.SpanFromContext(hookCtx) + span.SetAttributes(attrs...) 
+ span.End() + + delete(ct.activeHooks, hook) + } +} + +func (ct *clientTracer) end(hook string, err error, attrs ...attribute.KeyValue) { + if !ct.useSpans { + if err != nil { + attrs = append(attrs, attribute.String(hook+".error", err.Error())) + } + ct.root.AddEvent(hook+".done", trace.WithAttributes(attrs...)) + return + } + + ct.mtx.Lock() + defer ct.mtx.Unlock() + if ctx, ok := ct.activeHooks[hook]; ok { + span := trace.SpanFromContext(ctx) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + span.SetAttributes(attrs...) + span.End() + delete(ct.activeHooks, hook) + } else { + // start is not finished before end is called. + // Start a span here with the ending attributes that will be finished when start finishes. + // Yes, it's backwards. v0v + ctx, span := ct.tr.Start(ct.getParentContext(hook), hook, trace.WithAttributes(attrs...), trace.WithSpanKind(trace.SpanKindClient)) + if err != nil { + span.SetStatus(codes.Error, err.Error()) + } + ct.activeHooks[hook] = ctx + } +} + +func (ct *clientTracer) getParentContext(hook string) context.Context { + ctx, ok := ct.activeHooks[parentHook(hook)] + if !ok { + return ct.Context + } + return ctx +} + +func (ct *clientTracer) span(hook string) trace.Span { + ct.mtx.Lock() + defer ct.mtx.Unlock() + if ctx, ok := ct.activeHooks[hook]; ok { + return trace.SpanFromContext(ctx) + } + return nil +} + +func (ct *clientTracer) getConn(host string) { + ct.start("http.getconn", "http.getconn", semconv.HTTPHostKey.String(host)) +} + +func (ct *clientTracer) gotConn(info httptrace.GotConnInfo) { + attrs := []attribute.KeyValue{ + HTTPRemoteAddr.String(info.Conn.RemoteAddr().String()), + HTTPLocalAddr.String(info.Conn.LocalAddr().String()), + HTTPConnectionReused.Bool(info.Reused), + HTTPConnectionWasIdle.Bool(info.WasIdle), + } + if info.WasIdle { + attrs = append(attrs, HTTPConnectionIdleTime.String(info.IdleTime.String())) + } + ct.end("http.getconn", nil, attrs...) 
+} + +func (ct *clientTracer) putIdleConn(err error) { + ct.end("http.receive", err) +} + +func (ct *clientTracer) gotFirstResponseByte() { + ct.start("http.receive", "http.receive") +} + +func (ct *clientTracer) dnsStart(info httptrace.DNSStartInfo) { + ct.start("http.dns", "http.dns", semconv.HTTPHostKey.String(info.Host)) +} + +func (ct *clientTracer) dnsDone(info httptrace.DNSDoneInfo) { + var addrs []string + for _, netAddr := range info.Addrs { + addrs = append(addrs, netAddr.String()) + } + ct.end("http.dns", info.Err, HTTPDNSAddrs.String(sliceToString(addrs))) +} + +func (ct *clientTracer) connectStart(network, addr string) { + ct.start("http.connect."+addr, "http.connect", + HTTPRemoteAddr.String(addr), + HTTPConnectionStartNetwork.String(network), + ) +} + +func (ct *clientTracer) connectDone(network, addr string, err error) { + ct.end("http.connect."+addr, err, + HTTPConnectionDoneAddr.String(addr), + HTTPConnectionDoneNetwork.String(network), + ) +} + +func (ct *clientTracer) tlsHandshakeStart() { + ct.start("http.tls", "http.tls") +} + +func (ct *clientTracer) tlsHandshakeDone(_ tls.ConnectionState, err error) { + ct.end("http.tls", err) +} + +func (ct *clientTracer) wroteHeaderField(k string, v []string) { + if ct.useSpans && ct.span("http.headers") == nil { + ct.start("http.headers", "http.headers") + } + if !ct.addHeaders { + return + } + k = strings.ToLower(k) + value := sliceToString(v) + if _, ok := ct.redactedHeaders[k]; ok { + value = "****" + } + ct.root.SetAttributes(attribute.String("http."+k, value)) +} + +func (ct *clientTracer) wroteHeaders() { + if ct.useSpans && ct.span("http.headers") != nil { + ct.end("http.headers", nil) + } + ct.start("http.send", "http.send") +} + +func (ct *clientTracer) wroteRequest(info httptrace.WroteRequestInfo) { + if info.Err != nil { + ct.root.SetStatus(codes.Error, info.Err.Error()) + } + ct.end("http.send", info.Err) +} + +func (ct *clientTracer) got100Continue() { + span := ct.root + if ct.useSpans { + span = ct.span("http.receive") + } + span.AddEvent("GOT 100 - Continue") +} + +func (ct *clientTracer) wait100Continue() { + span := ct.root + if ct.useSpans { + span = ct.span("http.receive") + } + span.AddEvent("GOT 100 - Wait") +} + +func (ct *clientTracer) got1xxResponse(code int, header textproto.MIMEHeader) error { + span := ct.root + if ct.useSpans { + span = ct.span("http.receive") + } + span.AddEvent("GOT 1xx", trace.WithAttributes( + HTTPStatus.Int(code), + HTTPHeaderMIME.String(sm2s(header)), + )) + return nil +} + +func sliceToString(value []string) string { + if len(value) == 0 { + return "undefined" + } + return strings.Join(value, ",") +} + +func sm2s(value map[string][]string) string { + var buf strings.Builder + for k, v := range value { + if buf.Len() != 0 { + buf.WriteString(",") + } + buf.WriteString(k) + buf.WriteString("=") + buf.WriteString(sliceToString(v)) + } + return buf.String() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go new file mode 100644 index 0000000000..6fca43a3ba --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/httptrace.go @@ -0,0 +1,78 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttptrace + +import ( + "context" + "net/http" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/baggage" + "go.opentelemetry.io/otel/propagation" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" + "go.opentelemetry.io/otel/trace" +) + +// Option allows configuration of the httptrace Extract() +// and Inject() functions. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +type config struct { + propagators propagation.TextMapPropagator +} + +func newConfig(opts []Option) *config { + c := &config{propagators: otel.GetTextMapPropagator()} + for _, o := range opts { + o.apply(c) + } + return c +} + +// WithPropagators sets the propagators to use for Extraction and Injection +func WithPropagators(props propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if props != nil { + c.propagators = props + } + }) +} + +// Extract returns the Attributes, Context Entries, and SpanContext that were encoded by Inject. +func Extract(ctx context.Context, req *http.Request, opts ...Option) ([]attribute.KeyValue, baggage.Baggage, trace.SpanContext) { + c := newConfig(opts) + ctx = c.propagators.Extract(ctx, propagation.HeaderCarrier(req.Header)) + + attrs := append( + semconv.HTTPServerAttributesFromHTTPRequest("", "", req), + semconv.NetAttributesFromHTTPRequest("tcp", req)..., + ) + + return attrs, baggage.FromContext(ctx), trace.SpanContextFromContext(ctx) +} + +func Inject(ctx context.Context, req *http.Request, opts ...Option) { + c := newConfig(opts) + c.propagators.Inject(ctx, propagation.HeaderCarrier(req.Header)) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go new file mode 100644 index 0000000000..3641fbb6cc --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace/version.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttptrace + +// Version is the current release version of the httptrace instrumentation. +func Version() string { + return "0.29.0" + // This string is updated by the pre_release.sh script during release +} + +// SemVersion is the semantic version to be supplied to tracer/meter creation. 
+func SemVersion() string { + return "semver:" + Version() +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." 
+ + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. 
We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
new file mode 100644
index 0000000000..0816b9f5da
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/client.go
@@ -0,0 +1,61 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/url"
+	"strings"
+)
+
+// DefaultClient is the default Client and is used by Get, Head, Post and PostForm.
+// Please be careful of initialization order - for example, if you change
+// the global propagator, the DefaultClient might still be using the old one.
+var DefaultClient = &http.Client{Transport: NewTransport(http.DefaultTransport)}
+
+// Get is a convenient replacement for http.Get that adds a span around the request.
+func Get(ctx context.Context, url string) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "GET", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return DefaultClient.Do(req)
+}
+
+// Head is a convenient replacement for http.Head that adds a span around the request.
+func Head(ctx context.Context, url string) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "HEAD", url, nil)
+	if err != nil {
+		return nil, err
+	}
+	return DefaultClient.Do(req)
+}
+
+// Post is a convenient replacement for http.Post that adds a span around the request.
+func Post(ctx context.Context, url, contentType string, body io.Reader) (resp *http.Response, err error) {
+	req, err := http.NewRequestWithContext(ctx, "POST", url, body)
+	if err != nil {
+		return nil, err
+	}
+	req.Header.Set("Content-Type", contentType)
+	return DefaultClient.Do(req)
+}
+
+// PostForm is a convenient replacement for http.PostForm that adds a span around the request.
+func PostForm(ctx context.Context, url string, data url.Values) (resp *http.Response, err error) { + return Post(ctx, url, "application/x-www-form-urlencoded", strings.NewReader(data.Encode())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go new file mode 100644 index 0000000000..dcc59449e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/common.go @@ -0,0 +1,46 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp + +import ( + "net/http" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/trace" +) + +// Attribute keys that can be added to a span. +const ( + ReadBytesKey = attribute.Key("http.read_bytes") // if anything was read from the request body, the total number of bytes read + ReadErrorKey = attribute.Key("http.read_error") // If an error occurred while reading a request, the string of the error (io.EOF is not recorded) + WroteBytesKey = attribute.Key("http.wrote_bytes") // if anything was written to the response writer, the total number of bytes written + WriteErrorKey = attribute.Key("http.write_error") // if an error occurred while writing a reply, the string of the error (io.EOF is not recorded) +) + +// Server HTTP metrics +const ( + RequestCount = "http.server.request_count" // Incoming request count total + RequestContentLength = "http.server.request_content_length" // Incoming request bytes total + ResponseContentLength = "http.server.response_content_length" // Incoming response bytes total + ServerLatency = "http.server.duration" // Incoming end to end duration, microseconds +) + +// Filter is a predicate used to determine whether a given http.request should +// be traced. A Filter must return true if the request should be traced. +type Filter func(*http.Request) bool + +func newTracer(tp trace.TracerProvider) trace.Tracer { + return tp.Tracer(instrumentationName, trace.WithInstrumentationVersion(SemVersion())) +} diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go new file mode 100644 index 0000000000..0c18c11104 --- /dev/null +++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/config.go @@ -0,0 +1,187 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package otelhttp + +import ( + "context" + "net/http" + "net/http/httptrace" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/global" + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +const ( + instrumentationName = "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" +) + +// config represents the configuration options available for the http.Handler +// and http.Transport types. +type config struct { + Tracer trace.Tracer + Meter metric.Meter + Propagators propagation.TextMapPropagator + SpanStartOptions []trace.SpanStartOption + ReadEvent bool + WriteEvent bool + Filters []Filter + SpanNameFormatter func(string, *http.Request) string + ClientTrace func(context.Context) *httptrace.ClientTrace + + TracerProvider trace.TracerProvider + MeterProvider metric.MeterProvider +} + +// Option interface used for setting optional config properties. +type Option interface { + apply(*config) +} + +type optionFunc func(*config) + +func (o optionFunc) apply(c *config) { + o(c) +} + +// newConfig creates a new config struct and applies opts to it. +func newConfig(opts ...Option) *config { + c := &config{ + Propagators: otel.GetTextMapPropagator(), + MeterProvider: global.GetMeterProvider(), + } + for _, opt := range opts { + opt.apply(c) + } + + // Tracer is only initialized if manually specified. Otherwise, can be passed with the tracing context. + if c.TracerProvider != nil { + c.Tracer = newTracer(c.TracerProvider) + } + + c.Meter = c.MeterProvider.Meter( + instrumentationName, + metric.WithInstrumentationVersion(SemVersion()), + ) + + return c +} + +// WithTracerProvider specifies a tracer provider to use for creating a tracer. +// If none is specified, the global provider is used. +func WithTracerProvider(provider trace.TracerProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.TracerProvider = provider + } + }) +} + +// WithMeterProvider specifies a meter provider to use for creating a meter. +// If none is specified, the global provider is used. +func WithMeterProvider(provider metric.MeterProvider) Option { + return optionFunc(func(cfg *config) { + if provider != nil { + cfg.MeterProvider = provider + } + }) +} + +// WithPublicEndpoint configures the Handler to link the span with an incoming +// span context. If this option is not provided, then the association is a child +// association instead of a link. +func WithPublicEndpoint() Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, trace.WithNewRoot()) + }) +} + +// WithPropagators configures specific propagators. If this +// option isn't specified, then the global TextMapPropagator is used. +func WithPropagators(ps propagation.TextMapPropagator) Option { + return optionFunc(func(c *config) { + if ps != nil { + c.Propagators = ps + } + }) +} + +// WithSpanOptions configures an additional set of +// trace.SpanOptions, which are applied to each new span. +func WithSpanOptions(opts ...trace.SpanStartOption) Option { + return optionFunc(func(c *config) { + c.SpanStartOptions = append(c.SpanStartOptions, opts...) + }) +} + +// WithFilter adds a filter to the list of filters used by the handler. +// If any filter indicates to exclude a request then the request will not be +// traced. All filters must allow a request to be traced for a Span to be created. 
+// If no filters are provided then all requests are traced.
+// Filters will be invoked for each processed request; it is advised to make
+// them simple and fast.
+func WithFilter(f Filter) Option {
+	return optionFunc(func(c *config) {
+		c.Filters = append(c.Filters, f)
+	})
+}
+
+type event int
+
+// Different types of events that can be recorded; see WithMessageEvents.
+const (
+	ReadEvents event = iota
+	WriteEvents
+)
+
+// WithMessageEvents configures the Handler to record the specified events
+// (span.AddEvent) on spans. By default only summary attributes are added at the
+// end of the request.
+//
+// Valid events are:
+//   * ReadEvents: Record the number of bytes read after every
+//     http.Request.Body.Read using the ReadBytesKey
+//   * WriteEvents: Record the number of bytes written after every
+//     http.ResponseWriter.Write using the WroteBytesKey
+func WithMessageEvents(events ...event) Option {
+	return optionFunc(func(c *config) {
+		for _, e := range events {
+			switch e {
+			case ReadEvents:
+				c.ReadEvent = true
+			case WriteEvents:
+				c.WriteEvent = true
+			}
+		}
+	})
+}
+
+// WithSpanNameFormatter takes a function that will be called on every
+// request; the returned string will become the span name.
+func WithSpanNameFormatter(f func(operation string, r *http.Request) string) Option {
+	return optionFunc(func(c *config) {
+		c.SpanNameFormatter = f
+	})
+}
+
+// WithClientTrace takes a function that returns a client trace instance that
+// will be applied to the requests sent through the otelhttp Transport.
+func WithClientTrace(f func(context.Context) *httptrace.ClientTrace) Option {
+	return optionFunc(func(c *config) {
+		c.ClientTrace = f
+	})
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
new file mode 100644
index 0000000000..38c7f01c71
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/doc.go
@@ -0,0 +1,18 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package otelhttp provides an http.Handler and functions that are intended
+// to be used to add tracing by wrapping existing handlers (with Handler) and
+// routes (with WithRouteTag).
+package otelhttp // import "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 0000000000..dbba925dfa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,236 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
new file mode 100644
index 0000000000..dbba925dfa
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/handler.go
@@ -0,0 +1,236 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp
+
+import (
+	"io"
+	"net/http"
+	"time"
+
+	"github.com/felixge/httpsnoop"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/metric"
+	"go.opentelemetry.io/otel/propagation"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+var _ http.Handler = &Handler{}
+
+// Handler is http middleware that corresponds to the http.Handler interface and
+// is designed to wrap an http.Mux (or equivalent), while individual routes on
+// the mux are wrapped with WithRouteTag. A Handler will add various attributes
+// to the span using the attribute.Keys defined in this package.
+type Handler struct {
+	operation string
+	handler   http.Handler
+
+	tracer            trace.Tracer
+	meter             metric.Meter
+	propagators       propagation.TextMapPropagator
+	spanStartOptions  []trace.SpanStartOption
+	readEvent         bool
+	writeEvent        bool
+	filters           []Filter
+	spanNameFormatter func(string, *http.Request) string
+	counters          map[string]metric.Int64Counter
+	valueRecorders    map[string]metric.Float64Histogram
+}
+
+func defaultHandlerFormatter(operation string, _ *http.Request) string {
+	return operation
+}
+
+// NewHandler wraps the passed handler, functioning like middleware, in a span
+// named after the operation and with any provided Options.
+func NewHandler(handler http.Handler, operation string, opts ...Option) http.Handler {
+	h := Handler{
+		handler:   handler,
+		operation: operation,
+	}
+
+	defaultOpts := []Option{
+		WithSpanOptions(trace.WithSpanKind(trace.SpanKindServer)),
+		WithSpanNameFormatter(defaultHandlerFormatter),
+	}
+
+	c := newConfig(append(defaultOpts, opts...)...)
+	h.configure(c)
+	h.createMeasures()
+
+	return &h
+}
+
+func (h *Handler) configure(c *config) {
+	h.tracer = c.Tracer
+	h.meter = c.Meter
+	h.propagators = c.Propagators
+	h.spanStartOptions = c.SpanStartOptions
+	h.readEvent = c.ReadEvent
+	h.writeEvent = c.WriteEvent
+	h.filters = c.Filters
+	h.spanNameFormatter = c.SpanNameFormatter
+}
+
+func handleErr(err error) {
+	if err != nil {
+		otel.Handle(err)
+	}
+}
+
+func (h *Handler) createMeasures() {
+	h.counters = make(map[string]metric.Int64Counter)
+	h.valueRecorders = make(map[string]metric.Float64Histogram)
+
+	requestBytesCounter, err := h.meter.NewInt64Counter(RequestContentLength)
+	handleErr(err)
+
+	responseBytesCounter, err := h.meter.NewInt64Counter(ResponseContentLength)
+	handleErr(err)
+
+	serverLatencyMeasure, err := h.meter.NewFloat64Histogram(ServerLatency)
+	handleErr(err)
+
+	h.counters[RequestContentLength] = requestBytesCounter
+	h.counters[ResponseContentLength] = responseBytesCounter
+	h.valueRecorders[ServerLatency] = serverLatencyMeasure
+}
+
+// ServeHTTP serves HTTP requests (http.Handler).
+func (h *Handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
+	requestStartTime := time.Now()
+	for _, f := range h.filters {
+		if !f(r) {
+			// Simply pass through to the handler if a filter rejects the request.
+			h.handler.ServeHTTP(w, r)
+			return
+		}
+	}
+
+	opts := append([]trace.SpanStartOption{
+		trace.WithAttributes(semconv.NetAttributesFromHTTPRequest("tcp", r)...),
+		trace.WithAttributes(semconv.EndUserAttributesFromHTTPRequest(r)...),
+		trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest(h.operation, "", r)...),
+	}, h.spanStartOptions...) // start with the configured options
+
+	tracer := h.tracer
+
+	if tracer == nil {
+		if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+			tracer = newTracer(span.TracerProvider())
+		} else {
+			tracer = newTracer(otel.GetTracerProvider())
+		}
+	}
+
+	ctx := h.propagators.Extract(r.Context(), propagation.HeaderCarrier(r.Header))
+	ctx, span := tracer.Start(ctx, h.spanNameFormatter(h.operation, r), opts...)
+	defer span.End()
+
+	readRecordFunc := func(int64) {}
+	if h.readEvent {
+		readRecordFunc = func(n int64) {
+			span.AddEvent("read", trace.WithAttributes(ReadBytesKey.Int64(n)))
+		}
+	}
+
+	var bw bodyWrapper
+	// If the request body is nil we don't want to mutate the body, as that would
+	// affect its identity in an unforeseeable way, because we assert that the
+	// ReadCloser fulfills a certain interface and it is indeed nil.
+	if r.Body != nil {
+		bw.ReadCloser = r.Body
+		bw.record = readRecordFunc
+		r.Body = &bw
+	}
+
+	writeRecordFunc := func(int64) {}
+	if h.writeEvent {
+		writeRecordFunc = func(n int64) {
+			span.AddEvent("write", trace.WithAttributes(WroteBytesKey.Int64(n)))
+		}
+	}
+
+	rww := &respWriterWrapper{ResponseWriter: w, record: writeRecordFunc, ctx: ctx, props: h.propagators}
+
+	// Wrap w to use our ResponseWriter methods while also exposing
+	// other interfaces that w may implement (http.CloseNotifier,
+	// http.Flusher, http.Hijacker, http.Pusher, io.ReaderFrom).
+
+	w = httpsnoop.Wrap(w, httpsnoop.Hooks{
+		Header: func(httpsnoop.HeaderFunc) httpsnoop.HeaderFunc {
+			return rww.Header
+		},
+		Write: func(httpsnoop.WriteFunc) httpsnoop.WriteFunc {
+			return rww.Write
+		},
+		WriteHeader: func(httpsnoop.WriteHeaderFunc) httpsnoop.WriteHeaderFunc {
+			return rww.WriteHeader
+		},
+	})
+
+	labeler := &Labeler{}
+	ctx = injectLabeler(ctx, labeler)
+
+	h.handler.ServeHTTP(w, r.WithContext(ctx))
+
+	setAfterServeAttributes(span, bw.read, rww.written, rww.statusCode, bw.err, rww.err)
+
+	// Add metrics.
+	attributes := append(labeler.Get(), semconv.HTTPServerMetricAttributesFromHTTPRequest(h.operation, r)...)
+	h.counters[RequestContentLength].Add(ctx, bw.read, attributes...)
+	h.counters[ResponseContentLength].Add(ctx, rww.written, attributes...)
+
+	// Use floating point division here for higher precision (instead of Millisecond method).
+	elapsedTime := float64(time.Since(requestStartTime)) / float64(time.Millisecond)
+
+	h.valueRecorders[ServerLatency].Record(ctx, elapsedTime, attributes...)
+}
+
+func setAfterServeAttributes(span trace.Span, read, wrote int64, statusCode int, rerr, werr error) {
+	attributes := []attribute.KeyValue{}
+
+	// TODO: Consider adding an event after each read and write, possibly as an
+	// option (defaulting to off), so as to not create needlessly verbose spans.
+	if read > 0 {
+		attributes = append(attributes, ReadBytesKey.Int64(read))
+	}
+	if rerr != nil && rerr != io.EOF {
+		attributes = append(attributes, ReadErrorKey.String(rerr.Error()))
+	}
+	if wrote > 0 {
+		attributes = append(attributes, WroteBytesKey.Int64(wrote))
+	}
+	if statusCode > 0 {
+		attributes = append(attributes, semconv.HTTPAttributesFromHTTPStatusCode(statusCode)...)
+		span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(statusCode))
+	}
+	if werr != nil && werr != io.EOF {
+		attributes = append(attributes, WriteErrorKey.String(werr.Error()))
+	}
+	span.SetAttributes(attributes...)
+}
+
+// WithRouteTag annotates a span with the provided route name using the
+// RouteKey Tag.
+func WithRouteTag(route string, h http.Handler) http.Handler {
+	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
+		span := trace.SpanFromContext(r.Context())
+		span.SetAttributes(semconv.HTTPRouteKey.String(route))
+		h.ServeHTTP(w, r)
+	})
+}
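A sketch of how WithRouteTag is typically combined with NewHandler (editorial addition; the route names and handlers are placeholders supplied by the caller):

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

// newMux returns a traced mux whose spans carry an http.route attribute.
// userHandler and rootHandler are placeholders.
func newMux(userHandler, rootHandler http.Handler) http.Handler {
	mux := http.NewServeMux()
	// WithRouteTag annotates the span created by the outer Handler.
	mux.Handle("/user/", otelhttp.WithRouteTag("/user/:id", userHandler))
	mux.Handle("/", otelhttp.WithRouteTag("/", rootHandler))
	return otelhttp.NewHandler(mux, "http.server")
}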
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
new file mode 100644
index 0000000000..7b7d1c0ace
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/labeler.go
@@ -0,0 +1,65 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/attribute"
+)
+
+// Labeler is used to allow instrumented HTTP handlers to add custom attributes to
+// the metrics recorded by the net/http instrumentation.
+type Labeler struct {
+	mu         sync.Mutex
+	attributes []attribute.KeyValue
+}
+
+// Add attributes to a Labeler.
+func (l *Labeler) Add(ls ...attribute.KeyValue) {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	l.attributes = append(l.attributes, ls...)
+}
+
+// Get returns a copy of the attributes added to the Labeler.
+func (l *Labeler) Get() []attribute.KeyValue {
+	l.mu.Lock()
+	defer l.mu.Unlock()
+	ret := make([]attribute.KeyValue, len(l.attributes))
+	copy(ret, l.attributes)
+	return ret
+}
+
+type labelerContextKeyType int
+
+const labelerContextKey labelerContextKeyType = 0
+
+func injectLabeler(ctx context.Context, l *Labeler) context.Context {
+	return context.WithValue(ctx, labelerContextKey, l)
+}
+
+// LabelerFromContext retrieves a Labeler instance from the provided context if
+// one is available. If no Labeler was found in the provided context a new, empty
+// Labeler is returned and the second return value is false. In this case it is
+// safe to use the Labeler but any attributes added to it will not be used.
+func LabelerFromContext(ctx context.Context) (*Labeler, bool) {
+	l, ok := ctx.Value(labelerContextKey).(*Labeler)
+	if !ok {
+		l = &Labeler{}
+	}
+	return l, ok
+}
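A sketch of how a wrapped handler can attach request-scoped metric attributes through the Labeler (editorial addition; the tenant header name is illustrative):

package main

import (
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
	"go.opentelemetry.io/otel/attribute"
)

// tenantHandler must run inside otelhttp.NewHandler, which injects the
// Labeler into the request context; otherwise the attributes are discarded.
func tenantHandler(w http.ResponseWriter, r *http.Request) {
	labeler, _ := otelhttp.LabelerFromContext(r.Context())
	labeler.Add(attribute.String("tenant", r.Header.Get("X-Tenant-ID")))
	w.WriteHeader(http.StatusNoContent)
}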
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
new file mode 100644
index 0000000000..121ad99b0a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/transport.go
@@ -0,0 +1,190 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+	"net/http/httptrace"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/propagation"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Transport implements the http.RoundTripper interface and wraps
+// outbound HTTP(S) requests with a span.
+type Transport struct {
+	rt http.RoundTripper
+
+	tracer            trace.Tracer
+	propagators       propagation.TextMapPropagator
+	spanStartOptions  []trace.SpanStartOption
+	filters           []Filter
+	spanNameFormatter func(string, *http.Request) string
+	clientTrace       func(context.Context) *httptrace.ClientTrace
+}
+
+var _ http.RoundTripper = &Transport{}
+
+// NewTransport wraps the provided http.RoundTripper with one that
+// starts a span and injects the span context into the outbound request headers.
+//
+// If the provided http.RoundTripper is nil, http.DefaultTransport will be used
+// as the base http.RoundTripper.
+func NewTransport(base http.RoundTripper, opts ...Option) *Transport {
+	if base == nil {
+		base = http.DefaultTransport
+	}
+
+	t := Transport{
+		rt: base,
+	}
+
+	defaultOpts := []Option{
+		WithSpanOptions(trace.WithSpanKind(trace.SpanKindClient)),
+		WithSpanNameFormatter(defaultTransportFormatter),
+	}
+
+	c := newConfig(append(defaultOpts, opts...)...)
+	t.applyConfig(c)
+
+	return &t
+}
+
+func (t *Transport) applyConfig(c *config) {
+	t.tracer = c.Tracer
+	t.propagators = c.Propagators
+	t.spanStartOptions = c.SpanStartOptions
+	t.filters = c.Filters
+	t.spanNameFormatter = c.SpanNameFormatter
+	t.clientTrace = c.ClientTrace
+}
+
+func defaultTransportFormatter(_ string, r *http.Request) string {
+	return "HTTP " + r.Method
+}
+
+// RoundTrip creates a Span and propagates its context via the provided request's headers
+// before handing the request to the configured base RoundTripper. The created span will
+// end when the response body is closed or when a read from the body returns io.EOF.
+func (t *Transport) RoundTrip(r *http.Request) (*http.Response, error) {
+	for _, f := range t.filters {
+		if !f(r) {
+			// Simply pass through to the base RoundTripper if a filter rejects the request.
+			return t.rt.RoundTrip(r)
+		}
+	}
+
+	tracer := t.tracer
+
+	if tracer == nil {
+		if span := trace.SpanFromContext(r.Context()); span.SpanContext().IsValid() {
+			tracer = newTracer(span.TracerProvider())
+		} else {
+			tracer = newTracer(otel.GetTracerProvider())
+		}
+	}
+
+	opts := append([]trace.SpanStartOption{}, t.spanStartOptions...) // start with the configured options
+
+	ctx, span := tracer.Start(r.Context(), t.spanNameFormatter("", r), opts...)
+
+	if t.clientTrace != nil {
+		ctx = httptrace.WithClientTrace(ctx, t.clientTrace(ctx))
+	}
+
+	r = r.WithContext(ctx)
+	span.SetAttributes(semconv.HTTPClientAttributesFromHTTPRequest(r)...)
+	t.propagators.Inject(ctx, propagation.HeaderCarrier(r.Header))
+
+	res, err := t.rt.RoundTrip(r)
+	if err != nil {
+		span.RecordError(err)
+		span.SetStatus(codes.Error, err.Error())
+		span.End()
+		return res, err
+	}
+
+	span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(res.StatusCode)...)
+	span.SetStatus(semconv.SpanStatusFromHTTPStatusCode(res.StatusCode))
+	res.Body = newWrappedBody(span, res.Body)
+
+	return res, err
+}
+
+// newWrappedBody returns a new and appropriately scoped *wrappedBody as an
+// io.ReadCloser. If the passed body implements io.Writer, the returned value
+// will implement io.ReadWriteCloser.
+func newWrappedBody(span trace.Span, body io.ReadCloser) io.ReadCloser {
+	// Successful protocol switch responses will have a body that implements
+	// io.ReadWriteCloser. Ensure this interface type continues to be satisfied
+	// if that is the case.
+	if _, ok := body.(io.ReadWriteCloser); ok {
+		return &wrappedBody{span: span, body: body}
+	}
+
+	// Remove the implementation of the io.ReadWriteCloser and only implement
+	// the io.ReadCloser.
+	return struct{ io.ReadCloser }{&wrappedBody{span: span, body: body}}
+}
+
+// wrappedBody is the response body type returned by the transport
+// instrumentation to complete a span. Errors encountered when using the
+// response body are recorded in the span tracking the response.
+//
+// The span tracking the response is ended when this body is closed.
+//
+// If the response body implements the io.Writer interface (i.e. for
+// successful protocol switches), the wrapped body also will.
+type wrappedBody struct {
+	span trace.Span
+	body io.ReadCloser
+}
+
+var _ io.ReadWriteCloser = &wrappedBody{}
+
+func (wb *wrappedBody) Write(p []byte) (int, error) {
+	// This will not panic given the guard in newWrappedBody.
+	n, err := wb.body.(io.Writer).Write(p)
+	if err != nil {
+		wb.span.RecordError(err)
+		wb.span.SetStatus(codes.Error, err.Error())
+	}
+	return n, err
+}
+
+func (wb *wrappedBody) Read(b []byte) (int, error) {
+	n, err := wb.body.Read(b)
+
+	switch err {
+	case nil:
+		// nothing to do here but fall through to the return
+	case io.EOF:
+		wb.span.End()
+	default:
+		wb.span.RecordError(err)
+		wb.span.SetStatus(codes.Error, err.Error())
+	}
+	return n, err
+}
+
+func (wb *wrappedBody) Close() error {
+	wb.span.End()
+	return wb.body.Close()
+}
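A client-side sketch showing the Transport in use (editorial addition; the URL is illustrative). Note that the caller must close, or read to EOF, the response body so the client span ends, per the RoundTrip contract above.

package main

import (
	"context"
	"io"
	"net/http"

	"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
)

func get(ctx context.Context, url string) ([]byte, error) {
	client := http.Client{
		// A nil base would also work: NewTransport falls back to http.DefaultTransport.
		Transport: otelhttp.NewTransport(http.DefaultTransport),
	}
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	res, err := client.Do(req)
	if err != nil {
		return nil, err
	}
	defer res.Body.Close() // ends the span if the body was not read to io.EOF
	return io.ReadAll(res.Body)
}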
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
new file mode 100644
index 0000000000..63402819ca
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/version.go
@@ -0,0 +1,26 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp
+
+// Version is the current release version of the otelhttp instrumentation.
+func Version() string {
+	return "0.29.0"
+	// This string is updated by the pre_release.sh script during release
+}
+
+// SemVersion is the semantic version to be supplied to tracer/meter creation.
+func SemVersion() string {
+	return "semver:" + Version()
+}
diff --git a/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
new file mode 100644
index 0000000000..ec787c820a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp/wrap.go
@@ -0,0 +1,96 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otelhttp
+
+import (
+	"context"
+	"io"
+	"net/http"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+var _ io.ReadCloser = &bodyWrapper{}
+
+// bodyWrapper wraps an http.Request.Body (an io.ReadCloser) to track the number
+// of bytes read and the last error.
+type bodyWrapper struct {
+	io.ReadCloser
+	record func(n int64) // must not be nil
+
+	read int64
+	err  error
+}
+
+func (w *bodyWrapper) Read(b []byte) (int, error) {
+	n, err := w.ReadCloser.Read(b)
+	n1 := int64(n)
+	w.read += n1
+	w.err = err
+	w.record(n1)
+	return n, err
+}
+
+func (w *bodyWrapper) Close() error {
+	return w.ReadCloser.Close()
+}
+
+var _ http.ResponseWriter = &respWriterWrapper{}
+
+// respWriterWrapper wraps an http.ResponseWriter in order to track the number
+// of bytes written, the last error, and to catch the returned statusCode.
+// TODO: The wrapped http.ResponseWriter doesn't implement any of the optional
+// types (http.Hijacker, http.Pusher, http.CloseNotifier, http.Flusher, etc.)
+// that may be useful when using it in real-life situations.
+type respWriterWrapper struct {
+	http.ResponseWriter
+	record func(n int64) // must not be nil
+
+	// used to inject the header
+	ctx context.Context
+
+	props propagation.TextMapPropagator
+
+	written     int64
+	statusCode  int
+	err         error
+	wroteHeader bool
+}
+
+func (w *respWriterWrapper) Header() http.Header {
+	return w.ResponseWriter.Header()
+}
+
+func (w *respWriterWrapper) Write(p []byte) (int, error) {
+	if !w.wroteHeader {
+		w.WriteHeader(http.StatusOK)
+	}
+	n, err := w.ResponseWriter.Write(p)
+	n1 := int64(n)
+	w.record(n1)
+	w.written += n1
+	w.err = err
+	return n, err
+}
+
+func (w *respWriterWrapper) WriteHeader(statusCode int) {
+	if w.wroteHeader {
+		return
+	}
+	w.wroteHeader = true
+	w.statusCode = statusCode
+	w.props.Inject(w.ctx, propagation.HeaderCarrier(w.Header()))
+	w.ResponseWriter.WriteHeader(statusCode)
+}
diff --git a/vendor/go.opentelemetry.io/otel/.gitattributes b/vendor/go.opentelemetry.io/otel/.gitattributes
new file mode 100644
index 0000000000..314766e91b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitattributes
@@ -0,0 +1,3 @@
+* text=auto eol=lf
+*.{cmd,[cC][mM][dD]} text eol=crlf
+*.{bat,[bB][aA][tT]} text eol=crlf
diff --git a/vendor/go.opentelemetry.io/otel/.gitignore b/vendor/go.opentelemetry.io/otel/.gitignore
new file mode 100644
index 0000000000..759cf53e00
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitignore
@@ -0,0 +1,21 @@
+.DS_Store
+Thumbs.db
+
+.tools/
+.idea/
+.vscode/
+*.iml
+*.so
+coverage.*
+
+gen/
+
+/example/fib/fib
+/example/jaeger/jaeger
+/example/namedtracer/namedtracer
+/example/opencensus/opencensus
+/example/passthrough/passthrough
+/example/prometheus/prometheus
+/example/prom-collector/prom-collector
+/example/zipkin/zipkin
+/example/otel-collector/otel-collector
diff --git a/vendor/go.opentelemetry.io/otel/.gitmodules b/vendor/go.opentelemetry.io/otel/.gitmodules
new file mode 100644
index 0000000000..38a1f56982
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.gitmodules
@@ -0,0 +1,3 @@
+[submodule "opentelemetry-proto"]
+	path = exporters/otlp/internal/opentelemetry-proto
+	url = https://github.com/open-telemetry/opentelemetry-proto
diff --git a/vendor/go.opentelemetry.io/otel/.golangci.yml b/vendor/go.opentelemetry.io/otel/.golangci.yml
new file mode 100644
index 0000000000..7a5fdc07ab
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.golangci.yml
@@ -0,0 +1,47 @@
+# See https://github.com/golangci/golangci-lint#config-file
+run:
+  issues-exit-code: 1 #Default
+  tests: true #Default
+
+linters:
+  # Disable everything by default so upgrades do not include new "default
+  # enabled" linters.
+  disable-all: true
+  # Specifically enable linters we want to use.
+  enable:
+    - deadcode
+    - errcheck
+    - gofmt
+    - goimports
+    - gosimple
+    - govet
+    - ineffassign
+    - misspell
+    - revive
+    - staticcheck
+    - structcheck
+    - typecheck
+    - unused
+    - varcheck
+
+issues:
+  exclude-rules:
+    # helpers in tests often (rightfully) pass a *testing.T as their first argument
+    - path: _test\.go
+      text: "context.Context should be the first parameter of a function"
+      linters:
+        - revive
+    # Yes, they are, but it's okay in a test
+    - path: _test\.go
+      text: "exported func.*returns unexported type.*which can be annoying to use"
+      linters:
+        - revive
+
+linters-settings:
+  misspell:
+    locale: US
+    ignore-words:
+      - cancelled
+  goimports:
+    local-prefixes: go.opentelemetry.io
diff --git a/vendor/go.opentelemetry.io/otel/.markdown-link.json b/vendor/go.opentelemetry.io/otel/.markdown-link.json
new file mode 100644
index 0000000000..f222ad89c3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.markdown-link.json
@@ -0,0 +1,16 @@
+{
+  "ignorePatterns": [
+    {
+      "pattern": "^http(s)?://localhost"
+    }
+  ],
+  "replacementPatterns": [
+    {
+      "pattern": "^/registry",
+      "replacement": "https://opentelemetry.io/registry"
+    }
+  ],
+  "retryOn429": true,
+  "retryCount": 5,
+  "fallbackRetryDelay": "30s"
+}
diff --git a/vendor/go.opentelemetry.io/otel/.markdownlint.yaml b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
new file mode 100644
index 0000000000..3202496c35
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/.markdownlint.yaml
@@ -0,0 +1,29 @@
+# Default state for all rules
+default: true
+
+# ul-style
+MD004: false
+
+# hard-tabs
+MD010: false
+
+# line-length
+MD013: false
+
+# no-duplicate-header
+MD024:
+  siblings_only: true
+
+# single-title
+MD025: false
+
+# ol-prefix
+MD029:
+  style: ordered
+
+# no-inline-html
+MD033: false
+
+# fenced-code-language
+MD040: false
+
diff --git a/vendor/go.opentelemetry.io/otel/CHANGELOG.md b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
new file mode 100644
index 0000000000..42dada4905
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/CHANGELOG.md
@@ -0,0 +1,1733 @@
+# Changelog
+
+All notable changes to this project will be documented in this file.
+
+The format is based on [Keep a Changelog](https://keepachangelog.com/en/1.0.0/).
+
+This project adheres to [Semantic Versioning](https://semver.org/spec/v2.0.0.html).
+
+## [Unreleased]
+
+## [1.4.1] - 2022-02-16
+
+### Fixed
+
+- Fix race condition in reading the dropped spans number for the `BatchSpanProcessor`. (#2615)
+
+## [1.4.0] - 2022-02-11
+
+### Added
+
+- Use the `OTEL_EXPORTER_ZIPKIN_ENDPOINT` environment variable to specify the zipkin collector endpoint. (#2490)
+- Log the configuration of `TracerProvider`s, and `Tracer`s for debugging. To enable, use a logger with Verbosity (V level) `>=1`. (#2500)
+- Added support to configure the batch span-processor with environment variables. The following environment variables are used. (#2515)
+  - `OTEL_BSP_SCHEDULE_DELAY`
+  - `OTEL_BSP_EXPORT_TIMEOUT`
+  - `OTEL_BSP_MAX_QUEUE_SIZE`
+  - `OTEL_BSP_MAX_EXPORT_BATCH_SIZE`
+
+### Changed
+
+- Zipkin exporter exports `Resource` attributes in the `Tags` field. (#2589)
+
+### Deprecated
+
+- Deprecate the `go.opentelemetry.io/otel/sdk/export/metric` module. Use the `go.opentelemetry.io/otel/sdk/metric` module instead. (#2382)
+- Deprecate `"go.opentelemetry.io/otel/sdk/metric".AtomicFieldOffsets`. (#2445)
+
+### Fixed
+
+- Fixed the instrument kind for noop async instruments to correctly report an implementation. (#2461)
+- Fix UDP packets overflowing with Jaeger payloads. (#2489, #2512)
+- Change the `otlpmetric.Client` interface's `UploadMetrics` method to accept a single `ResourceMetrics` instead of a slice of them. (#2491)
+- Specify explicit buckets in the Prometheus example, fixing an issue where the example only has a `+inf` bucket. (#2419, #2493)
+- W3C baggage will now decode urlescaped values. (#2529)
+- Baggage members are now only validated once, when calling `NewMember`, and not also when adding it to the baggage itself. (#2522)
+- The order attributes are dropped from spans in the `go.opentelemetry.io/otel/sdk/trace` package when capacity is reached is fixed to be in compliance with the OpenTelemetry specification. Instead of dropping the least-recently-used attribute, the last added attribute is dropped. This drop order still only applies to attributes with unique keys not already contained in the span. If an attribute is added with a key already contained in the span, that attribute is updated to the new value being added. (#2576)
+
+### Removed
+
+- Updated `go.opentelemetry.io/proto/otlp` from `v0.11.0` to `v0.12.0`. This version removes a number of deprecated methods. (#2546)
+  - [`Metric.GetIntGauge()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntGauge)
+  - [`Metric.GetIntHistogram()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntHistogram)
+  - [`Metric.GetIntSum()`](https://pkg.go.dev/go.opentelemetry.io/proto/otlp@v0.11.0/metrics/v1#Metric.GetIntSum)
+
+## [1.3.0] - 2021-12-10
+
+### ⚠️ Notice ⚠️
+
+We have updated the project minimum supported Go version to 1.16.
+
+### Added
+
+- Added an internal Logger. This can be used by the SDK and API to provide users with feedback about the internal state. To enable verbose logs, configure the logger to print V(1) logs; for debugging information, configure it to print V(5) logs. (#2343)
+- Add the `WithRetry` `Option` and the `RetryConfig` type to the `go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp` package to specify retry behavior consistently. (#2425)
+- Add `SpanStatusFromHTTPStatusCodeAndSpanKind` to all `semconv` packages to return a span status code similar to `SpanStatusFromHTTPStatusCode`, but exclude `4XX` HTTP errors as span errors if the span is of server kind. (#2296)
+
+### Changed
+
+- The `"go.opentelemetry.io/otel/exporter/otel/otlptrace/otlptracegrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handles errors on established connections by re-resolving the name and reconnecting. (#2329)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".Client` now uses the underlying gRPC `ClientConn` to handle name resolution, TCP connection establishment (with retries and backoff) and TLS handshakes, and handles errors on established connections by re-resolving the name and reconnecting. (#2425)
+- The `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetricgrpc".RetrySettings` type is renamed to `RetryConfig`. (#2425)
+- The `go.opentelemetry.io/otel/exporter/otel/*` gRPC exporters now default to using the host's root CA set if none are provided by the user and `WithInsecure` is not specified. (#2432)
+- Change `resource.Default` to be evaluated the first time it is called, rather than on import. This allows the caller the option to update `OTEL_RESOURCE_ATTRIBUTES` first, such as with `os.Setenv`. (#2371)
+
+### Fixed
+
+- The `go.opentelemetry.io/otel/exporter/otel/*` exporters are updated to handle per-signal and universal endpoints according to the OpenTelemetry specification. Any per-signal endpoint set via an `OTEL_EXPORTER_OTLP_<signal>_ENDPOINT` environment variable is now used without modification of the path. When `OTEL_EXPORTER_OTLP_ENDPOINT` is set, if it contains a path, that path is used as a base path which per-signal paths are appended to. (#2433)
+- Basic metric controller updated to use sync.Map to avoid blocking calls. (#2381)
+- The `go.opentelemetry.io/otel/exporter/jaeger` exporter correctly sets the `otel.status_code` value to be a string of `ERROR` or `OK` instead of an integer code. (#2439, #2440)
+
+### Deprecated
+
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithMaxAttempts` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+- Deprecated the `"go.opentelemetry.io/otel/exporter/otel/otlpmetric/otlpmetrichttp".WithBackoff` `Option`; use the new `WithRetry` `Option` instead. (#2425)
+
+### Removed
+
+- Remove the metric Processor's ability to convert cumulative to delta aggregation temporality. (#2350)
+- Remove the metric Bound Instruments interface and implementations. (#2399)
+- Remove the metric MinMaxSumCount kind aggregation and the corresponding OTLP export path. (#2423)
+- Metric SDK removes the "exact" aggregator for histogram instruments, as it performed a non-standard aggregation for OTLP export (creating repeated Gauge points) and worked its way into a number of confusing examples. (#2348)
+
+## [1.2.0] - 2021-11-12
+
+### Changed
+
+- Metric SDK `export.ExportKind` and `export.ExportKindSelector` types have been renamed to `aggregation.Temporality` and `aggregation.TemporalitySelector` respectively, to keep in line with the current specification and protocol, along with built-in selectors (e.g., `aggregation.CumulativeTemporalitySelector`, ...). (#2274)
+- The Metric `Exporter` interface now requires a `TemporalitySelector` method instead of an `ExportKindSelector`. (#2274)
+- Metrics API cleanup. The `metric/sdkapi` package has been created to relocate the API-to-SDK interface:
+  - The following interface types simply moved from `metric` to `metric/sdkapi`: `Descriptor`, `MeterImpl`, `InstrumentImpl`, `SyncImpl`, `BoundSyncImpl`, `AsyncImpl`, `AsyncRunner`, `AsyncSingleRunner`, and `AsyncBatchRunner`.
+  - The following struct types moved and are replaced with type aliases, since they are exposed to the user: `Observation`, `Measurement`.
+  - The No-op implementations of sync and async instruments are no longer exported; the new functions `sdkapi.NewNoopAsyncInstrument()` and `sdkapi.NewNoopSyncInstrument()` are provided instead. (#2271)
+- Update the SDK `BatchSpanProcessor` to export all queued spans when `ForceFlush` is called. (#2080, #2335)
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Added a new `schema` module to help parse Schema Files in OTEP 0152 format. (#2267)
+- Added a new `MapCarrier` to the `go.opentelemetry.io/otel/propagation` package to hold propagated cross-cutting concerns as a `map[string]string` held in memory. (#2334)
+## [1.1.0] - 2021-10-27
+
+### Added
+
+- Add the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".WithGRPCConn` option so the exporter can reuse an existing gRPC connection. (#2002)
+- Add the `go.opentelemetry.io/otel/semconv/v1.7.0` package. The package contains semantic conventions from the `v1.7.0` version of the OpenTelemetry specification. (#2320)
+- Add the `go.opentelemetry.io/otel/semconv/v1.6.1` package. The package contains semantic conventions from the `v1.6.1` version of the OpenTelemetry specification. (#2321)
+- Add the `go.opentelemetry.io/otel/semconv/v1.5.0` package. The package contains semantic conventions from the `v1.5.0` version of the OpenTelemetry specification. (#2322)
+  - When upgrading from the `semconv/v1.4.0` package note the following name changes:
+    - `K8SReplicasetUIDKey` -> `K8SReplicaSetUIDKey`
+    - `K8SReplicasetNameKey` -> `K8SReplicaSetNameKey`
+    - `K8SStatefulsetUIDKey` -> `K8SStatefulSetUIDKey`
+    - `k8SStatefulsetNameKey` -> `K8SStatefulSetNameKey`
+    - `K8SDaemonsetUIDKey` -> `K8SDaemonSetUIDKey`
+    - `K8SDaemonsetNameKey` -> `K8SDaemonSetNameKey`
+
+### Changed
+
+- Links added to a span will be dropped by the SDK if they contain an invalid span context. (#2275)
+
+### Fixed
+
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".HTTPServerAttributesFromHTTPRequest` now correctly only sets the HTTP client IP attribute, even if the connection was routed with proxies and there are multiple addresses in the `X-Forwarded-For` header. (#2282, #2284)
+- The `"go.opentelemetry.io/otel/semconv/v1.4.0".NetAttributesFromHTTPRequest` function correctly handles IPv6 addresses as IP addresses and sets the correct net peer IP instead of the net peer hostname attribute. (#2283, #2285)
+- The simple span processor shutdown method deterministically returns the exporter error status if it simultaneously finishes when the deadline is reached. (#2290, #2289)
+
+## [1.0.1] - 2021-10-01
+
+### Fixed
+
+- The json stdout exporter no longer crashes due to a concurrency bug. (#2265)
+
+## [Metrics 0.24.0] - 2021-10-01
+
+### Changed
+
+- `NoopMeterProvider` is now private and `NewNoopMeterProvider` must be used to obtain a `noopMeterProvider`. (#2237)
+- The Metric SDK `Export()` function takes a new two-level reader interface for iterating over results one instrumentation library at a time. (#2197)
+  - The former `"go.opentelemetry.io/otel/sdk/export/metric".CheckpointSet` is renamed `Reader`.
+  - The new interface is named `"go.opentelemetry.io/otel/sdk/export/metric".InstrumentationLibraryReader`.
+
+## [1.0.0] - 2021-09-20
+
+This is the first stable release for the project. This release includes an API and SDK for the tracing signal that will comply with the stability guarantees defined by the project's [versioning policy](./VERSIONING.md).
+
+### Added
+
+- The OTLP trace exporter now sets the `SchemaURL` field in the exported telemetry if the Tracer has the `WithSchemaURL` option. (#2242)
+
+### Fixed
+
+- Slice-valued attributes can correctly be used as map keys. (#2223)
+
+### Removed
+
+- Removed the `"go.opentelemetry.io/otel/exporters/zipkin".WithSDKOptions` function. (#2248)
+- Removed the deprecated package `go.opentelemetry.io/otel/oteltest`. (#2234)
+- Removed the deprecated package `go.opentelemetry.io/otel/bridge/opencensus/utils`. (#2233)
+- Removed deprecated functions, types, and methods from the `go.opentelemetry.io/otel/attribute` package. Use the typed functions and methods added to the package instead. (#2235)
+  - The `Key.Array` method is removed.
+  - The `Array` function is removed.
+  - The `Any` function is removed.
+  - The `ArrayValue` function is removed.
+  - The `AsArray` function is removed.
+
+## [1.0.0-RC3] - 2021-09-02
+
+### Added
+
+- Added `ErrorHandlerFunc` to use a function as an `"go.opentelemetry.io/otel".ErrorHandler`. (#2149)
+- Added `"go.opentelemetry.io/otel/trace".WithStackTrace` option to add a stack trace when using `span.RecordError` or when a panic is handled in `span.End`. (#2163)
+- Added typed slice attribute types and functionality to the `go.opentelemetry.io/otel/attribute` package to replace the existing array type and functions. (#2162)
+  - `BoolSlice`, `IntSlice`, `Int64Slice`, `Float64Slice`, and `StringSlice` replace the use of the `Array` function in the package.
+- Added the `go.opentelemetry.io/otel/example/fib` example package. Included is an example application that computes Fibonacci numbers. (#2203)
+
+### Changed
+
+- Metric instruments have been renamed to match the (feature-frozen) metric API specification:
+  - ValueRecorder becomes Histogram
+  - ValueObserver becomes Gauge
+  - SumObserver becomes CounterObserver
+  - UpDownSumObserver becomes UpDownCounterObserver
+  The API exported from this project is still considered experimental. (#2202)
+- Metric SDK/API implementation type `InstrumentKind` moves into the `sdkapi` sub-package. (#2091)
+- The Metrics SDK export record no longer contains a Resource pointer; the SDK `"go.opentelemetry.io/otel/sdk/trace/export/metric".Exporter.Export()` function for push-based exporters now takes a single Resource argument, and pull-based exporters use `"go.opentelemetry.io/otel/sdk/metric/controller/basic".Controller.Resource()`. (#2120)
+- The JSON output of the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` is harmonized now such that the output is "plain" JSON objects after each other of the form `{ ... } { ... } { ... }`. Earlier the JSON objects describing a span were wrapped in a slice for each `Exporter.ExportSpans` call, like `[ { ... } ][ { ... } { ... } ]`. Outputting JSON objects directly after each other is consistent with JSON loggers, and a bit easier to parse and read. (#2196)
+- Update the `NewTracerConfig`, `NewSpanStartConfig`, `NewSpanEndConfig`, and `NewEventConfig` functions in the `go.opentelemetry.io/otel/trace` package to return their respective configurations as structs instead of pointers to the struct. (#2212)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/bridge/opencensus/utils` package is deprecated. All functionality from this package now exists in the `go.opentelemetry.io/otel/bridge/opencensus` package. The functions from that package should be used instead. (#2166)
+- The `"go.opentelemetry.io/otel/attribute".Array` function and the related `ARRAY` value type are deprecated. Use the typed `*Slice` functions and types added to the package instead. (#2162)
+- The `"go.opentelemetry.io/otel/attribute".Any` function is deprecated. Use the typed functions instead. (#2181)
+- The `go.opentelemetry.io/otel/oteltest` package is deprecated. The `"go.opentelemetry.io/otel/sdk/trace/tracetest".SpanRecorder` can be registered with the default SDK (`go.opentelemetry.io/otel/sdk/trace`) as a `SpanProcessor` and used as a replacement for this deprecated package. (#2188)
+
+### Removed
+
+- Removed the metrics test package `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#2105)
+### Fixed
+
+- The `fromEnv` detector no longer throws an error when the `OTEL_RESOURCE_ATTRIBUTES` environment variable is not set or empty. (#2138)
+- Setting the global `ErrorHandler` with `"go.opentelemetry.io/otel".SetErrorHandler` multiple times is now supported. (#2160, #2140)
+- The `"go.opentelemetry.io/otel/attribute".Any` function now supports `int32` values. (#2169)
+- Multiple calls to `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithResource()` are handled correctly, and when no resources are provided `"go.opentelemetry.io/otel/sdk/resource".Default()` is used. (#2120)
+- The `WithoutTimestamps` option for the `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter causes the exporter to correctly omit timestamps. (#2195)
+- Fixed typos in resources.go. (#2201)
+
+## [1.0.0-RC2] - 2021-07-26
+
+### Added
+
+- Added `WithOSDescription` resource configuration option to set the OS (Operating System) description resource attribute (`os.description`). (#1840)
+- Added `WithOS` resource configuration option to set all OS (Operating System) resource attributes at once. (#1840)
+- Added the `WithRetry` option to the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. This option is a replacement for the removed `WithMaxAttempts` and `WithBackoff` options. (#2095)
+- Added API `LinkFromContext` to return a Link which encapsulates the SpanContext from the provided context and also encapsulates attributes. (#2115)
+- Added a new `Link` type under the SDK `otel/sdk/trace` package that counts the number of attributes that were dropped for surpassing the `AttributePerLinkCountLimit` configured in the Span's `SpanLimits`. This new type replaces the equal-named API `Link` type found in the `otel/trace` package for most usages within the SDK. For example, instances of this type are now returned by the `Links()` function of `ReadOnlySpan`s provided in places like the `OnEnd` function of `SpanProcessor` implementations. (#2118)
+- Added the `SpanRecorder` type to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. This type can be used with the default SDK as a `SpanProcessor` during testing. (#2132)
+
+### Changed
+
+- The `SpanModels` function is now exported from the `go.opentelemetry.io/otel/exporters/zipkin` package to convert OpenTelemetry spans into Zipkin model spans. (#2027)
+- Rename the `"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc".RetrySettings` to `RetryConfig`. (#2095)
+
+### Deprecated
+
+- The `TextMapCarrier` and `TextMapPropagator` from the `go.opentelemetry.io/otel/oteltest` package and their associated creation functions (`TextMapCarrier`, `NewTextMapPropagator`) are deprecated. (#2114)
+- The `Harness` type from the `go.opentelemetry.io/otel/oteltest` package and its associated creation function, `NewHarness`, are deprecated and will be removed in the next release. (#2123)
+- The `TraceStateFromKeyValues` function from the `go.opentelemetry.io/otel/oteltest` package is deprecated. Use the `trace.ParseTraceState` function instead. (#2122)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/jaeger`. (#2020)
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/trace/zipkin`. (#2020)
+- Removed the `"go.opentelemetry.io/otel/sdk/resource".WithBuiltinDetectors` function. The explicit `With*` options for every built-in detector should be used instead. (#2026, #2097)
+- Removed the `WithMaxAttempts` and `WithBackoff` options from the `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` package. The retry logic of the package has been updated to match the `otlptracegrpc` package and accordingly a `WithRetry` option is added that should be used instead. (#2095)
+- Removed the `DroppedAttributeCount` field from the `otel/trace.Link` struct. (#2118)
+
+### Fixed
+
+- When using WithNewRoot, don't use the parent context for making sampling decisions. (#2032)
+- `oteltest.Tracer` now creates a valid `SpanContext` when using `WithNewRoot`. (#2073)
+- OS type detector now sets the correct `dragonflybsd` value for DragonFly BSD. (#2092)
+- The OTel span status is correctly transformed into the OTLP status in the `go.opentelemetry.io/otel/exporters/otlp/otlptrace` package. This fix will by default set the status to `Unset` if it is not explicitly set to `Ok` or `Error`. (#2099, #2102)
+- The `Inject` method for the `"go.opentelemetry.io/otel/propagation".TraceContext` type no longer injects empty `tracestate` values. (#2108)
+- Use `6831` as the default Jaeger agent port instead of `6832`. (#2131)
+
+## [Experimental Metrics v0.22.0] - 2021-07-19
+
+### Added
+
+- Adds HTTP support for the OTLP metrics exporter. (#2022)
+
+### Removed
+
+- Removed the deprecated package `go.opentelemetry.io/otel/exporters/metric/prometheus`. (#2020)
+
+## [1.0.0-RC1] / 0.21.0 - 2021-06-18
+
+With this release we are introducing a split in module versions. The tracing API and SDK are entering the `v1.0.0` Release Candidate phase with `v1.0.0-RC1` while the experimental metrics API and SDK continue with `v0.x` releases at `v0.21.0`. Modules at major version 1 or greater will not depend on modules with major version 0.
+
+### Added
+
+- Adds the `otlpgrpc.WithRetry` option for configuring the retry policy for transient errors on the otlp/gRPC exporter. (#1832)
+  - The following status codes are defined as transient errors:
+    | gRPC Status Code | Description |
+    | ---------------- | ----------- |
+    | 1 | Cancelled |
+    | 4 | Deadline Exceeded |
+    | 8 | Resource Exhausted |
+    | 10 | Aborted |
+    | 11 | Out of Range |
+    | 14 | Unavailable |
+    | 15 | Data Loss |
+- Added `Status` type to the `go.opentelemetry.io/otel/sdk/trace` package to represent the status of a span. (#1874)
+- Added `SpanStub` type and its associated functions to the `go.opentelemetry.io/otel/sdk/trace/tracetest` package. This type can be used as a testing replacement for the `SpanSnapshot` that was removed from the `go.opentelemetry.io/otel/sdk/trace` package. (#1873)
+- Adds support for scheme in `OTEL_EXPORTER_OTLP_ENDPOINT` according to the spec. (#1886)
+- Adds the `trace.WithSchemaURL` option for configuring the tracer with a Schema URL. (#1889)
+- Added an example of using OpenTelemetry Go as a trace context forwarder. (#1912)
+- `ParseTraceState` is added to the `go.opentelemetry.io/otel/trace` package. It can be used to decode a `TraceState` from a `tracestate` header string value. (#1937)
+- Added `Len` method to the `TraceState` type in the `go.opentelemetry.io/otel/trace` package. This method returns the number of list-members the `TraceState` holds. (#1937)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace` that defines a trace exporter that uses a `otlptrace.Client` to send data.
+  Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc` implementing a gRPC `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install a `otlptrace.Exporter` in tracing. (#1922)
+- Added `Baggage`, `Member`, and `Property` types to the `go.opentelemetry.io/otel/baggage` package along with their related functions. (#1967)
+- Added `ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext` functions to the `go.opentelemetry.io/otel/baggage` package. These functions replace the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions from that package and directly work with the new `Baggage` type. (#1967)
+- The `OTEL_SERVICE_NAME` environment variable is the preferred source for `service.name`, used by the environment resource detector if a service name is present both there and in `OTEL_RESOURCE_ATTRIBUTES`. (#1969)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp` implementing an HTTP `otlptrace.Client` and offers convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install a `otlptrace.Exporter` in tracing. (#1963)
+- Changes `go.opentelemetry.io/otel/sdk/resource.NewWithAttributes` to require a schema URL. The old function is still available as `resource.NewSchemaless`. This is a breaking change. (#1938)
+- Several builtin resource detectors now correctly populate the schema URL. (#1938)
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric` that defines a metrics exporter that uses a `otlpmetric.Client` to send data.
+- Creates package `go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc` implementing a gRPC `otlpmetric.Client` and offers convenience functions, `New` and `NewUnstarted`, to create an `otlpmetric.Exporter`. (#1991)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` exporter. (#2005)
+- Added `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` exporter. (#2005)
+- Added a `TracerProvider()` method to the `"go.opentelemetry.io/otel/trace".Span` interface. This can be used to obtain a `TracerProvider` from a given span that utilizes the same trace processing pipeline. (#2009)
+
+### Changed
+
+- Make `NewSplitDriver` from `go.opentelemetry.io/otel/exporters/otlp` take variadic arguments instead of a `SplitConfig` item. `NewSplitDriver` now automatically implements an internal `noopDriver` for `SplitConfig` fields that are not initialized. (#1798)
+- `resource.New()` now creates a Resource without builtin detectors. Previous behavior is now achieved by using the `WithBuiltinDetectors` Option. (#1810)
+- Move the `Event` type from the `go.opentelemetry.io/otel` package to the `go.opentelemetry.io/otel/sdk/trace` package. (#1846)
+- CI builds validate against the last two versions of Go, dropping 1.14 and adding 1.16. (#1865)
+- BatchSpanProcessor now reports export failures when calling the `ForceFlush()` method. (#1860)
+- `Set.Encoded(Encoder)` no longer caches the result of an encoding. (#1855)
+- Renamed `CloudZoneKey` to `CloudAvailabilityZoneKey` in Resource semantic conventions according to spec. (#1871)
+- The `StatusCode` and `StatusMessage` methods of the `ReadOnlySpan` interface and the `Span` produced by the `go.opentelemetry.io/otel/sdk/trace` package have been replaced with a single `Status` method. This method returns the status of a span using the new `Status` type. (#1874)
+- Updated the `ExportSpans` method of the `SpanExporter` interface type to accept `ReadOnlySpan`s instead of the removed `SpanSnapshot`. This brings the export interface into compliance with the specification in that it now accepts an explicitly immutable type instead of just an implied one. (#1873)
+- Unembed `SpanContext` in `Link`. (#1877)
+- Generate Semantic conventions from the specification YAML. (#1891)
+- Spans created by the global `Tracer` obtained from `go.opentelemetry.io/otel`, prior to a functioning `TracerProvider` being set, now propagate the span context from their parent if one exists. (#1901)
+- The `"go.opentelemetry.io/otel".Tracer` function now accepts tracer options. (#1902)
+- Move the `go.opentelemetry.io/otel/unit` package to `go.opentelemetry.io/otel/metric/unit`. (#1903)
+- Changed `go.opentelemetry.io/otel/trace.TracerConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/trace.SpanConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `span.End()` to only accept Options that are allowed at `End()`. (#1921)
+- Changed `go.opentelemetry.io/otel/metric.InstrumentConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Changed `go.opentelemetry.io/otel/metric.MeterConfig` to conform to the [Contributing guidelines](CONTRIBUTING.md#config). (#1921)
+- Refactored option types according to the contribution style guide. (#1882)
+- Move the `go.opentelemetry.io/otel/trace.TraceStateFromKeyValues` function to the `go.opentelemetry.io/otel/oteltest` package. This function is preserved for testing purposes where it may be useful to create a `TraceState` from `attribute.KeyValue`s, but it is not intended for production use. The new `ParseTraceState` function should be used to create a `TraceState`. (#1931)
+- Updated `MarshalJSON` method of the `go.opentelemetry.io/otel/trace.TraceState` type to marshal the type into the string representation of the `TraceState`. (#1931)
+- The `TraceState.Delete` method from the `go.opentelemetry.io/otel/trace` package no longer returns an error in addition to a `TraceState`. (#1931)
+- Updated `Get` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Updated `Insert` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a pair of `string`s instead of an `attribute.KeyValue` type. (#1931)
+- Updated `Delete` method of the `TraceState` type from the `go.opentelemetry.io/otel/trace` package to accept a `string` instead of an `attribute.Key` type. (#1931)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/stdout` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/metric/prometheus` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1985)
+- Renamed `NewExporter` to `New` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- Renamed `NewUnstartedExporter` to `NewUnstarted` in the `go.opentelemetry.io/otel/exporters/otlp` package. (#1985)
+- The `go.opentelemetry.io/otel/semconv` package has been moved to `go.opentelemetry.io/otel/semconv/v1.4.0` to allow for multiple [telemetry schema](https://github.com/open-telemetry/oteps/blob/main/text/0152-telemetry-schemas.md) versions to be used concurrently. (#1987)
+- Metrics test helpers in `go.opentelemetry.io/otel/oteltest` have been moved to `go.opentelemetry.io/otel/metric/metrictest`. (#1988)
+
+### Deprecated
+
+- The `go.opentelemetry.io/otel/exporters/metric/prometheus` package is deprecated; use `go.opentelemetry.io/otel/exporters/prometheus` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/jaeger` package is deprecated; use `go.opentelemetry.io/otel/exporters/jaeger` instead. (#1993)
+- The `go.opentelemetry.io/otel/exporters/trace/zipkin` package is deprecated; use `go.opentelemetry.io/otel/exporters/zipkin` instead. (#1993)
+
+### Removed
+
+- Removed `resource.WithoutBuiltin()`. Use `resource.New()`. (#1810)
+- Unexported types `resource.FromEnv`, `resource.Host`, and `resource.TelemetrySDK`; use the corresponding `With*()` options to add them individually. (#1810)
+- Removed the `Tracer` and `IsRecording` methods from the `ReadOnlySpan` in `go.opentelemetry.io/otel/sdk/trace`. The `Tracer` method is not required to be included in this interface, and given the mutable nature of the tracer that is associated with a span, this method is not appropriate. The `IsRecording` method returns if the span is recording or not. A read-only span value does not need to know if updates to it will be recorded or not. By definition, it cannot be updated, so there is no point in communicating if an update is recorded. (#1873)
+- Removed the `SpanSnapshot` type from the `go.opentelemetry.io/otel/sdk/trace` package. The use of this type has been replaced with the use of the explicitly immutable `ReadOnlySpan` type. When a concrete representation of a read-only span is needed for testing, the newly added `SpanStub` in the `go.opentelemetry.io/otel/sdk/trace/tracetest` package should be used. (#1873)
+- Removed the `Tracer` method from the `Span` interface in the `go.opentelemetry.io/otel/trace` package. Using the same tracer that created a span introduces the error where an instrumentation library's `Tracer` is used by other code instead of their own. The `"go.opentelemetry.io/otel".Tracer` function or a `TracerProvider` should be used to acquire a library-specific `Tracer` instead. (#1900)
+  - The `TracerProvider()` method on the `Span` interface may also be used to obtain a `TracerProvider` using the same trace processing pipeline. (#2009)
+- The `http.url` attribute generated by `HTTPClientAttributesFromHTTPRequest` will no longer include username or password information. (#1919)
+- Removed the `IsEmpty` method of the `TraceState` type in the `go.opentelemetry.io/otel/trace` package in favor of using the added `TraceState.Len` method. (#1931)
+- Removed the `Set`, `Value`, `ContextWithValue`, `ContextWithoutValue`, and `ContextWithEmpty` functions in the `go.opentelemetry.io/otel/baggage` package. Handling of baggage is now done using the added `Baggage` type and related context functions (`ContextWithBaggage`, `ContextWithoutBaggage`, and `FromContext`) in that package. (#1967)
+- The `InstallNewPipeline` and `NewExportPipeline` creation functions in all the exporters (prometheus, otlp, stdout, jaeger, and zipkin) have been removed. These functions were deemed premature attempts to provide convenience that did not achieve this aim. (#1985)
+- The `go.opentelemetry.io/otel/exporters/otlp` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/otlp/otlptrace` instead. (#1990)
+- The `go.opentelemetry.io/otel/exporters/stdout` exporter has been removed. Use `go.opentelemetry.io/otel/exporters/stdout/stdouttrace` or `go.opentelemetry.io/otel/exporters/stdout/stdoutmetric` instead. (#2005)
+
+### Fixed
+
+- Only report errors from the `"go.opentelemetry.io/otel/sdk/resource".Environment` function when they are not `nil`. (#1850, #1851)
+- The `Shutdown` method of the simple `SpanProcessor` in the `go.opentelemetry.io/otel/sdk/trace` package now honors the context deadline or cancellation. (#1616, #1856)
+- BatchSpanProcessor now drops span batches that failed to be exported. (#1860)
+- Use `http://localhost:14268/api/traces` as the default Jaeger collector endpoint instead of `http://localhost:14250`. (#1898)
+- Allow trailing and leading whitespace in the parsing of a `tracestate` header. (#1931)
+- Add logic to determine if the channel is closed, fixing a Jaeger exporter test panic from closing a closed channel. (#1870, #1973)
+- Avoid transport security when the OTLP endpoint is a Unix socket. (#2001)
+
+### Security
+
+## [0.20.0] - 2021-04-23
+
+### Added
+
+- The OTLP exporter now has two new convenience functions, `NewExportPipeline` and `InstallNewPipeline`, to set up and install the exporter in tracing and metrics pipelines. (#1373)
+- Adds semantic conventions for exceptions. (#1492)
+- Added Jaeger Environment variables: `OTEL_EXPORTER_JAEGER_AGENT_HOST` and `OTEL_EXPORTER_JAEGER_AGENT_PORT`. These environment variables can be used to override the Jaeger agent hostname and port. (#1752)
+- Option `ExportTimeout` was added to the batch span processor. (#1755)
+- `trace.TraceFlags` is now a defined type over `byte`, and `WithSampled(bool) TraceFlags` and `IsSampled() bool` methods have been added to it. (#1770)
+- The `Event` and `Link` struct types from the `go.opentelemetry.io/otel` package now include a `DroppedAttributeCount` field to record the number of attributes that were not recorded due to configured limits being reached. (#1771)
+- The Jaeger exporter now reports dropped attributes for a Span event in the exported log. (#1771)
+- Adds test to check BatchSpanProcessor ignores `OnEnd` and `ForceFlush` post `Shutdown`. (#1772)
+- Extract resource attributes from the `OTEL_RESOURCE_ATTRIBUTES` environment variable and merge them with the `resource.Default` resource as well as resources provided to the `TracerProvider` and metric `Controller`. (#1785)
+- Added `WithOSType` resource configuration option to set the OS (Operating System) type resource attribute (`os.type`). (#1788)
+- Added `WithProcess*` resource configuration options to set Process resource attributes. (#1788)
+  - `process.pid`
+  - `process.executable.name`
+  - `process.executable.path`
+  - `process.command_args`
+  - `process.owner`
+  - `process.runtime.name`
+  - `process.runtime.version`
+  - `process.runtime.description`
+- Adds `k8s.node.name` and `k8s.node.uid` attribute keys to the `semconv` package. (#1789)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC endpoints, TLS certificates, headers, compression, and timeout via environment variables. (#1758, #1769, #1811)
+- Added support for configuring OTLP/HTTP and OTLP/gRPC Endpoints, TLS Certificates, Headers, Compression and Timeout via Environment Variables (a configuration sketch follows this section). (#1758, #1769 and #1811) + - `OTEL_EXPORTER_OTLP_ENDPOINT` + - `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT` + - `OTEL_EXPORTER_OTLP_METRICS_ENDPOINT` + - `OTEL_EXPORTER_OTLP_HEADERS` + - `OTEL_EXPORTER_OTLP_TRACES_HEADERS` + - `OTEL_EXPORTER_OTLP_METRICS_HEADERS` + - `OTEL_EXPORTER_OTLP_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION` + - `OTEL_EXPORTER_OTLP_METRICS_COMPRESSION` + - `OTEL_EXPORTER_OTLP_TIMEOUT` + - `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT` + - `OTEL_EXPORTER_OTLP_METRICS_TIMEOUT` + - `OTEL_EXPORTER_OTLP_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE` + - `OTEL_EXPORTER_OTLP_METRICS_CERTIFICATE` +- Adds the `otlpgrpc.WithTimeout` option for configuring the timeout of the otlp/gRPC exporter. (#1821) +- Adds the `jaeger.WithMaxPacketSize` option for configuring the maximum UDP packet size used when connecting to the Jaeger agent. (#1853) + +### Fixed + +- The `Span.IsRecording` implementation from `go.opentelemetry.io/otel/sdk/trace` always returns false when not being sampled. (#1750) +- The Jaeger exporter now correctly sets tags for the Span status code and message. + This means it uses the correct tag keys (`"otel.status_code"`, `"otel.status_description"`) and does not set the status message as a tag unless it is set on the span. (#1761) +- The Jaeger exporter now correctly records Span event names using the `"event"` key for a tag. + Additionally, this tag is overridden, as specified in the OTel specification, if the event contains an attribute with that key. (#1768) +- Zipkin Exporter: Ensure mapping between OTel and Zipkin span data complies with the specification. (#1688) +- Fixed a typo for the default service name in the Jaeger Exporter. (#1797) +- Fix a flaky OTLP test for the reconnection of the client connection. (#1527, #1814) +- Fix the Jaeger exporter dropping span batches that exceed the UDP packet size limit. + Instead, the exporter now splits the batch into smaller sendable batches. (#1828) + +### Changed + +- Span `RecordError` now records an `exception` event to comply with the semantic convention specification. (#1492) +- Jaeger exporter was updated to use thrift v0.14.1. (#1712) +- Migrate from using an internally built and maintained version of the OTLP to the one hosted at `go.opentelemetry.io/proto/otlp`. (#1713) +- Migrate from using `github.com/gogo/protobuf` to `google.golang.org/protobuf` to match `go.opentelemetry.io/proto/otlp`. (#1713) +- The storage of a local or remote Span in a `context.Context` using its SpanContext is unified to store just the current Span. + The Span's SpanContext can now self-identify as being remote or not. + This means that `"go.opentelemetry.io/otel/trace".ContextWithRemoteSpanContext` will now overwrite any existing current Span, not just existing remote Spans, and make it the current Span in a `context.Context`. (#1731) +- Improve OTLP/gRPC exporter connection errors. (#1737) +- Information about a parent span context in a `"go.opentelemetry.io/otel/export/trace".SpanSnapshot` is unified in a new `Parent` field. + The existing `ParentSpanID` and `HasRemoteParent` fields are removed in favor of this. (#1748) +- The `ParentContext` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is updated to hold a `context.Context` containing the parent span. + This change makes `SamplingParameters` conform with the OpenTelemetry specification. (#1749)
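A hedged sketch of the environment-variable configuration listed above. It assumes the OTLP exporter reads these variables when it is constructed; the endpoint value is a placeholder:

```go
package main

import "os"

func init() {
	// Placeholder values; the exporter is assumed to pick these up
	// at construction time.
	os.Setenv("OTEL_EXPORTER_OTLP_ENDPOINT", "https://collector.example.com:4317")
	os.Setenv("OTEL_EXPORTER_OTLP_TIMEOUT", "10000") // milliseconds
	os.Setenv("OTEL_EXPORTER_OTLP_COMPRESSION", "gzip")
}
```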
+- Updated Jaeger Environment Variables: `JAEGER_ENDPOINT`, `JAEGER_USER`, `JAEGER_PASSWORD` + to `OTEL_EXPORTER_JAEGER_ENDPOINT`, `OTEL_EXPORTER_JAEGER_USER`, `OTEL_EXPORTER_JAEGER_PASSWORD` in compliance with the OTel specification. (#1752) +- Modify `BatchSpanProcessor.ForceFlush` to abort after timeout/cancellation. (#1757) +- The `DroppedAttributeCount` field of the `Span` in the `go.opentelemetry.io/otel` package now only represents the number of attributes dropped for the span itself. + It is no longer an aggregate of the dropped attribute counts for the span itself, its events, and its links. (#1771) +- Make `ExportSpans` in the Jaeger Exporter honor the context deadline. (#1773) +- Modify the Zipkin Exporter's default service name: use the default resource's serviceName instead of an empty string. (#1777) +- The `go.opentelemetry.io/otel/sdk/export/trace` package is merged into the `go.opentelemetry.io/otel/sdk/trace` package. (#1778) +- The `prometheus.InstallNewPipeline` example is moved from a comment to an example test. (#1796) +- The convenience functions for the stdout exporter have been updated to return the `TracerProvider` implementation and enable the shutdown of the exporter. (#1800) +- Replace the flush function returned from the Jaeger exporter's convenience creation functions (`InstallNewPipeline` and `NewExportPipeline`) with the `TracerProvider` implementation they create. + This enables the caller to shut down and flush using the related `TracerProvider` methods. (#1822) +- Updated the Jaeger exporter to have a default endpoint, `http://localhost:14250`, for the collector. (#1824) +- Changed the function `WithCollectorEndpoint` in the Jaeger exporter to no longer accept an endpoint as an argument. + The endpoint can be passed with the `CollectorEndpointOption` using the `WithEndpoint` function or by setting the `OTEL_EXPORTER_JAEGER_ENDPOINT` environment variable value appropriately. (#1824) +- The Jaeger exporter no longer batches exported spans itself; instead it relies on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter creation functions (`NewRawExporter`, `NewExportPipeline`, and `InstallNewPipeline`) no longer accept the removed `Option` type as a variadic argument. (#1830) + +### Removed + +- Removed Jaeger Environment variables: `JAEGER_SERVICE_NAME`, `JAEGER_DISABLED`, `JAEGER_TAGS` + These environment variables will no longer be used to override values of the Jaeger exporter. (#1752) +- No longer set the links for a `Span` in `go.opentelemetry.io/otel/sdk/trace` that is configured to be a new root. + This is unspecified behavior that the OpenTelemetry community plans to standardize in the future. + To prevent backwards-incompatible changes when it is specified, these links are removed. (#1726) +- Setting the error status while recording an error with a Span from the oteltest package. (#1729) +- The concept of a remote and local Span stored in a context is unified to just the current Span. + Because of this, `"go.opentelemetry.io/otel/trace".RemoteSpanContextFromContext` is removed as it is no longer needed. + Instead, `"go.opentelemetry.io/otel/trace".SpanContextFromContext` can be used to return the current Span's `SpanContext`. + If needed, `SpanContext.IsRemote()` can then be used to determine if it is remote or not (a sketch follows this section). (#1731) +- The `HasRemoteParent` field of the `"go.opentelemetry.io/otel/sdk/trace".SamplingParameters` is removed. + This field duplicated the information returned from the `Remote` method of the `SpanContext` held in the `ParentContext` field. (#1749)
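To illustrate the unification described above, a minimal sketch of reading the current span context and checking whether it is remote, using the `go.opentelemetry.io/otel/trace` helpers:

```go
package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func report(ctx context.Context) {
	sc := trace.SpanContextFromContext(ctx)
	if sc.IsRemote() {
		fmt.Println("current span context was extracted from remote data")
	}
	fmt.Println("sampled:", sc.IsSampled())
}
```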
+- The `trace.FlagsDebug` and `trace.FlagsDeferred` constants have been removed and will be localized to the B3 propagator. (#1770) +- Remove the `Process` configuration, `WithProcessFromEnv` and `ProcessFromEnv`, and type from the Jaeger exporter package. + The information that could be configured in the `Process` struct should be configured in a `Resource` instead. (#1776, #1804) +- Remove the `WithDisabled` option from the Jaeger exporter. + To disable the exporter, unregister it from the `TracerProvider` or use a no-operation `TracerProvider`. (#1806) +- Removed the functions `CollectorEndpointFromEnv` and `WithCollectorEndpointOptionFromEnv` from the Jaeger exporter. + These functions for retrieving specific environment variable values are redundant with other internal functions and + were not intended for end-user use. (#1824) +- Removed the Jaeger exporter `WithSDKOptions` `Option`. + This option was used to set SDK options for the exporter creation convenience functions. + These functions are provided as a way to easily set up or install the exporter with what are deemed reasonable SDK settings for common use cases. + If the SDK needs to be configured differently, the `NewRawExporter` function and direct setup of the SDK with the desired settings should be used. (#1825) +- The `WithBufferMaxCount` and `WithBatchMaxCount` `Option`s from the Jaeger exporter are removed. + The exporter no longer batches exports, instead relying on the SDK's `BatchSpanProcessor` for this functionality. (#1830) +- The Jaeger exporter `Option` type is removed. + The type is no longer used by the exporter to configure anything. + All the previous configurations these options provided were duplicates of SDK configuration. + They have been removed in favor of the SDK configuration, focusing the exporter configuration on only the endpoints it sends telemetry to. (#1830) + +## [0.19.0] - 2021-03-18 + +### Added + +- Added a `Marshaler` config option to `otlphttp` to enable OTLP over JSON or protobufs. (#1586) +- A `ForceFlush` method to the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to flush all registered `SpanProcessor`s. (#1608) +- Added `WithSampler` and `WithSpanLimits` to the tracer provider. (#1633, #1702) +- `"go.opentelemetry.io/otel/trace".SpanContext` now has a `remote` property, and an `IsRemote()` predicate, that is true when the `SpanContext` has been extracted from remote context data. (#1701) +- A `Valid` method to the `"go.opentelemetry.io/otel/attribute".KeyValue` type. (#1703) + +### Changed + +- `trace.SpanContext` is now immutable and has no exported fields. (#1573) + - `trace.NewSpanContext()` can be used in conjunction with the `trace.SpanContextConfig` struct to initialize a new `SpanContext` where all values are known (a sketch follows this section). +- Update the `ForceFlush` method signature of the `"go.opentelemetry.io/otel/sdk/trace".SpanProcessor` to accept a `context.Context` and return an error. (#1608) +- Update the `Shutdown` method of the `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` to return an error on shutdown failure. (#1608) +- The SimpleSpanProcessor will now shut down the enclosed `SpanExporter` and gracefully ignore subsequent calls to `OnEnd` after `Shutdown` is called. (#1612) +- `"go.opentelemetry.io/otel/sdk/metric/controller/basic".WithPusher` is replaced with `WithExporter` to provide consistent naming across the project. (#1656) +- Added a non-empty string check for trace `Attribute` keys. (#1659) +- Add `description` to SpanStatus only when `StatusCode` is set to error. (#1662)
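A minimal sketch of constructing an immutable `SpanContext` with `trace.NewSpanContext` and `trace.SpanContextConfig`, as referenced above; the hex IDs are arbitrary example values:

```go
package main

import (
	"fmt"

	"go.opentelemetry.io/otel/trace"
)

func main() {
	tid, _ := trace.TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
	sid, _ := trace.SpanIDFromHex("00f067aa0ba902b7")
	sc := trace.NewSpanContext(trace.SpanContextConfig{
		TraceID:    tid,
		SpanID:     sid,
		TraceFlags: trace.FlagsSampled,
		Remote:     true,
	})
	fmt.Println(sc.IsValid(), sc.IsRemote()) // true true
}
```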
+- Jaeger exporter falls back to `resource.Default`'s `service.name` if the exported Span does not have one. (#1673) +- Jaeger exporter populates Jaeger's Span Process from Resource. (#1673) +- Renamed the `LabelSet` method of `"go.opentelemetry.io/otel/sdk/resource".Resource` to `Set`. (#1692) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/jaeger` package. (#1693) +- Changed `WithSDK` to `WithSDKOptions` to accept variadic arguments of `TracerProviderOption` type in the `go.opentelemetry.io/otel/exporters/trace/zipkin` package. (#1693) + +### Removed + +- Removed the `serviceName` parameter from the Zipkin exporter; the resource is used instead. (#1549) +- Removed `WithConfig` from the tracer provider to avoid overriding configuration. (#1633) +- Removed the exported `SimpleSpanProcessor` and `BatchSpanProcessor` structs. + These are now returned as a SpanProcessor interface from their respective constructors. (#1638) +- Removed `WithRecord()` from `trace.SpanOption` when creating a span. (#1660) +- Removed setting status to `Error` while recording an error as a span event in `RecordError`. (#1663) +- Removed the `jaeger.WithProcess` configuration option. (#1673) +- Removed the `ApplyConfig` method from `"go.opentelemetry.io/otel/sdk/trace".TracerProvider` and the now unneeded `Config` struct. (#1693) + +### Fixed + +- Jaeger Exporter: Ensure mapping between OTel and Jaeger span data complies with the specification. (#1626) +- `SamplingResult.TraceState` is correctly propagated to a newly created span's `SpanContext`. (#1655) +- The `otel-collector` example now correctly flushes metric events prior to shutting down the exporter. (#1678) +- Do not set the span status message in `SpanStatusFromHTTPStatusCode` if it can be inferred from `http.status_code`. (#1681) +- Synchronization issues in the global trace delegate implementation. (#1686) +- Reduced excess memory usage by the global `TracerProvider`. (#1687) + +## [0.18.0] - 2021-03-03 + +### Added + +- Added `resource.Default()` for use with meter and tracer providers. (#1507) +- `AttributePerEventCountLimit` and `AttributePerLinkCountLimit` for `SpanLimits`. (#1535) +- Added a `Keys()` method to `propagation.TextMapCarrier` and `propagation.HeaderCarrier` to adapt `http.Header` to this interface. (#1544) +- Added `code` attributes to the `go.opentelemetry.io/otel/semconv` package. (#1558) +- Compatibility testing suite in the CI system for the following systems. (#1567) + | OS | Go Version | Architecture | + | ------- | ---------- | ------------ | + | Ubuntu | 1.15 | amd64 | + | Ubuntu | 1.14 | amd64 | + | Ubuntu | 1.15 | 386 | + | Ubuntu | 1.14 | 386 | + | MacOS | 1.15 | amd64 | + | MacOS | 1.14 | amd64 | + | Windows | 1.15 | amd64 | + | Windows | 1.14 | amd64 | + | Windows | 1.15 | 386 | + | Windows | 1.14 | 386 | + +### Changed + +- Replaced the interface `oteltest.SpanRecorder` with its existing implementation + `StandardSpanRecorder`. (#1542) +- Default span limit values to 128. (#1535) +- Rename `MaxEventsPerSpan`, `MaxAttributesPerSpan` and `MaxLinksPerSpan` to `EventCountLimit`, `AttributeCountLimit` and `LinkCountLimit`, and move these fields into `SpanLimits` (a sketch follows this section). (#1535) +- Renamed the `otel/label` package to `otel/attribute`. (#1541) +- Vendor the Jaeger exporter's dependency on Apache Thrift. (#1551) +- Parallelize the CI linting and testing. (#1567) +- Stagger timestamps in exact aggregator tests. (#1569)
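A hedged sketch combining the `WithSampler` tracer provider option (0.19.0) with the renamed `SpanLimits` fields above, assuming today's `go.opentelemetry.io/otel/sdk/trace` surface; `WithRawSpanLimits` is the present-day option name and an assumption here:

```go
package main

import (
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func newProvider() *sdktrace.TracerProvider {
	limits := sdktrace.SpanLimits{
		EventCountLimit:     128,
		AttributeCountLimit: 128,
		LinkCountLimit:      128,
	}
	return sdktrace.NewTracerProvider(
		// Sample half of all root traces; children follow their parent.
		sdktrace.WithSampler(sdktrace.ParentBased(sdktrace.TraceIDRatioBased(0.5))),
		sdktrace.WithRawSpanLimits(limits),
	)
}
```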
+- Changed all examples to use `WithBatchTimeout(5 * time.Second)` rather than `WithBatchTimeout(5)`. (#1621) +- Prevent end-users from implementing some interfaces (#1575). The underlying unexported-method pattern is sketched after this section. + + ``` + "otel/exporters/otlp/otlphttp".Option + "otel/exporters/stdout".Option + "otel/oteltest".Option + "otel/trace".TracerOption + "otel/trace".SpanOption + "otel/trace".EventOption + "otel/trace".LifeCycleOption + "otel/trace".InstrumentationOption + "otel/sdk/resource".Option + "otel/sdk/trace".ParentBasedSamplerOption + "otel/sdk/trace".ReadOnlySpan + "otel/sdk/trace".ReadWriteSpan + ``` + +### Removed + +- Removed the attempt to resample spans upon changing the span name with `span.SetName()`. (#1545) +- The `test-benchmark` is no longer a dependency of the `precommit` make target. (#1567) +- Removed the `test-386` make target. + This was replaced with a full compatibility testing suite (i.e. multi OS/arch) in the CI system. (#1567) + +### Fixed + +- The sequential timing check of timestamps in the stdout exporter is now set up explicitly to be sequential (#1571). (#1572) +- The Windows build of the Jaeger tests now compiles with OS-specific functions (#1576). (#1577) +- The sequential timing check of timestamps in `go.opentelemetry.io/otel/sdk/metric/aggregator/lastvalue` is now set up explicitly to be sequential (#1578). (#1579) +- Validate tracestate header keys with vendors according to the W3C TraceContext specification (#1475). (#1581) +- The OTLP exporter includes related labels for translations of a GaugeArray (#1563). (#1570) + +## [0.17.0] - 2021-02-12 + +### Changed + +- Rename the project default branch from `master` to `main`. (#1505) +- Reverse the order in which `Resource` attributes are merged, per a change in the spec. (#1501) +- Add tooling to maintain "replace" directives in go.mod files automatically. (#1528) +- Create new modules: otel/metric, otel/trace, otel/oteltest, otel/sdk/export/metric, otel/sdk/metric (#1528) +- Move metric-related public global APIs from otel to otel/metric/global. (#1528) + +### Fixed + +- Fixed otlpgrpc reconnection issue. +- The example code in the README.md of `go.opentelemetry.io/otel/exporters/otlp` is moved to a compiled example test and uses the new `WithAddress` instead of `WithEndpoint`. (#1513) +- The otel-collector example now uses the default OTLP receiver port of the collector. + +## [0.16.0] - 2021-01-13 + +### Added + +- Add the `ReadOnlySpan` and `ReadWriteSpan` interfaces to provide better control for accessing span data. (#1360) +- `NewGRPCDriver` function returns a `ProtocolDriver` that maintains a single gRPC connection to the collector. (#1369) +- Added documentation about the project's versioning policy. (#1388) +- Added `NewSplitDriver` for the OTLP exporter that allows sending traces and metrics to different endpoints. (#1418) +- Added codeql workflow to GitHub Actions (#1428) +- Added Gosec workflow to GitHub Actions (#1429) +- Add a new HTTP driver for the OTLP exporter in `exporters/otlp/otlphttp`. Currently it supports only binary protobuf payloads. (#1420) +- Add an OpenCensus exporter bridge. (#1444) + +### Changed + +- Rename `internal/testing` to `internal/internaltest`. (#1449) +- Rename `export.SpanData` to `export.SpanSnapshot` and use it only for exporting spans. (#1360) +- Store the parent's full `SpanContext` rather than just its span ID in the `span` struct. (#1360) +- Improve span duration accuracy. (#1360)
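The "prevent end-users from implementing" change above relies on Go's unexported-method trick. A hypothetical sketch of the pattern; the names are illustrative, not the project's actual code:

```go
package option

// Option can only be implemented inside this package because
// applyOption is unexported; external packages cannot satisfy it.
type Option interface {
	applyOption(*config)
}

type config struct{ verbose bool }

type withVerbose bool

func (w withVerbose) applyOption(c *config) { c.verbose = bool(w) }

// WithVerbose is the only way external code obtains an Option.
func WithVerbose() Option { return withVerbose(true) }
```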
+- Migrated CI/CD from CircleCI to GitHub Actions (#1382) +- Remove duplicate checkout from GitHub Actions workflow (#1407) +- Metric `array` aggregator renamed `exact` to match its `aggregation.Kind` (#1412) +- Metric `exact` aggregator includes per-point timestamps (#1412) +- Metric stdout exporter uses MinMaxSumCount aggregator for ValueRecorder instruments (#1412) +- `NewExporter` from `exporters/otlp` now takes a `ProtocolDriver` as a parameter. (#1369) +- Many OTLP Exporter options became gRPC ProtocolDriver options. (#1369) +- Unify the endpoint API related to the OTel exporter. (#1401) +- Optimize the metric histogram aggregator to re-use its slice of buckets. (#1435) +- Metric aggregator Count() and histogram Bucket.Counts are consistently `uint64`. (#1430) +- Histogram aggregator accepts functional options, uses default boundaries if none given. (#1434) +- `SamplingResult` is now passed a `TraceState` from the parent `SpanContext`. (#1432) +- Moved the gRPC driver for the OTLP exporter to `exporters/otlp/otlpgrpc`. (#1420) +- The `TraceContext` propagator now correctly propagates `TraceState` through the `SpanContext`. (#1447) +- Metric Push and Pull Controller components are combined into a single "basic" Controller: + - `WithExporter()` and `Start()` to configure Push behavior + - `Start()` is optional; use `Collect()` and `ForEach()` for Pull behavior + - `Start()` and `Stop()` accept Context. (#1378) +- The `Event` type is moved from the `otel/sdk/export/trace` package to the `otel/trace` API package. (#1452) + +### Removed + +- Remove `errUninitializedSpan` as its only usage is now obsolete. (#1360) +- Remove Metric export functionality related to quantiles and summary data points: this is not specified (#1412) +- Remove the DDSketch metric aggregator; our intention is to re-introduce this as an option of the histogram aggregator after [new OTLP histogram data types](https://github.com/open-telemetry/opentelemetry-proto/pull/226) are released (#1412) + +### Fixed + +- `BatchSpanProcessor.Shutdown()` will now shut down the underlying `export.SpanExporter`. (#1443) + +## [0.15.0] - 2020-12-10 + +### Added + +- The `WithIDGenerator` `TracerProviderOption` is added to the `go.opentelemetry.io/otel/trace` package to configure an `IDGenerator` for the `TracerProvider` (an illustrative generator is sketched after this section). (#1363) + +### Changed + +- The Zipkin exporter now uses the Span status code to determine. (#1328) +- `NewExporter` and `Start` functions in `go.opentelemetry.io/otel/exporters/otlp` now receive `context.Context` as a first parameter. (#1357) +- Move the OpenCensus example into the `example` directory. (#1359) +- Moved the SDK's `internal.IDGenerator` interface into the `sdk/trace` package to enable support for externally-defined ID generators. (#1363) +- Bump `github.com/google/go-cmp` from 0.5.3 to 0.5.4 (#1374) +- Bump `github.com/golangci/golangci-lint` in `/internal/tools` (#1375) + +### Fixed + +- Metric SDK `SumObserver` and `UpDownSumObserver` instruments correctness fixes. (#1381) + +## [0.14.0] - 2020-11-19 + +### Added + +- An `EventOption` and the related `NewEventConfig` function are added to the `go.opentelemetry.io/otel` package to configure Span events. (#1254) +- A `TextMapPropagator` and associated `TextMapCarrier` are added to the `go.opentelemetry.io/otel/oteltest` package to test `TextMap` type propagators and their use. (#1259) +- `SpanContextFromContext` returns `SpanContext` from context. (#1255) +- `TraceState` has been added to `SpanContext`. (#1340)
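A hedged sketch of supplying a custom `IDGenerator` through the `WithIDGenerator` option mentioned above, assuming the interface shape used by `go.opentelemetry.io/otel/sdk/trace` (`NewIDs` and `NewSpanID`):

```go
package main

import (
	"context"
	"crypto/rand"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
	"go.opentelemetry.io/otel/trace"
)

// randGen is an illustrative IDGenerator backed by crypto/rand.
type randGen struct{}

func (randGen) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
	var tid trace.TraceID
	var sid trace.SpanID
	rand.Read(tid[:])
	rand.Read(sid[:])
	return tid, sid
}

func (randGen) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
	var sid trace.SpanID
	rand.Read(sid[:])
	return sid
}

func newProvider() *sdktrace.TracerProvider {
	return sdktrace.NewTracerProvider(sdktrace.WithIDGenerator(randGen{}))
}
```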
+- `DeploymentEnvironmentKey` added to the `go.opentelemetry.io/otel/semconv` package. (#1323) +- Add an OpenCensus to OpenTelemetry tracing bridge. (#1305) +- Add a parent context argument to `SpanProcessor.OnStart` to follow the specification. (#1333) +- Add missing tests for `sdk/trace/attributes_map.go`. (#1337) + +### Changed + +- Move the `go.opentelemetry.io/otel/api/trace` package into `go.opentelemetry.io/otel/trace` with the following changes. (#1229) (#1307) + - `ID` has been renamed to `TraceID`. + - `IDFromHex` has been renamed to `TraceIDFromHex`. + - `EmptySpanContext` is removed. +- Move the `go.opentelemetry.io/otel/api/trace/tracetest` package into `go.opentelemetry.io/otel/oteltest`. (#1229) +- OTLP Exporter updates: + - supports OTLP v0.6.0 (#1230, #1354) + - supports configurable aggregation temporality (default: Cumulative, optional: Stateless). (#1296) +- The Sampler is now called on local child spans. (#1233) +- The `Kind` type from the `go.opentelemetry.io/otel/api/metric` package was renamed to `InstrumentKind` to more specifically describe what it is and avoid semantic ambiguity. (#1240) +- The `MetricKind` method of the `Descriptor` type in the `go.opentelemetry.io/otel/api/metric` package was renamed to `Descriptor.InstrumentKind`. + This matches the returned type and fixes misuse of the term metric. (#1240) +- Move the test harness from the `go.opentelemetry.io/otel/api/apitest` package into `go.opentelemetry.io/otel/oteltest`. (#1241) +- Move the `go.opentelemetry.io/otel/api/metric/metrictest` package into `go.opentelemetry.io/otel/oteltest` as part of #964. (#1252) +- Move the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric` as part of #1303. (#1321) +- Move the `go.opentelemetry.io/otel/api/metric/registry` package into `go.opentelemetry.io/otel/metric/registry` as a part of #1303. (#1316) +- Move the `Number` type (together with related functions) from the `go.opentelemetry.io/otel/api/metric` package into `go.opentelemetry.io/otel/metric/number` as a part of #1303. (#1316) +- The function signature of the Span `AddEvent` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required name and a variable number of `EventOption`s. (#1254) +- The function signature of the Span `RecordError` method in `go.opentelemetry.io/otel` is updated to no longer take an unused context and instead take a required error value and a variable number of `EventOption`s (a sketch follows this section). (#1254) +- Move the `go.opentelemetry.io/otel/api/global` package to `go.opentelemetry.io/otel`. (#1262) (#1330) +- Move the `Version` function from `go.opentelemetry.io/otel/sdk` to `go.opentelemetry.io/otel`. (#1330) +- Rename the correlation context header from `"otcorrelations"` to `"baggage"` to match the OpenTelemetry specification. (#1267) +- Fix `Code.UnmarshalJSON` to work with valid JSON only. (#1276) +- The `resource.New()` method changes signature to support builtin attributes and functional options, including `telemetry.sdk.*` and + `host.name` semantic conventions; the former method is renamed `resource.NewWithAttributes`. (#1235) +- The Prometheus exporter now exports non-monotonic counters (i.e. `UpDownCounter`s) as gauges. (#1210) +- Correct the `Span.End` method documentation in the `otel` API to state that updates are not allowed on a span after it has ended. (#1310) +- Updated span collection limits for attribute, event and link counts to 1000. (#1318) +- Renamed `semconv.HTTPUrlKey` to `semconv.HTTPURLKey`. (#1338)
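A minimal sketch of the updated `AddEvent` and `RecordError` signatures described above, using `EventOption`s from `go.opentelemetry.io/otel/trace`:

```go
package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func handle(ctx context.Context) {
	_, span := otel.Tracer("example").Start(ctx, "handle")
	defer span.End()

	// No context argument: a required name plus variadic EventOptions.
	span.AddEvent("cache miss",
		trace.WithAttributes(attribute.String("cache.key", "user:42")))

	// RecordError takes the error value plus variadic EventOptions.
	span.RecordError(errors.New("upstream timeout"))
}
```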
+ +### Removed + +- The `ErrInvalidHexID`, `ErrInvalidTraceIDLength`, `ErrInvalidSpanIDLength`, or `ErrNilSpanID` errors from the `go.opentelemetry.io/otel` package are now unexported. (#1243) +- The `AddEventWithTimestamp` method on the `Span` interface in `go.opentelemetry.io/otel` is removed due to its redundancy. + It is replaced by using the `AddEvent` method with a `WithTimestamp` option. (#1254) +- The `MockSpan` and `MockTracer` types are removed from `go.opentelemetry.io/otel/oteltest`. + `Tracer` and `Span` from the same module should be used in their place instead. (#1306) +- The `WorkerCount` option is removed from `go.opentelemetry.io/otel/exporters/otlp`. (#1350) +- Remove the following label types: INT32, UINT32, UINT64 and FLOAT32. (#1314) + +### Fixed + +- Rename `MergeItererator` to `MergeIterator` in the `go.opentelemetry.io/otel/label` package. (#1244) +- The `go.opentelemetry.io/otel/api/global` package's global TextMapPropagator now delegates functionality to a globally set delegate for all previously returned propagators. (#1258) +- Fix condition in `label.Any`. (#1299) +- Fix the global `TracerProvider` to pass options to its configured provider. (#1329) +- Fix missing handler for the `ExactKind` aggregator in the OTLP metrics transformer. (#1309) + +## [0.13.0] - 2020-10-08 + +### Added + +- OTLP Metric exporter supports Histogram aggregation. (#1209) +- The `Code` struct from the `go.opentelemetry.io/otel/codes` package now supports JSON marshaling and unmarshaling as well as implements the `Stringer` interface. (#1214) +- A Baggage API to implement the OpenTelemetry specification. (#1217) +- Add a `Shutdown` method to the sdk/trace provider; it shuts down processors in the order they were registered. (#1227) + +### Changed + +- Set the default propagator to a no-op propagator. (#1184) +- The `HTTPSupplier`, `HTTPExtractor`, `HTTPInjector`, and `HTTPPropagator` from the `go.opentelemetry.io/otel/api/propagation` package were replaced with the unified `TextMapCarrier` and `TextMapPropagator` in the `go.opentelemetry.io/otel/propagation` package (an injection sketch follows this section). (#1212) (#1325) +- The `New` function from the `go.opentelemetry.io/otel/api/propagation` package was replaced with `NewCompositeTextMapPropagator` in the `go.opentelemetry.io/otel` package. (#1212) +- The status codes of the `go.opentelemetry.io/otel/codes` package have been updated to match the latest OpenTelemetry specification. + They now are `Unset`, `Error`, and `Ok`. + They no longer track the gRPC codes. (#1214) +- The `StatusCode` field of the `SpanData` struct in the `go.opentelemetry.io/otel/sdk/export/trace` package now uses the codes package from this project instead of the gRPC project. (#1214) +- Move the `go.opentelemetry.io/otel/api/baggage` package into `go.opentelemetry.io/otel/baggage`. (#1217) (#1325) +- A `Shutdown` method of `SpanProcessor` and all its implementations receives a context and returns an error. (#1264) + +### Fixed + +- Copies of data from arrays and slices passed to `go.opentelemetry.io/otel/label.ArrayValue()` are now used in the returned `Value` instead of using the mutable data itself. (#1226) + +### Removed + +- The `ExtractHTTP` and `InjectHTTP` functions from the `go.opentelemetry.io/otel/api/propagation` package were removed. (#1212) +- The `Propagators` interface from the `go.opentelemetry.io/otel/api/propagation` package was removed to conform to the OpenTelemetry specification. + The explicit `TextMapPropagator` type can be used in its place as this is the `Propagator` type the specification defines. (#1212)
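To illustrate the unified propagation API referenced above, a minimal sketch that installs a composite propagator and injects the current context into HTTP headers via `propagation.HeaderCarrier`:

```go
package main

import (
	"context"
	"net/http"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/propagation"
)

func main() {
	otel.SetTextMapPropagator(propagation.NewCompositeTextMapPropagator(
		propagation.TraceContext{},
		propagation.Baggage{},
	))

	req, _ := http.NewRequest("GET", "http://service.example.com", nil)
	// HeaderCarrier adapts http.Header to the TextMapCarrier interface.
	otel.GetTextMapPropagator().Inject(context.Background(), propagation.HeaderCarrier(req.Header))
}
```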
+- The `SetAttribute` method of the `Span` from the `go.opentelemetry.io/otel/api/trace` package was removed given its redundancy with the `SetAttributes` method. (#1216) +- The internal implementation of Baggage storage is removed in favor of using the new Baggage API functionality. (#1217) +- Remove the duplicate hostname key `HostHostNameKey` in the Resource semantic conventions. (#1219) +- Nested array/slice support has been removed. (#1226) + +## [0.12.0] - 2020-09-24 + +### Added + +- A `SpanConfigure` function in `go.opentelemetry.io/otel/api/trace` to create a new `SpanConfig` from `SpanOption`s. (#1108) +- In the `go.opentelemetry.io/otel/api/trace` package, `NewTracerConfig` was added to construct new `TracerConfig`s. + This addition was made to conform with our project option conventions. (#1155) +- Instrumentation library information was added to the Zipkin exporter. (#1119) +- The `SpanProcessor` interface now has a `ForceFlush()` method. (#1166) +- More semantic conventions for k8s as resource attributes. (#1167) + +### Changed + +- Add a reconnecting UDP connection type to the Jaeger exporter. + This change adds a new optional implementation of the UDP conn interface used to detect changes to an agent's host DNS record. + It then adopts the new destination address to ensure the exporter doesn't get stuck. This change was ported from jaegertracing/jaeger-client-go#520. (#1063) +- Replace `StartOption` and `EndOption` in `go.opentelemetry.io/otel/api/trace` with `SpanOption`. + This change is matched by replacing the `StartConfig` and `EndConfig` with a unified `SpanConfig`. (#1108) +- Replace the `LinkedTo` span option in `go.opentelemetry.io/otel/api/trace` with `WithLinks` (a sketch follows this section). + This is more consistent with our other option patterns, i.e. passing the item to be configured directly instead of its component parts, and provides a cleaner function signature. (#1108) +- The `go.opentelemetry.io/otel/api/trace` `TracerOption` was changed to an interface to conform to project option conventions. (#1109) +- Move the `B3` and `TraceContext` propagators from within the `go.opentelemetry.io/otel/api/trace` package to their own `go.opentelemetry.io/otel/propagators` package. + This removal of the propagators reflects the OpenTelemetry specification for these propagators and cleans up the `go.opentelemetry.io/otel/api/trace` API. (#1118) +- Rename the Jaeger tags used for instrumentation library information to reflect changes in the OpenTelemetry specification. (#1119) +- Rename `ProbabilitySampler` to `TraceIDRatioBased` and change semantics to ignore parent span sampling status. (#1115) +- Move the `tools` package under `internal`. (#1141) +- Move the `go.opentelemetry.io/otel/api/correlation` package to `go.opentelemetry.io/otel/api/baggage`. (#1142) + The `correlation.CorrelationContext` propagator has been renamed `baggage.Baggage`. Other exported functions and types are unchanged. +- Rename the `ParentOrElse` sampler to `ParentBased` and allow setting samplers depending on the parent span. (#1153) +- In the `go.opentelemetry.io/otel/api/trace` package, `SpanConfigure` was renamed to `NewSpanConfig`. (#1155) +- Change `dependabot.yml` to add a `Skip Changelog` label to dependabot-sourced PRs. (#1161)
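A minimal sketch of the `WithLinks` span option mentioned above, written against the modern `go.opentelemetry.io/otel/trace` naming; the linked context `linkedSC` is assumed to come from elsewhere:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel"
	"go.opentelemetry.io/otel/trace"
)

func startLinked(ctx context.Context, linkedSC trace.SpanContext) trace.Span {
	// The link is passed directly when starting the span.
	_, span := otel.Tracer("example").Start(ctx, "operation",
		trace.WithLinks(trace.Link{SpanContext: linkedSC}),
	)
	return span
}
```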
(#1161) +- The [configuration style guide](https://github.com/open-telemetry/opentelemetry-go/blob/master/CONTRIBUTING.md#config) has been updated to + recommend the use of `newConfig()` instead of `configure()`. (#1163) +- The `otlp.Config` type has been unexported and changed to `otlp.config`, along with its initializer. (#1163) +- Ensure exported interface types include parameter names and update the + Style Guide to reflect this styling rule. (#1172) +- Don't consider an unset environment variable for resource detection to be an error. (#1170) +- Rename `go.opentelemetry.io/otel/api/metric.ConfigureInstrument` to `NewInstrumentConfig` and + `go.opentelemetry.io/otel/api/metric.ConfigureMeter` to `NewMeterConfig`. +- ValueObserver instruments use the LastValue aggregator by default. (#1165) +- OTLP Metric exporter supports LastValue aggregation. (#1165) +- Move the `go.opentelemetry.io/otel/api/unit` package to `go.opentelemetry.io/otel/unit`. (#1185) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NoopProvider` to `NoopMeterProvider` in the `go.opentelemetry.io/otel/api/metric` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/metrictest` package. (#1190) +- Rename `Provider` to `MeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `NewProvider` to `NewMeterProvider` in the `go.opentelemetry.io/otel/api/metric/registry` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `NoopProvider` to `NoopTracerProvider` in the `go.opentelemetry.io/otel/api/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/api/trace/tracetest` package. (#1190) +- Rename `WrapperProvider` to `WrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename `NewWrapperProvider` to `NewWrapperTracerProvider` in the `go.opentelemetry.io/otel/bridge/opentracing` package. (#1190) +- Rename the `Provider` method of the pull controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/pull` package. (#1190) +- Rename the `Provider` method of the push controller to `MeterProvider` in the `go.opentelemetry.io/otel/sdk/metric/controller/push` package. (#1190) +- Rename `ProviderOptions` to `TracerProviderConfig` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `ProviderOption` to `TracerProviderOption` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `Provider` to `TracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Rename `NewProvider` to `NewTracerProvider` in the `go.opentelemetry.io/otel/sdk/trace` package. (#1190) +- Renamed `SamplingDecision` values to comply with an OpenTelemetry specification change. (#1192) +- Renamed Zipkin attribute names from `ot.status_code & ot.status_description` to `otel.status_code & otel.status_description`. (#1201) +- The default SDK now invokes registered `SpanProcessor`s in the order they were registered with the `TracerProvider`. (#1195) +- Add a test of spans being processed by the `SpanProcessor`s in the order they were registered. (#1203) + +### Removed + +- Remove the B3 propagator from `go.opentelemetry.io/otel/propagators`.
It is now located in the + `go.opentelemetry.io/contrib/propagators/` module. (#1191) +- Remove the semantic convention for HTTP status text, `HTTPStatusTextKey`, from the `go.opentelemetry.io/otel/semconv` package. (#1194) + +### Fixed + +- The Zipkin example no longer mentions `ParentSampler`, corrected to `ParentBased`. (#1171) +- Fix the missing shutdown processor in the otel-collector example. (#1186) +- Fix the missing shutdown processor in the basic and namedtracer examples. (#1197) + +## [0.11.0] - 2020-08-24 + +### Added + +- Support for exporting array-valued attributes via OTLP. (#992) +- `Noop` and `InMemory` `SpanBatcher` implementations to help with testing integrations. (#994) +- Support for filtering metric label sets. (#1047) +- A dimensionality-reducing metric Processor. (#1057) +- Integration tests for more OTel Collector Attribute types. (#1062) +- A new `WithSpanProcessor` `ProviderOption` is added to the `go.opentelemetry.io/otel/sdk/trace` package to create a `Provider` and automatically register the `SpanProcessor`. (#1078) + +### Changed + +- Rename `sdk/metric/processor/test` to `sdk/metric/processor/processortest`. (#1049) +- Rename `sdk/metric/controller/test` to `sdk/metric/controller/controllertest`. (#1049) +- Rename `api/testharness` to `api/apitest`. (#1049) +- Rename `api/trace/testtrace` to `api/trace/tracetest`. (#1049) +- Change the Metric Processor to merge multiple observations. (#1024) +- The `go.opentelemetry.io/otel/bridge/opentracing` bridge package has been made into its own module. + This removes the package dependencies of this bridge from the rest of the OpenTelemetry based project. (#1038) +- Renamed the `go.opentelemetry.io/otel/api/standard` package to `go.opentelemetry.io/otel/semconv` to avoid the ambiguous and generic name `standard` and better describe the package as containing OpenTelemetry semantic conventions. (#1016) +- The environment variable used for resource detection has been changed from `OTEL_RESOURCE_LABELS` to `OTEL_RESOURCE_ATTRIBUTES`. (#1042) +- Replace `WithSyncer` with `WithBatcher` in examples. (#1044) +- Replace the `google.golang.org/grpc/codes` dependency in the API with an equivalent `go.opentelemetry.io/otel/codes` package. (#1046) +- Merge the `go.opentelemetry.io/otel/api/label` and `go.opentelemetry.io/otel/api/kv` packages into the new `go.opentelemetry.io/otel/label` package. (#1060) +- Unify callback function naming. + Rename `*Callback` to `*Func`. (#1061) +- CI builds validate against the last two versions of Go, dropping 1.13 and adding 1.15. (#1064) +- The `go.opentelemetry.io/otel/sdk/export/trace` interfaces `SpanSyncer` and `SpanBatcher` have been replaced with a specification-compliant `Exporter` interface. + This interface still supports the export of `SpanData`, but only as a slice. + Implementations are now also required to return any error from `ExportSpans` if one occurs, as well as to implement a `Shutdown` method for exporter clean-up (a sketch follows this section). (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewBatchSpanProcessor` function no longer returns an error. + If a `nil` exporter is passed as an argument to this function, instead of it returning an error, it now returns a `BatchSpanProcessor` that handles the export of `SpanData` by not taking any action. (#1078) +- The `go.opentelemetry.io/otel/sdk/trace` `NewProvider` function to create a `Provider` no longer returns an error, instead returning only a `*Provider`. + This change is related to `NewBatchSpanProcessor` not returning an error, which was the only error this function would return. (#1078)
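A hedged sketch of the exporter contract described above, using today's `go.opentelemetry.io/otel/sdk/trace` naming (`SpanExporter`, `ReadOnlySpan`) rather than the `SpanData` slice of this release:

```go
package main

import (
	"context"

	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// discardExporter satisfies the exporter contract: ExportSpans reports
// any export failure, and Shutdown allows clean-up.
type discardExporter struct{}

func (discardExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
	return nil // drop everything; a real exporter would send the batch here
}

func (discardExporter) Shutdown(ctx context.Context) error { return nil }

var _ sdktrace.SpanExporter = discardExporter{}
```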
+ +### Removed + +- Duplicate, unused API sampler interface. (#999) + Use the [`Sampler` interface](https://github.com/open-telemetry/opentelemetry-go/blob/v0.11.0/sdk/trace/sampling.go) provided by the SDK instead. +- The `grpctrace` instrumentation was moved to the `go.opentelemetry.io/contrib` repository and out of this repository. + This move includes moving the `grpc` example to the `go.opentelemetry.io/contrib` repository as well. (#1027) +- The `WithSpan` method of the `Tracer` interface. + The functionality this method provided was limited compared to what a user can provide themselves. + It was removed with the understanding that if there is sufficient user need it can be added back based on actual user usage. (#1043) +- The `RegisterSpanProcessor` and `UnregisterSpanProcessor` functions. + These were holdovers from an approach prior to the TracerProvider design. They were no longer used. (#1077) +- The `oterror` package. (#1026) +- The `othttp` and `httptrace` instrumentations were moved to `go.opentelemetry.io/contrib`. (#1032) + +### Fixed + +- The `semconv.HTTPServerMetricAttributesFromHTTPRequest()` function no longer generates the high-cardinality `http.request.content.length` label. (#1031) +- Correct the instrumentation version tag in the Jaeger exporter. (#1037) +- The SDK span will now set an error event if the `End` method is called during a panic (i.e. it was deferred). (#1043) +- Move internally generated protobuf code from the `go.opentelemetry.io/otel` module to the OTLP exporter to reduce dependency overhead. (#1050) +- The `otel-collector` example referenced outdated collector processors. (#1006) + +## [0.10.0] - 2020-07-29 + +This release migrates the default OpenTelemetry SDK into its own Go module, decoupling the SDK from the API and reducing dependencies for instrumentation packages. + +### Added + +- The Zipkin exporter now has `NewExportPipeline` and `InstallNewPipeline` constructor functions to match the common pattern. + These functions build a new exporter with default SDK options and register the exporter with the `global` package, respectively. (#944) +- Add a propagator option for gRPC instrumentation. (#986) +- The `testtrace` package now tracks the `trace.SpanKind` for each span. (#987) + +### Changed + +- Replace the `RegisterGlobal` `Option` in the Jaeger exporter with an `InstallNewPipeline` constructor function. + This matches the other exporter constructor patterns and will register a new exporter after building it with default configuration. (#944) +- The trace (`go.opentelemetry.io/otel/exporters/trace/stdout`) and metric (`go.opentelemetry.io/otel/exporters/metric/stdout`) `stdout` exporters are now merged into a single exporter at `go.opentelemetry.io/otel/exporters/stdout`. + This new exporter was made into its own Go module to follow the pattern of all exporters and decouple it from the `go.opentelemetry.io/otel` module. (#956, #963) +- Move the `go.opentelemetry.io/otel/exporters/test` test package to `go.opentelemetry.io/otel/sdk/export/metric/metrictest`. (#962) +- The `go.opentelemetry.io/otel/api/kv/value` package was merged into the parent `go.opentelemetry.io/otel/api/kv` package. (#968) + - `value.Bool` was replaced with `kv.BoolValue`. + - `value.Int64` was replaced with `kv.Int64Value`. + - `value.Uint64` was replaced with `kv.Uint64Value`. + - `value.Float64` was replaced with `kv.Float64Value`. + - `value.Int32` was replaced with `kv.Int32Value`. + - `value.Uint32` was replaced with `kv.Uint32Value`.
+ - `value.Float32` was replaced with `kv.Float32Value`. + - `value.String` was replaced with `kv.StringValue`. + - `value.Int` was replaced with `kv.IntValue`. + - `value.Uint` was replaced with `kv.UintValue`. + - `value.Array` was replaced with `kv.ArrayValue`. +- Rename `Infer` to `Any` in the `go.opentelemetry.io/otel/api/kv` package. (#972) +- Change `othttp` to use the `httpsnoop` package to wrap the `ResponseWriter` so that optional interfaces (`http.Hijacker`, `http.Flusher`, etc.) that are implemented by the original `ResponseWriter` are also implemented by the wrapped `ResponseWriter`. (#979) +- Rename the `go.opentelemetry.io/otel/sdk/metric/aggregator/test` package to `go.opentelemetry.io/otel/sdk/metric/aggregator/aggregatortest`. (#980) +- Make the SDK into its own Go module called `go.opentelemetry.io/otel/sdk`. (#985) +- Changed the default trace `Sampler` from `AlwaysOn` to `ParentOrElse(AlwaysOn)`. (#989) + +### Removed + +- The `IndexedAttribute` function from the `go.opentelemetry.io/otel/api/label` package was removed in favor of `IndexedLabel`, with which it was synonymous. (#970) + +### Fixed + +- Bump github.com/golangci/golangci-lint from 1.28.3 to 1.29.0 in /tools. (#953) +- Bump github.com/google/go-cmp from 0.5.0 to 0.5.1. (#957) +- Use `global.Handle` for span export errors in the OTLP exporter. (#946) +- Correct Go language formatting in the README documentation. (#961) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/api` package. (#977) +- Remove default SDK dependencies from the `go.opentelemetry.io/otel/instrumentation` package. (#983) +- Move documented examples for `go.opentelemetry.io/otel/instrumentation/grpctrace` interceptors into Go example tests. (#984) + +## [0.9.0] - 2020-07-20 + +### Added + +- A new Resource Detector interface is included to allow resources to be automatically detected and included (an illustrative detector is sketched after this section). (#939) +- A Detector to automatically detect resources from an environment variable. (#939) +- GitHub action to generate protobuf Go bindings locally in `internal/opentelemetry-proto-gen`. (#938) +- OTLP .proto files from `open-telemetry/opentelemetry-proto` imported as a git submodule under `internal/opentelemetry-proto`. + References to `github.com/open-telemetry/opentelemetry-proto` changed to `go.opentelemetry.io/otel/internal/opentelemetry-proto-gen`. (#942) + +### Changed + +- Non-nil value `struct`s for key-value pairs will be marshalled using JSON rather than `Sprintf`. (#948) + +### Removed + +- Removed the dependency on `github.com/open-telemetry/opentelemetry-collector`. (#943) + +## [0.8.0] - 2020-07-09 + +### Added + +- The `B3Encoding` type to represent the B3 encoding(s) the B3 propagator can inject. + Values for the supported HTTP encodings (Multiple Header: `MultipleHeader`, Single Header: `SingleHeader`) are included. (#882) +- The `FlagsDeferred` trace flag to indicate if the trace sampling decision has been deferred. (#882) +- The `FlagsDebug` trace flag to indicate if the trace is a debug trace. (#882) +- Add the `peer.service` semantic attribute. (#898) +- Add database-specific semantic attributes. (#899) +- Add semantic conventions for `faas.coldstart` and `container.id`. (#909) +- Add HTTP content size semantic conventions. (#905) +- Include `http.request_content_length` in HTTP request basic attributes. (#905) +- Add semantic conventions for operating system process resource attribute keys. (#919) +- The Jaeger exporter now has a `WithBatchMaxCount` option to specify the maximum number of spans sent in a batch. (#931)
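A hedged sketch of the Resource Detector interface mentioned in the 0.9.0 entries above, using the current `go.opentelemetry.io/otel/sdk/resource` shape (`Detect(context.Context) (*resource.Resource, error)`); the attribute and its value are illustrative:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// regionDetector is an illustrative Detector reporting a fixed region.
type regionDetector struct{}

func (regionDetector) Detect(ctx context.Context) (*resource.Resource, error) {
	// An empty schema URL is used here for simplicity.
	return resource.NewWithAttributes("",
		attribute.String("cloud.region", "eu-west-1"),
	), nil
}

var _ resource.Detector = regionDetector{}
```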
+### Changed + +- Update `CONTRIBUTING.md` to ask for updates to `CHANGELOG.md` with each pull request. (#879) +- Use lowercase header names for B3 Multiple Headers. (#881) +- The B3 propagator `SingleHeader` field has been replaced with `InjectEncoding` (a sketch follows this section). + This new field can be set to combinations of the `B3Encoding` bitmasks and will inject trace information in these encodings. + If no encoding is set, the propagator will default to `MultipleHeader` encoding. (#882) +- The B3 propagator now extracts from either HTTP encoding of B3 (Single Header or Multiple Header) based on what is contained in the header. + Preference is given to Single Header encoding with Multiple Header being the fallback if Single Header is not found or is invalid. + This behavior change is made to dynamically support all correctly encoded traces received instead of having to guess the expected encoding prior to receiving. (#882) +- Extend semantic conventions for RPC. (#900) +- To match constant naming conventions in the `api/standard` package, the `FaaS*` key names are appended with a suffix of `Key`. (#920) + - `"api/standard".FaaSName` -> `FaaSNameKey` + - `"api/standard".FaaSID` -> `FaaSIDKey` + - `"api/standard".FaaSVersion` -> `FaaSVersionKey` + - `"api/standard".FaaSInstance` -> `FaaSInstanceKey` + +### Removed + +- The `FlagsUnused` trace flag is removed. + The purpose of this flag was to act as the inverse of `FlagsSampled`; the inverse of `FlagsSampled` is used instead. (#882) +- The B3 header constants (`B3SingleHeader`, `B3DebugFlagHeader`, `B3TraceIDHeader`, `B3SpanIDHeader`, `B3SampledHeader`, `B3ParentSpanIDHeader`) are removed. + If B3 header keys are needed, [the authoritative OpenZipkin package constants](https://pkg.go.dev/github.com/openzipkin/zipkin-go@v0.2.2/propagation/b3?tab=doc#pkg-constants) should be used instead. (#882) + +### Fixed + +- The B3 Single Header name is now correctly `b3` instead of the previous `X-B3`. (#881) +- The B3 propagator now correctly supports sampling-only values (`b3: 0`, `b3: 1`, or `b3: d`) for a Single B3 Header. (#882) +- The B3 propagator now propagates the debug flag. + This removes the behavior of changing the debug flag into a set sampling bit. + Instead, this now follows the B3 specification and omits the `X-B3-Sampled` header. (#882) +- The B3 propagator now tracks "unset" sampling state (meaning "defer the decision") and does not set the `X-B3-Sampled` header when injecting. (#882) +- Bump github.com/itchyny/gojq from 0.10.3 to 0.10.4 in /tools. (#883) +- Bump github.com/opentracing/opentracing-go from v1.1.1-0.20190913142402-a7454ce5950e to v1.2.0. (#885) +- The tracing time conversion for OTLP spans is now correctly set to `UnixNano`. (#896) +- Ensure the span status is not set to `Unknown` when no HTTP status code is provided, as it is assumed to be `200 OK`. (#908) +- Ensure `httptrace.clientTracer` closes the `http.headers` span. (#912) +- The Prometheus exporter will not apply stale updates or forget inactive metrics. (#903) +- Add a test for api.standard `HTTPClientAttributesFromHTTPRequest`. (#905) +- Bump github.com/golangci/golangci-lint from 1.27.0 to 1.28.1 in /tools. (#901, #913) +- Update the otel-collector example to use the v0.5.0 collector. (#915) +- The `grpctrace` instrumentation uses a span name conforming to the OpenTelemetry semantic conventions (does not contain a leading slash (`/`)). (#922) +- The `grpctrace` instrumentation includes an `rpc.method` attribute now set to the gRPC method name. (#900, #922)
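The `InjectEncoding` change above can be illustrated with the B3 propagator's later home in contrib; a hedged sketch assuming the `go.opentelemetry.io/contrib/propagators/b3` API (`New`, `WithInjectEncoding`, and the `B3MultipleHeader`/`B3SingleHeader` bitmasks):

```go
package main

import (
	"go.opentelemetry.io/contrib/propagators/b3"
	"go.opentelemetry.io/otel"
)

func main() {
	// Inject in both encodings; extraction prefers Single Header and
	// falls back to Multiple Header.
	prop := b3.New(b3.WithInjectEncoding(b3.B3MultipleHeader | b3.B3SingleHeader))
	otel.SetTextMapPropagator(prop)
}
```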
+- The `grpctrace` instrumentation `rpc.service` attribute now contains the package name if one exists. + This is in accordance with OpenTelemetry semantic conventions. (#922) +- The Correlation Context extractor will no longer insert an empty map into the returned context when no valid values are extracted. (#923) +- Bump google.golang.org/api from 0.28.0 to 0.29.0 in /exporters/trace/jaeger. (#925) +- Bump github.com/itchyny/gojq from 0.10.4 to 0.11.0 in /tools. (#926) +- Bump github.com/golangci/golangci-lint from 1.28.1 to 1.28.2 in /tools. (#930) + +## [0.7.0] - 2020-06-26 + +This release implements the v0.5.0 version of the OpenTelemetry specification. + +### Added + +- The othttp instrumentation now includes default metrics. (#861) +- This CHANGELOG file to track all changes in the project going forward. +- Support for array type attributes. (#798) +- Apply transitive dependabot go.mod dependency updates as part of a new automatic GitHub workflow. (#844) +- Timestamps are now passed to exporters for each export. (#835) +- Add a new `Accumulation` type to the metric SDK to transport telemetry from `Accumulator`s to `Processor`s. + This replaces the prior `Record` `struct` use for this purpose. (#835) +- New dependabot integration to automate package upgrades. (#814) +- `Meter` and `Tracer` implementations accept an instrumentation version as an optional argument. + This instrumentation version is passed on to exporters. (#811) (#805) (#802) +- The OTLP exporter includes the instrumentation version in the telemetry it exports. (#811) +- Environment variables for the Jaeger exporter are supported. (#796) +- New `aggregation.Kind` in the export metric API. (#808) +- New example that uses OTLP and the collector. (#790) +- Handle errors in the span `SetName` during span initialization. (#791) +- Default service config to enable retries for retry-able failed requests in the OTLP exporter and an option to override this default. (#777) +- New `go.opentelemetry.io/otel/api/oterror` package to uniformly support error handling and definitions for the project. (#778) +- New `global` default implementation of the `go.opentelemetry.io/otel/api/oterror.Handler` interface to be used to handle errors prior to a user-defined `Handler`. + There is also functionality for the user to register their `Handler` as well as a convenience function `Handle` to handle an error with this global `Handler`. (#778) +- Options to specify propagators for httptrace and grpctrace instrumentation. (#784) +- The required `application/json` header for the Zipkin exporter is included in all exports. (#774) +- Integrate HTTP semantics helpers from the contrib repository into the `api/standard` package. (#769) + +### Changed + +- Rename `Integrator` to `Processor` in the metric SDK. (#863) +- Rename `AggregationSelector` to `AggregatorSelector`. (#859) +- Rename `SynchronizedCopy` to `SynchronizedMove`. (#858) +- Rename the `simple` integrator to the `basic` integrator. (#857) +- Merge otlp collector examples. (#841) +- Change the metric SDK to support cumulative, delta, and pass-through exporters directly. + With these changes, cumulative- and delta-specific exporters are able to request the correct kind of aggregation from the SDK. (#840) +- The `Aggregator.Checkpoint` API is renamed to `SynchronizedCopy` and adds an argument, a different `Aggregator` into which the copy is stored. (#812) +- The `export.Aggregator` contract is that `Update()` and `SynchronizedCopy()` are synchronized with each other.
+ All the aggregation interfaces (`Sum`, `LastValue`, ...) are not meant to be synchronized, as the caller is expected to synchronize aggregators at a higher level after the `Accumulator`. + Some of the `Aggregators` used unnecessary locking, and that has been cleaned up. (#812) +- Use of `metric.Number` was replaced by `int64` now that we use `sync.Mutex` in the `MinMaxSumCount` and `Histogram` `Aggregators`. (#812) +- Replace `AlwaysParentSample` with `ParentSample(fallback)` to match the OpenTelemetry v0.5.0 specification. (#810) +- Rename `sdk/export/metric/aggregator` to `sdk/export/metric/aggregation`. (#808) +- Send configured headers with every request in the OTLP exporter, instead of just on connection creation. (#806) +- Update error handling for any one-off error handlers, replacing them with the `global.Handle` function. (#791) +- Rename the `plugin` directory to `instrumentation` to match the OpenTelemetry specification. (#779) +- Make the argument order of the Histogram and DDSketch `New()` functions consistent. (#781) + +### Removed + +- `Uint64NumberKind` and related functions from the API. (#864) +- Context arguments from `Aggregator.Checkpoint` and `Integrator.Process`, as they were unused. (#803) +- `SpanID` is no longer included in parameters for the sampling decision, to match the OpenTelemetry specification. (#775) + +### Fixed + +- Upgrade the OTLP exporter to opentelemetry-proto matching the opentelemetry-collector v0.4.0 release. (#866) +- Allow changes to `go.sum` and `go.mod` when running dependabot tidy-up. (#871) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1. (#824) +- Bump github.com/prometheus/client_golang from 1.7.0 to 1.7.1 in /exporters/metric/prometheus. (#867) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/jaeger. (#853) +- Bump google.golang.org/grpc from 1.29.1 to 1.30.0 in /exporters/trace/zipkin. (#854) +- Bump github.com/golang/protobuf from 1.3.2 to 1.4.2 (#848) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/otlp (#817) +- Bump github.com/golangci/golangci-lint from 1.25.1 to 1.27.0 in /tools (#828) +- Bump github.com/prometheus/client_golang from 1.5.0 to 1.7.0 in /exporters/metric/prometheus (#838) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/jaeger (#829) +- Bump github.com/benbjohnson/clock from 1.0.0 to 1.0.3 (#815) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/trace/zipkin (#823) +- Bump github.com/itchyny/gojq from 0.10.1 to 0.10.3 in /tools (#830) +- Bump github.com/stretchr/testify from 1.4.0 to 1.6.1 in /exporters/metric/prometheus (#822) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/zipkin (#820) +- Bump google.golang.org/grpc from 1.27.1 to 1.29.1 in /exporters/trace/jaeger (#831) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 (#836) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/trace/jaeger (#837) +- Bump github.com/google/go-cmp from 0.4.0 to 0.5.0 in /exporters/otlp (#839) +- Bump google.golang.org/api from 0.20.0 to 0.28.0 in /exporters/trace/jaeger (#843) +- Set span status from the HTTP status code in the othttp instrumentation. (#832) +- Fixed a typo in a push controller comment. (#834) +- The `Aggregator` testing has been updated and cleaned. (#812) +- `metric.Number(0)` expressions are replaced by `0` where possible. (#812) +- Fixed the `global` `handler_test.go` test failure. (#804) +- Fixed `BatchSpanProcessor.Shutdown` to wait until all spans are processed.
(#766) +- Fixed the OTLP example's accidental early close of the exporter. (#807) +- Ensure the Zipkin exporter reads and closes the response body. (#788) +- Update instrumentation to use `api/standard` keys instead of custom keys. (#782) +- Clean up tools and RELEASING documentation. (#762) + +## [0.6.0] - 2020-05-21 + +### Added + +- Support for `Resource`s in the prometheus exporter. (#757) +- New pull controller. (#751) +- New `UpDownSumObserver` instrument. (#750) +- OpenTelemetry collector demo. (#711) +- New `SumObserver` instrument. (#747) +- New `UpDownCounter` instrument. (#745) +- New timeout `Option` and configuration function `WithTimeout` for the push controller. (#742) +- New `api/standard` package to implement semantic conventions and standard key-value generation. (#731) + +### Changed + +- Rename `Register*` functions in the metric API to `New*` for all `Observer` instruments. (#761) +- Use `[]float64` for histogram boundaries, not `[]metric.Number`. (#758) +- Change the OTLP example to use the exporter as a trace `Syncer` instead of as an unneeded `Batcher`. (#756) +- Replace `WithResourceAttributes()` with `WithResource()` in the trace SDK. (#754) +- The prometheus exporter now uses the new pull controller. (#751) +- Rename `ScheduleDelayMillis` to `BatchTimeout` in the trace `BatchSpanProcessor`. (#752) +- Support use of synchronous instruments in asynchronous callbacks. (#725) +- Move `Resource` from the `Export` method parameter into the metric export `Record`. (#739) +- Rename the `Observer` instrument to `ValueObserver`. (#734) +- The push controller now has a method (`Provider()`) to return a `metric.Provider` instead of the old `Meter` method that acted as a `metric.Provider`. (#738) +- Replace the `Measure` instrument with the `ValueRecorder` instrument. (#732) +- Rename the correlation context header from `"Correlation-Context"` to `"otcorrelations"` to match the OpenTelemetry specification. (#727) + +### Fixed + +- Ensure gRPC `ClientStream` override methods do not panic in the grpctrace package. (#755) +- Disable parts of the `BatchSpanProcessor` test until a fix is found. (#743) +- Fix the `string` case in the `kv` `Infer` function. (#746) +- Fix a panic in the grpctrace client interceptors. (#740) +- Refactor the `api/metrics` push controller and add `CheckpointSet` synchronization. (#737) +- Rewrite the span batch process queue batching logic. (#719) +- Remove the push controller's named Meter map. (#738) +- Fix the Histogram aggregator initial state (fix #735). (#736) +- Ensure the golang alpine image is running `golang-1.14` for examples. (#733) +- Added a test for grpctrace `UnaryInterceptorClient`. (#695) +- Rearrange `api/metric` code layout. (#724) + +## [0.5.0] - 2020-05-13 + +### Added + +- Batch `Observer` callback support. (#717) +- Alias `api` types to the root package of the project. (#696) +- Create a basic `othttp.Transport` for simple client instrumentation. (#678) +- `SetAttribute(string, interface{})` to the trace API. (#674) +- A Jaeger exporter option that allows the user to specify a custom HTTP client. (#671) +- `Stringer` and `Infer` methods to `key`s. (#662) + +### Changed + +- Rename `NewKey` in the `kv` package to just `Key`. (#721) +- Move `core` and `key` to the `kv` package. (#720) +- Make the metric API `Meter` a `struct` so the abstract `MeterImpl` can be passed and simplify implementation. (#709) +- Rename SDK `Batcher` to `Integrator` to match the draft OpenTelemetry SDK specification. (#710) +- Rename the SDK `Ungrouped` integrator to `simple.Integrator` to match the draft OpenTelemetry SDK specification.
(#710) +- Rename SDK `SDK` `struct` to `Accumulator` to match draft OpenTelemetry SDK specification. (#710) +- Move `Number` from `core` to `api/metric` package. (#706) +- Move `SpanContext` from `core` to `trace` package. (#692) +- Change traceparent header from `Traceparent` to `traceparent` to implement the W3C specification. (#681) + +### Fixed + +- Update tooling to run generators in all submodules. (#705) +- gRPC interceptor regexp to match methods without a service name. (#683) +- Use a `const` for padding 64-bit B3 trace IDs. (#701) +- Update `mockZipkin` listen address from `:0` to `127.0.0.1:0`. (#700) +- Left-pad 64-bit B3 trace IDs with zero. (#698) +- Propagate at least the first W3C tracestate header. (#694) +- Remove internal `StateLocker` implementation. (#688) +- Increase the instance size the CI system uses. (#690) +- Add a `key` benchmark and use reflection in `key.Infer()`. (#679) +- Fix internal `global` test by using `global.Meter` with `RecordBatch()`. (#680) +- Reimplement histogram using mutex instead of `StateLocker`. (#669) +- Switch `MinMaxSumCount` to a mutex lock implementation instead of `StateLocker`. (#667) +- Update documentation to not include any references to `WithKeys`. (#672) +- Correct misspelling. (#668) +- Fix clobbering of the span context if extraction fails. (#656) +- Bump `golangci-lint` and work around the corrupting bug. (#666) (#670) + +## [0.4.3] - 2020-04-24 + +### Added + +- `Dockerfile` and `docker-compose.yml` to run example code. (#635) +- New `grpctrace` package that provides gRPC client and server interceptors for both unary and stream connections. (#621) +- New `api/label` package, providing common label set implementation. (#651) +- Support for JSON marshaling of `Resources`. (#654) +- `TraceID` and `SpanID` implementations for `Stringer` interface. (#642) +- `RemoteAddrKey` in the othttp plugin to include the HTTP client address in top-level spans. (#627) +- `WithSpanFormatter` option to the othttp plugin. (#617) +- Updated README to include section for compatible libraries and include reference to the contrib repository. (#612) +- The prometheus exporter now supports exporting histograms. (#601) +- A `String` method to the `Resource` to return a hashable identifier for a now-unique resource. (#613) +- An `Iter` method to the `Resource` to return an array `AttributeIterator`. (#613) +- An `Equal` method to the `Resource` to test the equivalence of resources. (#613) +- An iterable structure (`AttributeIterator`) for `Resource` attributes. + +### Changed + +- The zipkin exporter's `NewExporter` now requires a `serviceName` argument to ensure this needed value is provided. (#644) +- Pass `Resources` through the metrics export pipeline. (#659) + +### Removed + +- `WithKeys` option from the metric API. (#639) + +### Fixed + +- Use the `label.Set.Equivalent` value instead of an encoding in the batcher. (#658) +- Correct typo `trace.Exporter` to `trace.SpanSyncer` in comments. (#653) +- Use type names for return values in jaeger exporter. (#648) +- Increase the visibility of the `api/key` package by updating comments and fixing usages locally. (#650) +- `Checkpoint` only after `Update`; keep records in the `sync.Map` longer. (#647) +- Do not cache `reflect.ValueOf()` in metric Labels. (#649) +- Batch metrics exported from the OTLP exporter based on `Resource` and labels. (#626) +- Add error wrapping to the prometheus exporter.
(#631) +- Update the OTLP exporter batching of traces to use a unique `string` representation of an associated `Resource` as the batching key. (#623) +- Update OTLP `SpanData` transform to only include the `ParentSpanID` if one exists. (#614) +- Update `Resource` internal representation to uniquely and reliably identify resources. (#613) +- Check return value from `CheckpointSet.ForEach` in prometheus exporter. (#622) +- Ensure spans created by httptrace client tracer reflect operation structure. (#618) +- Create a new recorder rather than reuse when multiple observations in the same epoch for asynchronous instruments. (#610) +- The default port the OTLP exporter uses to connect to the OpenTelemetry collector is updated to match the one the collector listens on by default. (#611) + +## [0.4.2] - 2020-03-31 + +### Fixed + +- Fix `pre_release.sh` to update version in `sdk/opentelemetry.go`. (#607) +- Fix time conversion from internal to OTLP in OTLP exporter. (#606) + +## [0.4.1] - 2020-03-31 + +### Fixed + +- Update `tag.sh` to create signed tags. (#604) + +## [0.4.0] - 2020-03-30 + +### Added + +- New API package `api/metric/registry` that exposes a `MeterImpl` wrapper for use by SDKs to generate unique instruments. (#580) +- Script to verify examples after a new release. (#579) + +### Removed + +- The dogstatsd exporter due to lack of support. + This additionally removes support for statsd. (#591) +- `LabelSet` from the metric API. + This is replaced by a `[]core.KeyValue` slice. (#595) +- `Labels` from the metric API's `Meter` interface. (#595) + +### Changed + +- The metric `export.Labels` became an interface which the SDK implements, and the `export` package provides a simple, immutable implementation of this interface intended for testing purposes. (#574) +- Renamed `internal/metric.Meter` to `MeterImpl`. (#580) +- Renamed `api/global/internal.obsImpl` to `asyncImpl`. (#580) + +### Fixed + +- Corrected missing return in mock span. (#582) +- Update License header for all source files to match CNCF guidelines and include a test to ensure it is present. (#586) (#596) +- Update to v0.3.0 of the OTLP in the OTLP exporter. (#588) +- Update pre-release script to be compatible between GNU and BSD based systems. (#592) +- Add a `RecordBatch` benchmark. (#594) +- Moved span transforms of the OTLP exporter to the internal package. (#593) +- Build both go-1.13 and go-1.14 in circleci to test for all supported versions of Go. (#569) +- Removed unneeded allocation on empty labels in OTLP exporter. (#597) +- Update `BatchedSpanProcessor` to process the queue until no data but respect max batch size. (#599) +- Update project documentation godoc.org links to pkg.go.dev. (#602) + +## [0.3.0] - 2020-03-21 + +This is a first official beta release, which provides almost fully complete metrics, tracing, and context propagation functionality. +There is still a possibility of breaking changes. + +### Added + +- Add `Observer` metric instrument. (#474) +- Add global `Propagators` functionality to enable deferred initialization for propagators registered before the first Meter SDK is installed. (#494) +- Simplified export setup pipeline for the jaeger exporter to match other exporters. (#459) +- The zipkin trace exporter. (#495) +- The OTLP exporter to export metric and trace telemetry to the OpenTelemetry collector. (#497) (#544) (#545) +- Add `StatusMessage` field to the trace `Span`. (#524) +- Context propagation in OpenTracing bridge in terms of OpenTelemetry context propagation.
(#525) +- The `Resource` type was added to the SDK. (#528) +- The global API now supports a `Tracer` and `Meter` function as shortcuts to getting a global `*Provider` and calling these methods directly. (#538) +- The metric API now defines a generic `MeterImpl` interface to support general purpose `Meter` construction. + Additionally, `SyncImpl` and `AsyncImpl` are added to support general purpose instrument construction. (#560) +- A metric `Kind` is added to represent the `MeasureKind`, `ObserverKind`, and `CounterKind`. (#560) +- Scripts to better automate the release process. (#576) + +### Changed + +- Default to use `AlwaysSampler` instead of `ProbabilitySampler` to match the OpenTelemetry specification. (#506) +- Renamed `AlwaysSampleSampler` to `AlwaysOnSampler` in the trace API. (#511) +- Renamed `NeverSampleSampler` to `AlwaysOffSampler` in the trace API. (#511) +- The `Status` field of the `Span` was changed to `StatusCode` to disambiguate with the added `StatusMessage`. (#524) +- Updated the trace `Sampler` interface to conform to the OpenTelemetry specification. (#531) +- Rename metric API `Options` to `Config`. (#541) +- Rename metric `Counter` aggregator to be `Sum`. (#541) +- Unify metric options into `Option` from instrument specific options. (#541) +- The trace API's `TraceProvider` now supports `Resource`s. (#545) +- Correct error in zipkin module name. (#548) +- The jaeger trace exporter now supports `Resource`s. (#551) +- Metric SDK now supports `Resource`s. + The `WithResource` option was added to configure a `Resource` on creation and the `Resource` method was added to the metric `Descriptor` to return the associated `Resource`. (#552) +- Replace `ErrNoLastValue` and `ErrEmptyDataSet` by `ErrNoData` in the metric SDK. (#557) +- The stdout trace exporter now supports `Resource`s. (#558) +- The metric `Descriptor` is now included at the API instead of the SDK. (#560) +- Replace `Ordered` with an iterator in `export.Labels`. (#567) + +### Removed + +- The vendor specific Stackdriver. It is now hosted on 3rd party vendor infrastructure. (#452) +- The `Unregister` method for metric observers as it is not in the OpenTelemetry specification. (#560) +- `GetDescriptor` from the metric SDK. (#575) +- The `Gauge` instrument from the metric API. (#537) + +### Fixed + +- Make histogram aggregator checkpoint consistent. (#438) +- Update README with import instructions and how to build and test. (#505) +- The default label encoding was updated to be unique. (#508) +- Use `NewRoot` in the othttp plugin for public endpoints. (#513) +- Fix data race in `BatchedSpanProcessor`. (#518) +- Skip test-386 for Mac OS 10.15.x (Catalina and upwards). (#521) +- Use a variable-size array to represent ordered labels in maps. (#523) +- Update the OTLP protobuf and update changed import path. (#532) +- Use `StateLocker` implementation in `MinMaxSumCount`. (#546) +- Eliminate goroutine leak in histogram stress test. (#547) +- Update OTLP exporter with latest protobuf. (#550) +- Add filters to the othttp plugin. (#556) +- Provide an implementation of the `Header*` filters that do not depend on Go 1.14. (#565) +- Encode labels once during checkpoint. + The checkpoint function is executed in a single thread so we can do the encoding lazily before passing the encoded version of labels to the exporter. + This is a cheap and quick way to avoid encoding the labels on every collection interval. (#572) +- Run coverage over all packages in `COVERAGE_MOD_DIR`.
(#573) + +## [0.2.3] - 2020-03-04 + +### Added + +- `RecordError` method on `Span`s in the trace API to simplify adding error events to spans. (#473) +- Configurable push frequency for exporters setup pipeline. (#504) + +### Changed + +- Rename the `exporter` directory to `exporters`. + The `go.opentelemetry.io/otel/exporter/trace/jaeger` package was mistakenly released with a `v1.0.0` tag instead of `v0.1.0`. + This resulted in all subsequent releases not becoming the default latest. + A consequence of this was that all `go get`s pulled in the incompatible `v0.1.0` release of that package when pulling in more recent packages from other otel packages. + Renaming the `exporter` directory to `exporters` fixes this issue by renaming the package and therefore clearing any existing dependency tags. + Consequently, this action also renames *all* exporter packages. (#502) + +### Removed + +- The `CorrelationContextHeader` constant in the `correlation` package is no longer exported. (#503) + +## [0.2.2] - 2020-02-27 + +### Added + +- `HTTPSupplier` interface in the propagation API to specify methods to retrieve and store a single value for a key to be associated with a carrier. (#467) +- `HTTPExtractor` interface in the propagation API to extract information from an `HTTPSupplier` into a context. (#467) +- `HTTPInjector` interface in the propagation API to inject information into an `HTTPSupplier`. (#467) +- `Config` and configuring `Option` to the propagator API. (#467) +- `Propagators` interface in the propagation API to contain the set of injectors and extractors for all supported carrier formats. (#467) +- `HTTPPropagator` interface in the propagation API to inject and extract from an `HTTPSupplier`. (#467) +- `WithInjectors` and `WithExtractors` functions to the propagator API to configure injectors and extractors to use. (#467) +- `ExtractHTTP` and `InjectHTTP` functions to apply configured HTTP extractors and injectors to a passed context. (#467) +- Histogram aggregator. (#433) +- `DefaultPropagator` function that returns `trace.TraceContext` as the default context propagator. (#456) +- `AlwaysParentSample` sampler to the trace API. (#455) +- `WithNewRoot` option function to the trace API to specify the created span should be considered a root span. (#451) + +### Changed + +- Renamed `WithMap` to `ContextWithMap` in the correlation package. (#481) +- Renamed `FromContext` to `MapFromContext` in the correlation package. (#481) +- Move correlation context propagation to correlation package. (#479) +- Do not default to putting remote span context into links. (#480) +- `Tracer.WithSpan` updated to accept `StartOptions`. (#472) +- Renamed `MetricKind` to `Kind` to not stutter in the type usage. (#432) +- Renamed the `export` package to `metric` to match directory structure. (#432) +- Rename the `api/distributedcontext` package to `api/correlation`. (#444) +- Rename the `api/propagators` package to `api/propagation`. (#444) +- Move the propagators from the `propagators` package into the `trace` API package. (#444) +- Update `Float64Gauge`, `Int64Gauge`, `Float64Counter`, `Int64Counter`, `Float64Measure`, and `Int64Measure` metric methods to use value receivers instead of pointers. (#462) +- Moved all dependencies of tools package to a tools directory. (#466) + +### Removed + +- Binary propagators. (#467) +- NOOP propagator. (#467) + +### Fixed + +- Upgraded `github.com/golangci/golangci-lint` from `v1.21.0` to `v1.23.6` in `tools/`.
(#492) +- Fix a possible nil-dereference crash. (#478) +- Correct comments for `InstallNewPipeline` in the stdout exporter. (#483) +- Correct comments for `InstallNewPipeline` in the dogstatsd exporter. (#484) +- Correct comments for `InstallNewPipeline` in the prometheus exporter. (#482) +- Initialize `onError` based on `Config` in prometheus exporter. (#486) +- Correct module name in prometheus exporter README. (#475) +- Removed tracer name prefix from span names. (#430) +- Fix `aggregator_test.go` import package comment. (#431) +- Improved detail in stdout exporter. (#436) +- Fix a dependency issue (generate target should depend on stringer, not lint target) in Makefile. (#442) +- Reorder the Makefile targets within the `precommit` target so we generate files and build the code before linting, so we get much nicer errors about syntax errors from the compiler. (#442) +- Reword function documentation in gRPC plugin. (#446) +- Send the `span.kind` tag to Jaeger from the jaeger exporter. (#441) +- Fix `metadataSupplier` in the jaeger exporter to overwrite the header if it exists instead of appending to it. (#441) +- Upgraded to Go 1.13 in CI. (#465) +- Correct opentelemetry.io URL in trace SDK documentation. (#464) +- Refactored reference counting logic in SDK determination of stale records. (#468) +- Add call to `runtime.Gosched` in instrument `acquireHandle` logic to not block the collector. (#469) + +## [0.2.1.1] - 2020-01-13 + +### Fixed + +- Use stateful batcher on Prometheus exporter fixing regression introduced in #395. (#428) + +## [0.2.1] - 2020-01-08 + +### Added + +- Global meter forwarding implementation. + This enables deferred initialization for metric instruments registered before the first Meter SDK is installed. (#392) +- Global trace forwarding implementation. + This enables deferred initialization for tracers registered before the first Trace SDK is installed. (#406) +- Standardize export pipeline creation in all exporters. (#395) +- Testing, organization, and comments for 64-bit field alignment. (#418) +- Script to tag all modules in the project. (#414) + +### Changed + +- Renamed `propagation` package to `propagators`. (#362) +- Renamed `B3Propagator` propagator to `B3`. (#362) +- Renamed `TextFormatPropagator` propagator to `TextFormat`. (#362) +- Renamed `BinaryPropagator` propagator to `Binary`. (#362) +- Renamed `BinaryFormatPropagator` propagator to `BinaryFormat`. (#362) +- Renamed `NoopTextFormatPropagator` propagator to `NoopTextFormat`. (#362) +- Renamed `TraceContextPropagator` propagator to `TraceContext`. (#362) +- Renamed `SpanOption` to `StartOption` in the trace API. (#369) +- Renamed `StartOptions` to `StartConfig` in the trace API. (#369) +- Renamed `EndOptions` to `EndConfig` in the trace API. (#369) +- `Number` now has a pointer receiver for its methods. (#375) +- Renamed `CurrentSpan` to `SpanFromContext` in the trace API. (#379) +- Renamed `SetCurrentSpan` to `ContextWithSpan` in the trace API. (#379) +- Renamed `Message` in Event to `Name` in the trace API. (#389) +- Prometheus exporter no longer aggregates metrics; instead it only exports them. (#385) +- Renamed `HandleImpl` to `BoundInstrumentImpl` in the metric API. (#400) +- Renamed `Float64CounterHandle` to `Float64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Int64CounterHandle` to `Int64CounterBoundInstrument` in the metric API. (#400) +- Renamed `Float64GaugeHandle` to `Float64GaugeBoundInstrument` in the metric API.
(#400) +- Renamed `Int64GaugeHandle` to `Int64GaugeBoundInstrument` in the metric API. (#400) +- Renamed `Float64MeasureHandle` to `Float64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Int64MeasureHandle` to `Int64MeasureBoundInstrument` in the metric API. (#400) +- Renamed `Release` method for bound instruments in the metric API to `Unbind`. (#400) +- Renamed `AcquireHandle` method for bound instruments in the metric API to `Bind`. (#400) +- Renamed the `File` option in the stdout exporter to `Writer`. (#404) +- Renamed all `Options` to `Config` for all metric exports where this wasn't already the case. + +### Fixed + +- Aggregator import path corrected. (#421) +- Correct links in README. (#368) +- The README was updated to match latest code changes in its examples. (#374) +- Don't capitalize error statements. (#375) +- Fix ignored errors. (#375) +- Fix ambiguous variable naming. (#375) +- Removed unnecessary type casting. (#375) +- Use named parameters. (#375) +- Updated release schedule. (#378) +- Correct http-stackdriver example module name. (#394) +- Removed the `http.request` span in `httptrace` package. (#397) +- Add comments in the metrics SDK. (#399) +- Initialize checkpoint when creating ddsketch aggregator to prevent panic when merging into an empty one. (#402) (#403) +- Add documentation of compatible exporters in the README. (#405) +- Typo fix. (#408) +- Simplify span check logic in SDK tracer implementation. (#419) + +## [0.2.0] - 2019-12-03 + +### Added + +- Unary gRPC tracing example. (#351) +- Prometheus exporter. (#334) +- Dogstatsd metrics exporter. (#326) + +### Changed + +- Rename `MaxSumCount` aggregation to `MinMaxSumCount` and add the `Min` interface for this aggregation. (#352) +- Rename `GetMeter` to `Meter`. (#357) +- Rename `HTTPTraceContextPropagator` to `TraceContextPropagator`. (#355) +- Rename `HTTPB3Propagator` to `B3Propagator`. (#355) +- Move `/global` package to `/api/global`. (#356) +- Rename `GetTracer` to `Tracer`. (#347) + +### Removed + +- `SetAttribute` from the `Span` interface in the trace API. (#361) +- `AddLink` from the `Span` interface in the trace API. (#349) +- `Link` from the `Span` interface in the trace API. (#349) + +### Fixed + +- Exclude example directories from coverage report. (#365) +- Lint make target now implements automatic fixes with `golangci-lint` before a second run to report the remaining issues. (#360) +- Drop `GO111MODULE` environment variable in Makefile as Go 1.13 is the project's specified minimum version and this environment variable is not needed for that version of Go. (#359) +- Run the race checker for all tests. (#354) +- Redundant commands in the Makefile are removed. (#354) +- Split the `generate` and `lint` targets of the Makefile. (#354) +- Rename the `circle-ci` target to the more generic `ci` in the Makefile. (#354) +- Add example Prometheus binary to gitignore. (#358) +- Support negative numbers with the `MaxSumCount`. (#335) +- Resolve race conditions in `push_test.go` identified in #339. (#340) +- Use `/usr/bin/env bash` as a shebang in scripts rather than `/bin/bash`. (#336) +- Trace benchmark now tests both `AlwaysSample` and `NeverSample`. + Previously it was testing `AlwaysSample` twice. (#325) +- Trace benchmark now uses a `[]byte` for `TraceID` to fix failing test.
(#325) +- Added a trace benchmark to test variadic functions in `setAttribute` vs `setAttributes`. (#325) +- The `defaultkeys` batcher was only using the encoded label set as its map key while building a checkpoint. + This allowed distinct label sets through, but any metrics sharing a label set could be overwritten or merged incorrectly. + This was corrected. (#333) + +## [0.1.2] - 2019-11-18 + +### Fixed + +- Optimized the `simplelru` map for attributes to reduce the number of allocations. (#328) +- Removed unnecessary unslicing of parameters that are already a slice. (#324) + +## [0.1.1] - 2019-11-18 + +This release contains a Metrics SDK with stdout exporter and supports basic aggregations such as counter, gauges, array, maxsumcount, and ddsketch. + +### Added + +- Metrics stdout export pipeline. (#265) +- Array aggregation for raw measure metrics. (#282) +- The `core.Value` now has a `MarshalJSON` method. (#281) + +### Removed + +- `WithService`, `WithResources`, and `WithComponent` methods of tracers. (#314) +- Prefix slash in `Tracer.Start()` for the Jaeger example. (#292) + +### Changed + +- Allocation in LabelSet construction to reduce GC overhead. (#318) +- `trace.WithAttributes` to append values instead of replacing. (#315) +- Use a formula for tolerance in sampling tests. (#298) +- Move export types into trace and metric-specific sub-directories. (#289) +- `SpanKind` back to being based on an `int` type. (#288) + +### Fixed + +- URL to OpenTelemetry website in README. (#323) +- Name of othttp default tracer. (#321) +- `ExportSpans` for the stackdriver exporter now handles `nil` context. (#294) +- CI modules cache to correctly restore/save from/to the cache. (#316) +- Fix metric SDK race condition between `LoadOrStore` and the assignment `rec.recorder = i.meter.exporter.AggregatorFor(rec)`. (#293) +- README now reflects the new code structure introduced with these changes. (#291) +- Make the basic example work. (#279) + +## [0.1.0] - 2019-11-04 + +This is the first release of the open-telemetry go library. +It contains the API and SDK for trace and meter. + +### Added + +- Initial OpenTelemetry trace and metric API prototypes. +- Initial OpenTelemetry trace, metric, and export SDK packages. +- A wireframe bridge to support compatibility with OpenTracing. +- Example code for a basic, http-stackdriver, http, jaeger, and named tracer setup. +- Exporters for Jaeger, Stackdriver, and stdout. +- Propagators for binary, B3, and trace-context protocols. +- Project information and guidelines in the form of a README and CONTRIBUTING. +- Tools to build the project and a Makefile to automate the process. +- Apache-2.0 license. +- CircleCI build CI manifest files. +- CODEOWNERS file to track owners of this project.
+ +[Unreleased]: https://github.com/open-telemetry/opentelemetry-go/compare/v1.4.1...HEAD +[1.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.1 +[1.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.4.0 +[1.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.3.0 +[1.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.2.0 +[1.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.1.0 +[1.0.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.1 +[Metrics 0.24.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.24.0 +[1.0.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0 +[1.0.0-RC3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC3 +[1.0.0-RC2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC2 +[Experimental Metrics v0.22.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/metric/v0.22.0 +[1.0.0-RC1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v1.0.0-RC1 +[0.20.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.20.0 +[0.19.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.19.0 +[0.18.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.18.0 +[0.17.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.17.0 +[0.16.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.16.0 +[0.15.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.15.0 +[0.14.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.14.0 +[0.13.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.13.0 +[0.12.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.12.0 +[0.11.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.11.0 +[0.10.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.10.0 +[0.9.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.9.0 +[0.8.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.8.0 +[0.7.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.7.0 +[0.6.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.6.0 +[0.5.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.5.0 +[0.4.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.3 +[0.4.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.2 +[0.4.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.1 +[0.4.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.4.0 +[0.3.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.3.0 +[0.2.3]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.3 +[0.2.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.2 +[0.2.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1.1 +[0.2.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.1 +[0.2.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.2.0 +[0.1.2]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.2 +[0.1.1]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.1 +[0.1.0]: https://github.com/open-telemetry/opentelemetry-go/releases/tag/v0.1.0 diff --git 
a/vendor/go.opentelemetry.io/otel/CODEOWNERS b/vendor/go.opentelemetry.io/otel/CODEOWNERS new file mode 100644 index 0000000000..808755fe20 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CODEOWNERS @@ -0,0 +1,17 @@ +##################################################### +# +# List of approvers for this repository +# +##################################################### +# +# Learn about membership in OpenTelemetry community: +# https://github.com/open-telemetry/community/blob/main/community-membership.md +# +# +# Learn about CODEOWNERS file format: +# https://help.github.com/en/articles/about-code-owners +# + +* @jmacd @MrAlias @Aneurysm9 @evantorrie @XSAM @dashpole @paivagustavo @MadVikingGod @pellared + +CODEOWNERS @MrAlias @Aneurysm9 @MadVikingGod diff --git a/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md new file mode 100644 index 0000000000..7dead7084d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/CONTRIBUTING.md @@ -0,0 +1,521 @@ +# Contributing to opentelemetry-go + +The Go special interest group (SIG) meets regularly. See the +OpenTelemetry +[community](https://github.com/open-telemetry/community#golang-sdk) +repo for information on this and other language SIGs. + +See the [public meeting +notes](https://docs.google.com/document/d/1A63zSWX0x2CyCK_LoNhmQC4rqhLpYXJzXbEPDUQ2n6w/edit#heading=h.9tngw7jdwd6b) +for a summary description of past meetings. To request edit access, +join the meeting or get in touch on +[Slack](https://cloud-native.slack.com/archives/C01NPAXACKT). + +## Development + +You can view and edit the source code by cloning this repository: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go.git +``` + +Run `make test` to run the tests instead of `go test`. + +There are some generated files checked into the repo. To make sure +that the generated files are up-to-date, run `make` (or `make +precommit` - the `precommit` target is the default). + +The `precommit` target also fixes the formatting of the code and +checks the status of the go module files. + +If after running `make precommit` the output of `git status` contains +`nothing to commit, working tree clean` then it means that everything +is up-to-date and properly formatted. + +## Pull Requests + +### How to Send Pull Requests + +Everyone is welcome to contribute code to `opentelemetry-go` via +GitHub pull requests (PRs). + +To create a new PR, fork the project in GitHub and clone the upstream +repo: + +```sh +go get -d go.opentelemetry.io/otel +``` + +(This may print some warning about "build constraints exclude all Go +files"; just ignore it.) + +This will put the project in `${GOPATH}/src/go.opentelemetry.io/otel`. You +can alternatively use `git` directly with: + +```sh +git clone https://github.com/open-telemetry/opentelemetry-go +``` + +(Note that `git clone` is *not* using the `go.opentelemetry.io/otel` name - +that name is a kind of a redirector to GitHub that `go get` can +understand, but `git` does not.) + +This would put the project in the `opentelemetry-go` directory in +the current working directory.
+ +Enter the newly created directory and add your fork as a new remote: + +```sh +git remote add <YOUR_FORK> git@github.com:<YOUR_GITHUB_USERNAME>/opentelemetry-go +``` + +Check out a new branch, make modifications, run linters and tests, update +`CHANGELOG.md`, and push the branch to your fork: + +```sh +git checkout -b <YOUR_BRANCH_NAME> +# edit files +# update changelog +make precommit +git add -p +git commit +git push <YOUR_FORK> <YOUR_BRANCH_NAME> +``` + +Open a pull request against the main `opentelemetry-go` repo. Be sure to add the pull +request ID to the entry you added to `CHANGELOG.md`. + +### How to Receive Comments + +* If the PR is not ready for review, please put `[WIP]` in the title, + tag it as `work-in-progress`, or mark it as + [`draft`](https://github.blog/2019-02-14-introducing-draft-pull-requests/). +* Make sure the CLA is signed and CI is clear. + +### How to Get PRs Merged + +A PR is considered to be **ready to merge** when: + +* It has received two approvals from Collaborators/Maintainers (at + different companies). This is not enforced through technical means + and a PR may be **ready to merge** with a single approval if the change + and its approach have been discussed and consensus reached. +* Feedback has been addressed. +* Any substantive changes to your PR will require that you clear any prior + Approval reviews; this includes changes resulting from other feedback. Unless + the approver explicitly stated that their approval will persist across + changes it should be assumed that the PR needs their review again. Other + project members (e.g. approvers, maintainers) can help with this if there are + any questions or if you forget to clear reviews. +* It has been open for review for at least one working day. This gives + people reasonable time to review. +* Trivial changes (typo, cosmetic, doc, etc.) do not have to wait for + one day and may be merged with a single Maintainer's approval. +* `CHANGELOG.md` has been updated to reflect what has been + added, changed, removed, or fixed. +* `README.md` has been updated if necessary. +* An urgent fix can take exception as long as it has been actively + communicated. + +Any Maintainer can merge the PR once it is **ready to merge**. + +## Design Choices + +As with other OpenTelemetry clients, opentelemetry-go follows the +[opentelemetry-specification](https://github.com/open-telemetry/opentelemetry-specification). + +It's especially valuable to read through the [library +guidelines](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/library-guidelines.md). + +### Focus on Capabilities, Not Structure Compliance + +OpenTelemetry is an evolving specification, one where the desires and +use cases are clear, but the method to satisfy those use cases is +not. + +As such, contributions should provide functionality and behavior that +conform to the specification, but the interface and structure are +flexible. + +It is preferable to have contributions follow the idioms of the +language rather than conform to specific API names or argument +patterns in the spec. + +For a deeper discussion, see +[this](https://github.com/open-telemetry/opentelemetry-specification/issues/165). + +## Documentation + +Each non-example Go Module should have its own `README.md` containing: + +- A pkg.go.dev badge which can be generated [here](https://pkg.go.dev/badge/). +- Brief description. +- Installation instructions (and requirements if applicable). +- Hyperlink to an example.
Depending on the component, the example can be: + - An `example_test.go` like [here](exporters/stdout/stdouttrace/example_test.go). + - A sample Go application with its own `README.md`, like [here](example/zipkin). +- Additional documentation sections such as: + - Configuration, + - Contributing, + - References. + +[Here](exporters/jaeger/README.md) is an example of a concise `README.md`. + +Moreover, it should be possible to navigate to any `README.md` from the +root `README.md`. + +## Style Guide + +One of the primary goals of this project is that it is actually used by +developers. With this goal in mind, the project strives to build +user-friendly and idiomatic Go code adhering to the Go community's best +practices. + +For a non-comprehensive but foundational overview of these best practices, +the [Effective Go](https://golang.org/doc/effective_go.html) documentation +is an excellent starting place. + +As a convenience for developers building this project, the `make precommit` +target will format, lint, validate, and in some cases fix the changes you plan to +submit. This check will need to pass for your changes to be able to be +merged. + +In addition to idiomatic Go, the project has adopted certain standards for +implementations of common patterns. These standards should be followed as a +default, and if they are not followed documentation needs to be included as +to the reasons why. + +### Configuration + +When creating an instantiation function for a complex `type T struct`, it is +useful to allow a variable number of options to be applied. However, the strong +type system of Go restricts the function design options. There are a few ways +to solve this problem, but we have landed on the following design. + +#### `config` + +Configuration should be held in a `struct` named `config`, or prefixed with the +specific type name this configuration applies to if there are multiple +`config`s in the package. This type must contain configuration options. + +```go +// config contains configuration options for a thing. +type config struct { + // options ... +} +``` + +In general, the `config` type will not need to be used outside of the +package and should be unexported. If, however, it is expected that the user +will likely want to build custom options for the configuration, the `config` +should be exported. Please include in the documentation for the `config` +how the user can extend the configuration. + +It is important that internal `config`s are not shared across package boundaries. +That is, a `config` from one package should not be directly used by another. The +one exception is the API packages. The configs from the base API, e.g. +`go.opentelemetry.io/otel/trace.TracerConfig` and +`go.opentelemetry.io/otel/metric.InstrumentConfig`, are intended to be consumed +by the SDK, therefore it is expected that these are exported. + +When a config is exported we want to maintain forward and backward +compatibility; to achieve this, no fields should be exported, but they should +instead be accessed by methods. + +Optionally, it is common to include a `newConfig` function (with the same +naming scheme). This function wraps any default settings and loops over +all options to create a configured `config`. + +```go +// newConfig returns an appropriately configured config. +func newConfig(options ...Option) config { + // Set default values for config. + config := config{/* […] */} + for _, option := range options { + config = option.apply(config) + } + // Perform any validation here.
+ return config +} +``` + +If validation of the `config` options is also performed, the function can +return an error as well that is expected to be handled by the instantiation function +or propagated to the user. + +Given the design goal of not having the user need to work with the `config`, +the `newConfig` function should also be unexported. + +#### `Option` + +To set the value of the options a `config` contains, a corresponding +`Option` interface type should be used. + +```go +type Option interface { + apply(config) config +} +``` + +Having `apply` unexported makes sure that it will not be used externally. +Moreover, the interface becomes sealed so the user cannot easily implement +the interface on their own. + +The `apply` method should return a modified version of the passed config. +This approach, instead of passing a pointer, is used to prevent the config from being allocated to the heap. + +The name of the interface should be prefixed in the same way the +corresponding `config` is (if at all). + +#### Options + +All user configurable options for a `config` must have a related unexported +implementation of the `Option` interface and an exported configuration +function that wraps this implementation. + +The wrapping function name should be prefixed with `With*` (or in the +special case of boolean options `Without*`) and should have the following +function signature. + +```go +func With*(…) Option { … } +``` + +##### `bool` Options + +```go +type defaultFalseOption bool + +func (o defaultFalseOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithOption sets a T to have an option included. +func WithOption() Option { + return defaultFalseOption(true) +} +``` + +```go +type defaultTrueOption bool + +func (o defaultTrueOption) apply(c config) config { + c.Bool = bool(o) + return c +} + +// WithoutOption sets a T to have the Bool option excluded. +func WithoutOption() Option { + return defaultTrueOption(false) +} +``` + +##### Declared Type Options + +```go +type myTypeOption struct { + MyType MyType +} + +func (o myTypeOption) apply(c config) config { + c.MyType = o.MyType + return c +} + +// WithMyType sets T to include MyType. +func WithMyType(t MyType) Option { + return myTypeOption{t} +} +``` + +##### Functional Options + +```go +type optionFunc func(config) config + +func (fn optionFunc) apply(c config) config { + return fn(c) +} + +// WithMyType sets t as MyType. +func WithMyType(t MyType) Option { + return optionFunc(func(c config) config { + c.MyType = t + return c + }) +} +``` + +#### Instantiation + +Use this configuration pattern to configure instantiation with a `NewT` +function. + +```go +func NewT(options ...Option) T {…} +``` + +Any required parameters can be declared before the variadic `options`. + +#### Dealing with Overlap + +Sometimes there are multiple complex `struct`s that share common +configuration and also have distinct configuration. To avoid repeated +portions of `config`s, a common `config` can be used with the union of +options being handled with the `Option` interface. + +For example: + +```go +// config holds options for all animals. +type config struct { + Weight float64 + Color string + MaxAltitude float64 +} + +// DogOption applies Dog-specific options. +type DogOption interface { + applyDog(config) config +} + +// BirdOption applies Bird-specific options. +type BirdOption interface { + applyBird(config) config +} + +// Option applies options for all animals.
+type Option interface { + BirdOption + DogOption +} + +type weightOption float64 + +func (o weightOption) applyDog(c config) config { + c.Weight = float64(o) + return c +} + +func (o weightOption) applyBird(c config) config { + c.Weight = float64(o) + return c +} + +func WithWeight(w float64) Option { return weightOption(w) } + +type furColorOption string + +func (o furColorOption) applyDog(c config) config { + c.Color = string(o) + return c +} + +func WithFurColor(c string) DogOption { return furColorOption(c) } + +type maxAltitudeOption float64 + +func (o maxAltitudeOption) applyBird(c config) config { + c.MaxAltitude = float64(o) + return c +} + +func WithMaxAltitude(a float64) BirdOption { return maxAltitudeOption(a) } + +func NewDog(name string, o ...DogOption) Dog {…} +func NewBird(name string, o ...BirdOption) Bird {…} +``` + +### Interfaces + +To allow other developers to better comprehend the code, it is important +to ensure it is sufficiently documented. One simple measure that contributes +to this aim is self-documenting by naming method parameters. Therefore, +where appropriate, methods of every exported interface type should have +their parameters appropriately named. + +#### Interface Stability + +All exported stable interfaces that include the following warning in their +documentation are allowed to be extended with additional methods. + +> Warning: methods may be added to this interface in minor releases. + +Otherwise, stable interfaces MUST NOT be modified. + +If new functionality is needed for an interface that cannot be changed it MUST +be added by including an additional interface. That added interface can be a +simple interface for the specific functionality that you want to add or it can +be a super-set of the original interface. For example, if you wanted to add a +`Close` method to the `Exporter` interface: + +```go +type Exporter interface { + Export() +} +``` + +A new interface, `Closer`, can be added: + +```go +type Closer interface { + Close() +} +``` + +Code that is passed the `Exporter` interface can now check to see if the passed +value also satisfies the new interface. For example: + +```go +func caller(e Exporter) { + /* ... */ + if c, ok := e.(Closer); ok { + c.Close() + } + /* ... */ +} +``` + +Alternatively, a new type that is the super-set of an `Exporter` can be created. + +```go +type ClosingExporter struct { + Exporter +} + +func (e ClosingExporter) Close() { /* ... */ } +``` + +This new type can be used similarly to the simple interface above in that a +passed `Exporter` type can be asserted to satisfy the `ClosingExporter` type +and the `Close` method called (a sketch follows at the end of this section). + +This super-set approach can be useful if there is explicit behavior that needs +to be coupled with the original type and passed as a unified type to a new +function, but, because of this coupling, it also limits the applicability of +the added functionality. If there exist other interfaces where this +functionality should be added, each one will need its own super-set +interface and will duplicate the pattern. For this reason, the simple targeted +interface that defines the specific functionality should be preferred.
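+ +For illustration only (this sketch is not part of the upstream contributing guide), the assertion described above, assuming the `ClosingExporter` type shown, could look like the following: + +```go +func caller(e Exporter) { + /* ... */ + // Type-assert the concrete super-set type and close it if it matches. + if ce, ok := e.(ClosingExporter); ok { + ce.Close() + } + /* ... */ +} +```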
+ +## Approvers and Maintainers + +Approvers: + +- [Evan Torrie](https://github.com/evantorrie), Verizon Media +- [Josh MacDonald](https://github.com/jmacd), LightStep +- [Sam Xie](https://github.com/XSAM), Cisco/AppDynamics +- [David Ashpole](https://github.com/dashpole), Google +- [Gustavo Silva Paiva](https://github.com/paivagustavo), LightStep +- [Robert Pająk](https://github.com/pellared), Splunk + +Maintainers: + +- [Aaron Clawson](https://github.com/MadVikingGod), LightStep +- [Anthony Mirabella](https://github.com/Aneurysm9), AWS +- [Tyler Yahn](https://github.com/MrAlias), Splunk + +### Become an Approver or a Maintainer + +See the [community membership document in OpenTelemetry community +repo](https://github.com/open-telemetry/community/blob/main/community-membership.md). diff --git a/vendor/go.opentelemetry.io/otel/LICENSE b/vendor/go.opentelemetry.io/otel/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/Makefile b/vendor/go.opentelemetry.io/otel/Makefile new file mode 100644 index 0000000000..b085561dba --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/Makefile @@ -0,0 +1,212 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +TOOLS_MOD_DIR := ./internal/tools + +ALL_DOCS := $(shell find . -name '*.md' -type f | sort) +ALL_GO_MOD_DIRS := $(shell find . -type f -name 'go.mod' -exec dirname {} \; | sort) +OTEL_GO_MOD_DIRS := $(filter-out $(TOOLS_MOD_DIR), $(ALL_GO_MOD_DIRS)) +ALL_COVERAGE_MOD_DIRS := $(shell find . 
-type f -name 'go.mod' -exec dirname {} \; | egrep -v '^./example|^$(TOOLS_MOD_DIR)' | sort) + +GO = go +TIMEOUT = 60 + +.DEFAULT_GOAL := precommit + +.PHONY: precommit ci +precommit: license-check misspell go-mod-tidy golangci-lint-fix test-default +ci: dependabot-check license-check lint vanity-import-check build test-default check-clean-work-tree test-coverage + +# Tools + +TOOLS = $(CURDIR)/.tools + +$(TOOLS): + @mkdir -p $@ +$(TOOLS)/%: | $(TOOLS) + cd $(TOOLS_MOD_DIR) && \ + $(GO) build -o $@ $(PACKAGE) + +MULTIMOD = $(TOOLS)/multimod +$(TOOLS)/multimod: PACKAGE=go.opentelemetry.io/build-tools/multimod + +SEMCONVGEN = $(TOOLS)/semconvgen +$(TOOLS)/semconvgen: PACKAGE=go.opentelemetry.io/build-tools/semconvgen + +CROSSLINK = $(TOOLS)/crosslink +$(TOOLS)/crosslink: PACKAGE=go.opentelemetry.io/otel/$(TOOLS_MOD_DIR)/crosslink + +GOLANGCI_LINT = $(TOOLS)/golangci-lint +$(TOOLS)/golangci-lint: PACKAGE=github.com/golangci/golangci-lint/cmd/golangci-lint + +MISSPELL = $(TOOLS)/misspell +$(TOOLS)/misspell: PACKAGE=github.com/client9/misspell/cmd/misspell + +GOCOVMERGE = $(TOOLS)/gocovmerge +$(TOOLS)/gocovmerge: PACKAGE=github.com/wadey/gocovmerge + +STRINGER = $(TOOLS)/stringer +$(TOOLS)/stringer: PACKAGE=golang.org/x/tools/cmd/stringer + +PORTO = $(TOOLS)/porto +$(TOOLS)/porto: PACKAGE=github.com/jcchavezs/porto/cmd/porto + +GOJQ = $(TOOLS)/gojq +$(TOOLS)/gojq: PACKAGE=github.com/itchyny/gojq/cmd/gojq + +.PHONY: tools +tools: $(CROSSLINK) $(GOLANGCI_LINT) $(MISSPELL) $(GOCOVMERGE) $(STRINGER) $(PORTO) $(GOJQ) $(SEMCONVGEN) $(MULTIMOD) + +# Build + +.PHONY: generate build + +generate: $(OTEL_GO_MOD_DIRS:%=generate/%) +generate/%: DIR=$* +generate/%: | $(STRINGER) $(PORTO) + @echo "$(GO) generate $(DIR)/..." \ + && cd $(DIR) \ + && PATH="$(TOOLS):$${PATH}" $(GO) generate ./... && $(PORTO) -w . + +build: generate $(OTEL_GO_MOD_DIRS:%=build/%) $(OTEL_GO_MOD_DIRS:%=build-tests/%) +build/%: DIR=$* +build/%: + @echo "$(GO) build $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) build ./... + +build-tests/%: DIR=$* +build-tests/%: + @echo "$(GO) build tests $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -vet=off -run xxxxxMatchNothingxxxxx >/dev/null + +# Tests + +TEST_TARGETS := test-default test-bench test-short test-verbose test-race +.PHONY: $(TEST_TARGETS) test +test-default test-race: ARGS=-race +test-bench: ARGS=-run=xxxxxMatchNothingxxxxx -test.benchtime=1ms -bench=. +test-short: ARGS=-short +test-verbose: ARGS=-v -race +$(TEST_TARGETS): test +test: $(OTEL_GO_MOD_DIRS:%=test/%) +test/%: DIR=$* +test/%: + @echo "$(GO) test -timeout $(TIMEOUT)s $(ARGS) $(DIR)/..." \ + && cd $(DIR) \ + && $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -timeout $(TIMEOUT)s $(ARGS) + +COVERAGE_MODE = atomic +COVERAGE_PROFILE = coverage.out +.PHONY: test-coverage +test-coverage: | $(GOCOVMERGE) + @set -e; \ + printf "" > coverage.txt; \ + for dir in $(ALL_COVERAGE_MOD_DIRS); do \ + echo "$(GO) test -coverpkg=go.opentelemetry.io/otel/... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" $${dir}/..."; \ + (cd "$${dir}" && \ + $(GO) list ./... \ + | grep -v third_party \ + | xargs $(GO) test -coverpkg=./... -covermode=$(COVERAGE_MODE) -coverprofile="$(COVERAGE_PROFILE)" && \ + $(GO) tool cover -html=coverage.out -o coverage.html); \ + done; \ + $(GOCOVMERGE) $$(find . 
-name coverage.out) > coverage.txt + +.PHONY: golangci-lint golangci-lint-fix +golangci-lint-fix: ARGS=--fix +golangci-lint-fix: golangci-lint +golangci-lint: $(OTEL_GO_MOD_DIRS:%=golangci-lint/%) +golangci-lint/%: DIR=$* +golangci-lint/%: | $(GOLANGCI_LINT) + @echo 'golangci-lint $(if $(ARGS),$(ARGS) ,)$(DIR)' \ + && cd $(DIR) \ + && $(GOLANGCI_LINT) run --allow-serial-runners $(ARGS) + +.PHONY: crosslink +crosslink: | $(CROSSLINK) + @echo "cross-linking all go modules" \ + && $(CROSSLINK) + +.PHONY: go-mod-tidy +go-mod-tidy: $(ALL_GO_MOD_DIRS:%=go-mod-tidy/%) +go-mod-tidy/%: DIR=$* +go-mod-tidy/%: | crosslink + @echo "$(GO) mod tidy in $(DIR)" \ + && cd $(DIR) \ + && $(GO) mod tidy + +.PHONY: lint-modules +lint-modules: go-mod-tidy + +.PHONY: lint +lint: misspell lint-modules golangci-lint + +.PHONY: vanity-import-check +vanity-import-check: | $(PORTO) + @$(PORTO) --include-internal -l . + +.PHONY: misspell +misspell: | $(MISSPELL) + @$(MISSPELL) -w $(ALL_DOCS) + +.PHONY: license-check +license-check: + @licRes=$$(for f in $$(find . -type f \( -iname '*.go' -o -iname '*.sh' \) ! -path '**/third_party/*') ; do \ + awk '/Copyright The OpenTelemetry Authors|generated|GENERATED/ && NR<=3 { found=1; next } END { if (!found) print FILENAME }' $$f; \ + done); \ + if [ -n "$${licRes}" ]; then \ + echo "license header checking failed:"; echo "$${licRes}"; \ + exit 1; \ + fi + +DEPENDABOT_PATH=./.github/dependabot.yml +.PHONY: dependabot-check +dependabot-check: + @result=$$( \ + for f in $$( find . -type f -name go.mod -exec dirname {} \; | sed 's/^.//' ); \ + do grep -q "directory: \+$$f" $(DEPENDABOT_PATH) \ + || echo "$$f"; \ + done; \ + ); \ + if [ -n "$$result" ]; then \ + echo "missing dependabot entry:"; echo "$$result"; \ + echo "new modules need to be added to the $(DEPENDABOT_PATH) file"; \ + exit 1; \ + fi + +.PHONY: check-clean-work-tree +check-clean-work-tree: + @if ! git diff --quiet; then \ + echo; \ + echo 'Working tree is not clean, did you forget to run "make precommit"?'; \ + echo; \ + git status; \ + exit 1; \ + fi + +.PHONY: prerelease +prerelease: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) prerelease -m ${MODSET} + +COMMIT ?= "HEAD" +.PHONY: add-tags +add-tags: | $(MULTIMOD) + @[ "${MODSET}" ] || ( echo ">> env var MODSET is not set"; exit 1 ) + $(MULTIMOD) verify && $(MULTIMOD) tag -m ${MODSET} -c ${COMMIT} diff --git a/vendor/go.opentelemetry.io/otel/README.md b/vendor/go.opentelemetry.io/otel/README.md new file mode 100644 index 0000000000..21c2a71612 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/README.md @@ -0,0 +1,102 @@ +# OpenTelemetry-Go + +[![CI](https://github.com/open-telemetry/opentelemetry-go/workflows/ci/badge.svg)](https://github.com/open-telemetry/opentelemetry-go/actions?query=workflow%3Aci+branch%3Amain) +[![codecov.io](https://codecov.io/gh/open-telemetry/opentelemetry-go/coverage.svg?branch=main)](https://app.codecov.io/gh/open-telemetry/opentelemetry-go?branch=main) +[![PkgGoDev](https://pkg.go.dev/badge/go.opentelemetry.io/otel)](https://pkg.go.dev/go.opentelemetry.io/otel) +[![Go Report Card](https://goreportcard.com/badge/go.opentelemetry.io/otel)](https://goreportcard.com/report/go.opentelemetry.io/otel) +[![Slack](https://img.shields.io/badge/slack-@cncf/otel--go-brightgreen.svg?logo=slack)](https://cloud-native.slack.com/archives/C01NPAXACKT) + +OpenTelemetry-Go is the [Go](https://golang.org/) implementation of [OpenTelemetry](https://opentelemetry.io/). 
+It provides a set of APIs to directly measure performance and behavior of your software and send this data to observability platforms.
+
+## Project Status
+
+| Signal  | Status     | Project |
+| ------- | ---------- | ------- |
+| Traces  | Stable     | N/A     |
+| Metrics | Alpha      | N/A     |
+| Logs    | Frozen [1] | N/A     |
+
+- [1]: The Logs signal development is halted for this project while we develop both Traces and Metrics.
+  No Logs Pull Requests are currently being accepted.
+
+Progress and status specific to this repository are tracked in our local
+[project boards](https://github.com/open-telemetry/opentelemetry-go/projects)
+and
+[milestones](https://github.com/open-telemetry/opentelemetry-go/milestones).
+
+Project versioning information and stability guarantees can be found in the
+[versioning documentation](./VERSIONING.md).
+
+### Compatibility
+
+OpenTelemetry-Go attempts to track the current supported versions of the
+[Go language](https://golang.org/doc/devel/release#policy). The release
+schedule after a new minor version of Go is as follows:
+
+- The first release or one month, whichever is sooner, will add build steps for the new Go version.
+- The first release after three months will remove support for the oldest Go version.
+
+This project is tested on the following systems.
+
+| OS      | Go Version | Architecture |
+| ------- | ---------- | ------------ |
+| Ubuntu  | 1.17       | amd64        |
+| Ubuntu  | 1.16       | amd64        |
+| Ubuntu  | 1.17       | 386          |
+| Ubuntu  | 1.16       | 386          |
+| MacOS   | 1.17       | amd64        |
+| MacOS   | 1.16       | amd64        |
+| Windows | 1.17       | amd64        |
+| Windows | 1.16       | amd64        |
+| Windows | 1.17       | 386          |
+| Windows | 1.16       | 386          |
+
+While this project should work for other systems, no compatibility guarantees are made for those systems currently.
+
+## Getting Started
+
+You can find a getting started guide on [opentelemetry.io](https://opentelemetry.io/docs/go/getting-started/).
+
+OpenTelemetry's goal is to provide a single set of APIs to capture distributed traces and metrics from your application and send them to an observability platform. This project allows you to do just that for applications written in Go. There are two steps to this process: instrument your application, and configure an exporter.
+
+### Instrumentation
+
+To start capturing distributed traces and metric events from your application it first needs to be instrumented. The easiest way to do this is by using an instrumentation library for your code. Be sure to check out [the officially supported instrumentation libraries](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/instrumentation).
+
+If you need to extend the telemetry an instrumentation library provides or want to build your own instrumentation for your application directly, you will need to use the [go.opentelemetry.io/otel/api](https://pkg.go.dev/go.opentelemetry.io/otel/api) package. The included [examples](./example/) are a good way to see some practical uses of this process.
+
+### Export
+
+Now that your application is instrumented to collect telemetry, it needs an export pipeline to send that telemetry to an observability platform.
+
+All officially supported exporters for the OpenTelemetry project are contained in the [exporters directory](./exporters).
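+
+As a minimal sketch of such a pipeline (this example assumes the `stdouttrace` exporter under `exporters/stdout/stdouttrace` and the `go.opentelemetry.io/otel/sdk/trace` package; exact package paths and options vary by release), an application might wire a tracer provider like this:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
+	sdktrace "go.opentelemetry.io/otel/sdk/trace"
+)
+
+func main() {
+	// Exporter that writes spans to stdout; useful for local inspection.
+	exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	// TracerProvider batches finished spans and hands them to the exporter.
+	tp := sdktrace.NewTracerProvider(sdktrace.WithBatcher(exp))
+	defer func() { _ = tp.Shutdown(context.Background()) }()
+	otel.SetTracerProvider(tp)
+
+	// Instrumentation obtains tracers from the global provider.
+	_, span := otel.Tracer("example").Start(context.Background(), "operation")
+	span.End()
+}
+```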
+
+| Exporter                              | Metrics | Traces |
+| :-----------------------------------: | :-----: | :----: |
+| [Jaeger](./exporters/jaeger/)         |         | ✓      |
+| [OTLP](./exporters/otlp/)             | ✓       | ✓      |
+| [Prometheus](./exporters/prometheus/) | ✓       |        |
+| [stdout](./exporters/stdout/)         | ✓       | ✓      |
+| [Zipkin](./exporters/zipkin/)         |         | ✓      |
+
+Additionally, OpenTelemetry community supported exporters can be found in the [contrib repository](https://github.com/open-telemetry/opentelemetry-go-contrib/tree/main/exporters).
+
+## Contributing
+
+See the [contributing documentation](CONTRIBUTING.md).
diff --git a/vendor/go.opentelemetry.io/otel/RELEASING.md b/vendor/go.opentelemetry.io/otel/RELEASING.md
new file mode 100644
index 0000000000..e3bff66c6a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/RELEASING.md
@@ -0,0 +1,132 @@
+# Release Process
+
+## Semantic Convention Generation
+
+If a new version of the OpenTelemetry Specification has been released it will be necessary to generate a new
+semantic convention package from the YAML definitions in the specification repository. There is a `semconvgen` utility
+installed by `make tools` that can be used to generate a package with the name matching the specification
+version number under the `semconv` package. This will ideally be done soon after the specification release is
+tagged. Make sure that the specification repo contains a checkout of the latest tagged release so that the
+generated files match the released semantic conventions.
+
+There are currently two categories of semantic conventions that must be generated, `resource` and `trace`.
+
+```
+.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/resource -t semconv/template.j2
+.tools/semconvgen -i /path/to/specification/repo/semantic_conventions/trace -t semconv/template.j2
+```
+
+Using default values for all options other than `input` will result in using the `template.j2` template to
+generate `resource.go` and `trace.go` in `/path/to/otelgo/repo/semconv/`.
+
+There are several ancillary files that are not generated and should be copied into the new package from the
+prior package, with updates made as appropriate to canonical import path statements and constant values.
+These files include:
+
+* doc.go
+* exception.go
+* http(_test)?.go
+* schema.go
+
+Uses of the previous schema version in this repository should be updated to use the newly generated version.
+No tooling for this exists at present, so use find/replace in your editor of choice or craft a `grep | sed`
+pipeline if you like living on the edge.
+
+## Pre-Release
+
+First, decide which module sets will be released and update their versions
+in `versions.yaml`. Commit this change to a new branch.
+
+Update go.mod for submodules to depend on the new release, which will happen in the next step.
+
+1. Run the `prerelease` make target. It creates a branch
+   `prerelease__` that will contain all release changes.
+
+   ```
+   make prerelease MODSET=
+   ```
+
+2. Verify the changes.
+
+   ```
+   git diff ...prerelease__
+   ```
+
+   This should have changed the version for all modules to be ``.
+   If these changes look correct, merge them into your pre-release branch:
+
+   ```
+   git merge prerelease__
+   ```
+
+3. Update the [Changelog](./CHANGELOG.md).
+   - Make sure all relevant changes for this release are included and are in language that non-contributors to the project can understand.
+     To verify this, you can look directly at the commits since the ``.
+
+     ```
+     git --no-pager log --pretty=oneline "..HEAD"
+     ```
+
+   - Move all the `Unreleased` changes into a new section following the title scheme (`[] - `).
+   - Update all the appropriate links at the bottom.
+
+4. Push the changes to upstream and create a Pull Request on GitHub.
+   Be sure to include the curated changes from the [Changelog](./CHANGELOG.md) in the description.
+
+## Tag
+
+Once the Pull Request with all the version changes has been approved and merged, it is time to tag the merged commit.
+
+***IMPORTANT***: It is critical you use the same tag that you used in the Pre-Release step!
+Failure to do so will leave things in a broken state. As long as you do not
+change `versions.yaml` between pre-release and this step, things should be fine.
+
+***IMPORTANT***: [There is currently no way to remove an incorrectly tagged version of a Go module](https://github.com/golang/go/issues/34189).
+It is critical you make sure the version you push upstream is correct.
+[Failure to do so will lead to minor emergencies and tough to work around](https://github.com/open-telemetry/opentelemetry-go/issues/331).
+
+1. For each module set that will be released, run the `add-tags` make target
+   using the `` of the commit on the main branch for the merged Pull Request.
+
+   ```
+   make add-tags MODSET= COMMIT=
+   ```
+
+   It should only be necessary to provide an explicit `COMMIT` value if the
+   current `HEAD` of your working directory is not the correct commit.
+
+2. Push tags to the upstream remote (not your fork: `github.com/open-telemetry/opentelemetry-go.git`).
+   Make sure you push all sub-modules as well.
+
+   ```
+   git push upstream
+   git push upstream
+   ...
+   ```
+
+## Release
+
+Finally create a Release for the new `` on GitHub.
+The release body should include all the release notes from the Changelog for this release.
+
+## Verify Examples
+
+After releasing, verify that examples build outside of the repository.
+
+```
+./verify_examples.sh
+```
+
+The script copies examples into a different directory, removes any `replace` declarations in `go.mod`, and builds them.
+This ensures they build with the published release, not the local copy.
+
+## Post-Release
+
+### Contrib Repository
+
+Once verified, be sure to [make a release for the `contrib` repository](https://github.com/open-telemetry/opentelemetry-go-contrib/blob/main/RELEASING.md) that uses this release.
+
+### Website Documentation
+
+Update [the documentation](./website_docs) for [the OpenTelemetry website](https://opentelemetry.io/docs/go/).
+Importantly, bump any package versions referenced to be the latest one you just released and ensure all code examples still compile and are accurate.
diff --git a/vendor/go.opentelemetry.io/otel/VERSIONING.md b/vendor/go.opentelemetry.io/otel/VERSIONING.md
new file mode 100644
index 0000000000..412f1e362b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/VERSIONING.md
@@ -0,0 +1,224 @@
+# Versioning
+
+This document describes the versioning policy for this repository. This policy
+is designed so the following goals can be achieved.
+
+**Users are provided a codebase of value that is stable and secure.**
+
+## Policy
+
+* Versioning of this project will be idiomatic of a Go project using [Go
+  modules](https://github.com/golang/go/wiki/Modules).
+  * [Semantic import
+    versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning)
+    will be used.
+  * Versions will comply with [semver
+    2.0](https://semver.org/spec/v2.0.0.html) with the following exceptions.
+ * New methods may be added to exported API interfaces. All exported + interfaces that fall within this exception will include the following + paragraph in their public documentation. + + > Warning: methods may be added to this interface in minor releases. + + * If a module is version `v2` or higher, the major version of the module + must be included as a `/vN` at the end of the module paths used in + `go.mod` files (e.g., `module go.opentelemetry.io/otel/v2`, `require + go.opentelemetry.io/otel/v2 v2.0.1`) and in the package import path + (e.g., `import "go.opentelemetry.io/otel/v2/trace"`). This includes the + paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/otel/v2@v2.0.1`. Note there is both a `/v2` and a + `@v2.0.1` in that example. One way to think about it is that the module + name now includes the `/v2`, so include `/v2` whenever you are using the + module name). + * If a module is version `v0` or `v1`, do not include the major version in + either the module path or the import path. + * Modules will be used to encapsulate signals and components. + * Experimental modules still under active development will be versioned at + `v0` to imply the stability guarantee defined by + [semver](https://semver.org/spec/v2.0.0.html#spec-item-4). + + > Major version zero (0.y.z) is for initial development. Anything MAY + > change at any time. The public API SHOULD NOT be considered stable. + + * Mature modules for which we guarantee a stable public API will be versioned + with a major version greater than `v0`. + * The decision to make a module stable will be made on a case-by-case + basis by the maintainers of this project. + * Experimental modules will start their versioning at `v0.0.0` and will + increment their minor version when backwards incompatible changes are + released and increment their patch version when backwards compatible + changes are released. + * All stable modules that use the same major version number will use the + same entire version number. + * Stable modules may be released with an incremented minor or patch + version even though that module has not been changed, but rather so + that it will remain at the same version as other stable modules that + did undergo change. + * When an experimental module becomes stable a new stable module version + will be released and will include this now stable module. The new + stable module version will be an increment of the minor version number + and will be applied to all existing stable modules as well as the newly + stable module being released. +* Versioning of the associated [contrib + repository](https://github.com/open-telemetry/opentelemetry-go-contrib) of + this project will be idiomatic of a Go project using [Go + modules](https://github.com/golang/go/wiki/Modules). + * [Semantic import + versioning](https://github.com/golang/go/wiki/Modules#semantic-import-versioning) + will be used. + * Versions will comply with [semver 2.0](https://semver.org/spec/v2.0.0.html). + * If a module is version `v2` or higher, the + major version of the module must be included as a `/vN` at the end of the + module paths used in `go.mod` files (e.g., `module + go.opentelemetry.io/contrib/instrumentation/host/v2`, `require + go.opentelemetry.io/contrib/instrumentation/host/v2 v2.0.1`) and in the + package import path (e.g., `import + "go.opentelemetry.io/contrib/instrumentation/host/v2"`). This includes + the paths used in `go get` commands (e.g., `go get + go.opentelemetry.io/contrib/instrumentation/host/v2@v2.0.1`. 
Note there is both a `/v2` and a `@v2.0.1` in that example. One way to think about
+    it is that the module name now includes the `/v2`, so include `/v2`
+    whenever you are using the module name).
+    * If a module is version `v0` or `v1`, do not include the major version
+      in either the module path or the import path.
+  * In addition to public APIs, telemetry produced by stable instrumentation
+    will remain stable and backwards compatible. This is to avoid breaking
+    alerts and dashboards.
+  * Modules will be used to encapsulate instrumentation, detectors, exporters,
+    propagators, and any other independent sets of related components.
+  * Experimental modules still under active development will be versioned at
+    `v0` to imply the stability guarantee defined by
+    [semver](https://semver.org/spec/v2.0.0.html#spec-item-4).
+
+    > Major version zero (0.y.z) is for initial development. Anything MAY
+    > change at any time. The public API SHOULD NOT be considered stable.
+
+  * Mature modules for which we guarantee a stable public API and telemetry will
+    be versioned with a major version greater than `v0`.
+  * Experimental modules will start their versioning at `v0.0.0` and will
+    increment their minor version when backwards incompatible changes are
+    released and increment their patch version when backwards compatible
+    changes are released.
+  * Stable contrib modules cannot depend on experimental modules from this
+    project.
+  * All stable contrib modules of the same major version with this project
+    will use the same entire version as this project.
+    * Stable modules may be released with an incremented minor or patch
+      version even though that module's code has not been changed. Instead
+      the only change that will have been included is to have updated that
+      module's dependency on this project's stable APIs.
+    * When an experimental module in contrib becomes stable a new stable
+      module version will be released and will include this now stable
+      module. The new stable module version will be an increment of the minor
+      version number and will be applied to all existing stable contrib
+      modules, this project's modules, and the newly stable module being
+      released.
+  * Contrib modules will be kept up to date with this project's releases.
+    * Due to the dependency contrib modules will implicitly have on this
+      project's modules, the release of stable contrib modules to match the
+      released version number will be staggered after this project's release.
+      There is no explicit time guarantee for how long after this project's
+      release the contrib release will be. Effort should be made to keep them
+      as close in time as possible.
+    * No additional stable release in this project can be made until the
+      contrib repository has a matching stable release.
+    * No release can be made in the contrib repository after this project's
+      stable release except for a stable release of the contrib repository.
+* GitHub releases will be made for all releases.
+* Go modules will be made available at Go package mirrors.
+
+## Example Versioning Lifecycle
+
+To better understand the implementation of the above policy, the following
+example is provided.
+This project is simplified to include only the following modules and their versions:
+
+* `otel`: `v0.14.0`
+* `otel/trace`: `v0.14.0`
+* `otel/metric`: `v0.14.0`
+* `otel/baggage`: `v0.14.0`
+* `otel/sdk/trace`: `v0.14.0`
+* `otel/sdk/metric`: `v0.14.0`
+
+These modules have been developed to a point where `otel/trace`, `otel/baggage`, and `otel/sdk/trace` should be considered for a stable release. The `otel/metric` and `otel/sdk/metric` modules are still under active development, and the `otel` module depends on both `otel/trace` and `otel/metric`.
+
+The `otel` package is refactored to remove its dependencies on `otel/metric` so it can be released as stable as well. With that done, the following release candidates are made:
+
+* `otel`: `v1.0.0-RC1`
+* `otel/trace`: `v1.0.0-RC1`
+* `otel/baggage`: `v1.0.0-RC1`
+* `otel/sdk/trace`: `v1.0.0-RC1`
+
+The `otel/metric` and `otel/sdk/metric` modules remain at `v0.14.0`.
+
+A few minor issues are discovered in the `otel/trace` package. These issues are resolved with some minor, but backwards incompatible, changes and are released as a second release candidate:
+
+* `otel`: `v1.0.0-RC2`
+* `otel/trace`: `v1.0.0-RC2`
+* `otel/baggage`: `v1.0.0-RC2`
+* `otel/sdk/trace`: `v1.0.0-RC2`
+
+Notice that all module version numbers are incremented to adhere to our versioning policy.
+
+After these release candidates have been evaluated to satisfaction, they are released as version `v1.0.0`.
+
+* `otel`: `v1.0.0`
+* `otel/trace`: `v1.0.0`
+* `otel/baggage`: `v1.0.0`
+* `otel/sdk/trace`: `v1.0.0`
+
+Since both the `go` utility and the Go module system support [the semantic versioning definition of precedence](https://semver.org/spec/v2.0.0.html#spec-item-11), this release will correctly be interpreted as the successor to the previous release candidates.
+
+Active development of this project continues. The `otel/metric` module now has backwards incompatible changes to its API that need to be released, and the `otel/baggage` module has a minor bug fix that needs to be released. The following release is made:
+
+* `otel`: `v1.0.1`
+* `otel/trace`: `v1.0.1`
+* `otel/metric`: `v0.15.0`
+* `otel/baggage`: `v1.0.1`
+* `otel/sdk/trace`: `v1.0.1`
+* `otel/sdk/metric`: `v0.15.0`
+
+Notice that, again, all stable module versions are incremented in unison and the `otel/sdk/metric` package, which depends on the `otel/metric` package, also bumped its version. This bump of the `otel/sdk/metric` package makes sense given their coupling, though it is not explicitly required by our versioning policy.
+
+As we progress, the `otel/metric` and `otel/sdk/metric` packages have reached a point where they should be evaluated for stability. The `otel` module is reintegrated with the `otel/metric` package and the following release is made:
+
+* `otel`: `v1.1.0-RC1`
+* `otel/trace`: `v1.1.0-RC1`
+* `otel/metric`: `v1.1.0-RC1`
+* `otel/baggage`: `v1.1.0-RC1`
+* `otel/sdk/trace`: `v1.1.0-RC1`
+* `otel/sdk/metric`: `v1.1.0-RC1`
+
+All the modules are evaluated and determined to be viable for a stable release. They are then released as version `v1.1.0` (the minor version is incremented to indicate the addition of a new signal).
+
+* `otel`: `v1.1.0`
+* `otel/trace`: `v1.1.0`
+* `otel/metric`: `v1.1.0`
+* `otel/baggage`: `v1.1.0`
+* `otel/sdk/trace`: `v1.1.0`
+* `otel/sdk/metric`: `v1.1.0`
diff --git a/vendor/go.opentelemetry.io/otel/attribute/doc.go b/vendor/go.opentelemetry.io/otel/attribute/doc.go
new file mode 100644
index 0000000000..dafe7424df
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/doc.go
@@ -0,0 +1,16 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package attribute provides key and value attributes.
+package attribute // import "go.opentelemetry.io/otel/attribute"
diff --git a/vendor/go.opentelemetry.io/otel/attribute/encoder.go b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
new file mode 100644
index 0000000000..8b940f78dc
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/encoder.go
@@ -0,0 +1,150 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"bytes"
+	"sync"
+	"sync/atomic"
+)
+
+type (
+	// Encoder is a mechanism for serializing a label set into a
+	// specific string representation that supports caching, to
+	// avoid repeated serialization. An example could be an
+	// exporter encoding the label set into a wire representation.
+	Encoder interface {
+		// Encode returns the serialized encoding of the label
+		// set using its Iterator. This result may be cached
+		// by an attribute.Set.
+		Encode(iterator Iterator) string
+
+		// ID returns a value that is unique for each class of
+		// label encoder. Label encoders allocate these using
+		// `NewEncoderID`.
+		ID() EncoderID
+	}
+
+	// EncoderID is used to identify distinct Encoder
+	// implementations, for caching encoded results.
+	EncoderID struct {
+		value uint64
+	}
+
+	// defaultLabelEncoder uses a sync.Pool of buffers to reduce
+	// the number of allocations used in encoding labels. This
+	// implementation encodes a comma-separated list of key=value,
+	// with '\'-escaping of '=', ',', and '\'.
+	defaultLabelEncoder struct {
+		// pool is a pool of labelset builders. The buffers in this
+		// pool grow to a size at which most label encodings will not
+		// allocate new memory.
+		pool sync.Pool // *bytes.Buffer
+	}
+)
+
+// escapeChar is used to ensure uniqueness of the label encoding where
+// keys or values contain either '=' or ','. Since there is no parser
+// needed for this encoding and its only requirement is to be unique,
+// this choice is arbitrary. Users will see these in some exporters
+// (e.g., stdout), so the backslash ('\') is used as a conventional choice.
+const escapeChar = '\\'
+
+var (
+	_ Encoder = &defaultLabelEncoder{}
+
+	// encoderIDCounter is for generating IDs for other label
+	// encoders.
+	encoderIDCounter uint64
+
+	defaultEncoderOnce     sync.Once
+	defaultEncoderID       = NewEncoderID()
+	defaultEncoderInstance *defaultLabelEncoder
+)
+
+// NewEncoderID returns a unique label encoder ID. It should be
+// called once for each type of label encoder. Preferably in init() or
+// in var definition.
+func NewEncoderID() EncoderID {
+	return EncoderID{value: atomic.AddUint64(&encoderIDCounter, 1)}
+}
+
+// DefaultEncoder returns a label encoder that encodes labels
+// in such a way that each escaped label's key is followed by an equal
+// sign and then by an escaped label's value. All key-value pairs are
+// separated by a comma.
+//
+// Escaping is done by prepending a backslash before either a
+// backslash, equal sign or a comma.
+func DefaultEncoder() Encoder {
+	defaultEncoderOnce.Do(func() {
+		defaultEncoderInstance = &defaultLabelEncoder{
+			pool: sync.Pool{
+				New: func() interface{} {
+					return &bytes.Buffer{}
+				},
+			},
+		}
+	})
+	return defaultEncoderInstance
+}
+
+// Encode is a part of an implementation of the Encoder
+// interface.
+func (d *defaultLabelEncoder) Encode(iter Iterator) string {
+	buf := d.pool.Get().(*bytes.Buffer)
+	defer d.pool.Put(buf)
+	buf.Reset()
+
+	for iter.Next() {
+		i, keyValue := iter.IndexedLabel()
+		if i > 0 {
+			_, _ = buf.WriteRune(',')
+		}
+		copyAndEscape(buf, string(keyValue.Key))
+
+		_, _ = buf.WriteRune('=')
+
+		if keyValue.Value.Type() == STRING {
+			copyAndEscape(buf, keyValue.Value.AsString())
+		} else {
+			_, _ = buf.WriteString(keyValue.Value.Emit())
+		}
+	}
+	return buf.String()
+}
+
+// ID is a part of an implementation of the Encoder interface.
+func (*defaultLabelEncoder) ID() EncoderID {
+	return defaultEncoderID
+}
+
+// copyAndEscape escapes `=`, `,` and its own escape character (`\`),
+// making the default encoding unique.
+func copyAndEscape(buf *bytes.Buffer, val string) {
+	for _, ch := range val {
+		switch ch {
+		case '=', ',', escapeChar:
+			buf.WriteRune(escapeChar)
+		}
+		buf.WriteRune(ch)
+	}
+}
+
+// Valid returns true if this encoder ID was allocated by
+// `NewEncoderID`. Invalid encoder IDs will not be cached.
+func (id EncoderID) Valid() bool {
+	return id.value != 0
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/iterator.go b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
new file mode 100644
index 0000000000..e03aabb62b
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/iterator.go
@@ -0,0 +1,143 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
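+// A hedged usage sketch of how the iterator below is typically driven
+// (the Set, NewSet, Encoded, and DefaultEncoder names come from set.go and
+// encoder.go in this package; the "k=v,n=1" output is an assumption of this
+// example based on the default encoding, not a documented guarantee):
+//
+//	set := attribute.NewSet(attribute.String("k", "v"), attribute.Int("n", 1))
+//	enc := set.Encoded(attribute.DefaultEncoder()) // enc == "k=v,n=1"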
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Iterator allows iterating over the set of labels in order,
+// sorted by key.
+type Iterator struct {
+	storage *Set
+	idx     int
+}
+
+// MergeIterator supports iterating over two sets of labels while
+// eliminating duplicate values from the combined set. The first
+// iterator value takes precedence.
+type MergeIterator struct {
+	one     oneIterator
+	two     oneIterator
+	current KeyValue
+}
+
+type oneIterator struct {
+	iter  Iterator
+	done  bool
+	label KeyValue
+}
+
+// Next moves the iterator to the next position. Returns false if there
+// are no more labels.
+func (i *Iterator) Next() bool {
+	i.idx++
+	return i.idx < i.Len()
+}
+
+// Label returns the current KeyValue. Must be called only after Next returns
+// true.
+func (i *Iterator) Label() KeyValue {
+	kv, _ := i.storage.Get(i.idx)
+	return kv
+}
+
+// Attribute is a synonym for Label().
+func (i *Iterator) Attribute() KeyValue {
+	return i.Label()
+}
+
+// IndexedLabel returns the current index and attribute. Must be called only
+// after Next returns true.
+func (i *Iterator) IndexedLabel() (int, KeyValue) {
+	return i.idx, i.Label()
+}
+
+// Len returns the number of labels in the iterator's `*Set`.
+func (i *Iterator) Len() int {
+	return i.storage.Len()
+}
+
+// ToSlice is a convenience function that creates a slice of labels
+// from the passed iterator. The iterator is set up to start from the
+// beginning before creating the slice.
+func (i *Iterator) ToSlice() []KeyValue {
+	l := i.Len()
+	if l == 0 {
+		return nil
+	}
+	i.idx = -1
+	slice := make([]KeyValue, 0, l)
+	for i.Next() {
+		slice = append(slice, i.Label())
+	}
+	return slice
+}
+
+// NewMergeIterator returns a MergeIterator for merging two label sets.
+// Duplicates are resolved by taking the value from the first set.
+func NewMergeIterator(s1, s2 *Set) MergeIterator {
+	mi := MergeIterator{
+		one: makeOne(s1.Iter()),
+		two: makeOne(s2.Iter()),
+	}
+	return mi
+}
+
+func makeOne(iter Iterator) oneIterator {
+	oi := oneIterator{
+		iter: iter,
+	}
+	oi.advance()
+	return oi
+}
+
+func (oi *oneIterator) advance() {
+	if oi.done = !oi.iter.Next(); !oi.done {
+		oi.label = oi.iter.Label()
+	}
+}
+
+// Next returns true if there is another label available.
+func (m *MergeIterator) Next() bool {
+	if m.one.done && m.two.done {
+		return false
+	}
+	if m.one.done {
+		m.current = m.two.label
+		m.two.advance()
+		return true
+	}
+	if m.two.done {
+		m.current = m.one.label
+		m.one.advance()
+		return true
+	}
+	if m.one.label.Key == m.two.label.Key {
+		m.current = m.one.label // first iterator label value wins
+		m.one.advance()
+		m.two.advance()
+		return true
+	}
+	if m.one.label.Key < m.two.label.Key {
+		m.current = m.one.label
+		m.one.advance()
+		return true
+	}
+	m.current = m.two.label
+	m.two.advance()
+	return true
+}
+
+// Label returns the current value after Next() returns true.
+func (m *MergeIterator) Label() KeyValue {
+	return m.current
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/key.go b/vendor/go.opentelemetry.io/otel/attribute/key.go
new file mode 100644
index 0000000000..0656a04e43
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/key.go
@@ -0,0 +1,134 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+// Key represents the key part in key-value pairs. It's a string. The
+// allowed character set in the key depends on the use of the key.
+type Key string
+
+// Bool creates a KeyValue instance with a BOOL Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Bool(name, value).
+func (k Key) Bool(v bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolValue(v),
+	}
+}
+
+// BoolSlice creates a KeyValue instance with a BOOLSLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- BoolSlice(name, value).
+func (k Key) BoolSlice(v []bool) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: BoolSliceValue(v),
+	}
+}
+
+// Int creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int(name, value).
+func (k Key) Int(v int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntValue(v),
+	}
+}
+
+// IntSlice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- IntSlice(name, value).
+func (k Key) IntSlice(v []int) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: IntSliceValue(v),
+	}
+}
+
+// Int64 creates a KeyValue instance with an INT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64(name, value).
+func (k Key) Int64(v int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64Value(v),
+	}
+}
+
+// Int64Slice creates a KeyValue instance with an INT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Int64Slice(name, value).
+func (k Key) Int64Slice(v []int64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Int64SliceValue(v),
+	}
+}
+
+// Float64 creates a KeyValue instance with a FLOAT64 Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64(name, value).
+func (k Key) Float64(v float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64Value(v),
+	}
+}
+
+// Float64Slice creates a KeyValue instance with a FLOAT64SLICE Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- Float64Slice(name, value).
+func (k Key) Float64Slice(v []float64) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: Float64SliceValue(v),
+	}
+}
+
+// String creates a KeyValue instance with a STRING Value.
+//
+// If creating both a key and value at the same time, use the provided
+// convenience function instead -- String(name, value).
+func (k Key) String(v string) KeyValue {
+	return KeyValue{
+		Key:   k,
+		Value: StringValue(v),
+	}
+}
+
+// StringSlice creates a KeyValue instance with a STRINGSLICE Value.
+// +// If creating both a key and value at the same time, use the provided +// convenience function instead -- StringSlice(name, value). +func (k Key) StringSlice(v []string) KeyValue { + return KeyValue{ + Key: k, + Value: StringSliceValue(v), + } +} + +// Defined returns true for non-empty keys. +func (k Key) Defined() bool { + return len(k) != 0 +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/kv.go b/vendor/go.opentelemetry.io/otel/attribute/kv.go new file mode 100644 index 0000000000..1ddf3ce058 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/kv.go @@ -0,0 +1,86 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "fmt" +) + +// KeyValue holds a key and value pair. +type KeyValue struct { + Key Key + Value Value +} + +// Valid returns if kv is a valid OpenTelemetry attribute. +func (kv KeyValue) Valid() bool { + return kv.Key.Defined() && kv.Value.Type() != INVALID +} + +// Bool creates a KeyValue with a BOOL Value type. +func Bool(k string, v bool) KeyValue { + return Key(k).Bool(v) +} + +// BoolSlice creates a KeyValue with a BOOLSLICE Value type. +func BoolSlice(k string, v []bool) KeyValue { + return Key(k).BoolSlice(v) +} + +// Int creates a KeyValue with an INT64 Value type. +func Int(k string, v int) KeyValue { + return Key(k).Int(v) +} + +// IntSlice creates a KeyValue with an INT64SLICE Value type. +func IntSlice(k string, v []int) KeyValue { + return Key(k).IntSlice(v) +} + +// Int64 creates a KeyValue with an INT64 Value type. +func Int64(k string, v int64) KeyValue { + return Key(k).Int64(v) +} + +// Int64Slice creates a KeyValue with an INT64SLICE Value type. +func Int64Slice(k string, v []int64) KeyValue { + return Key(k).Int64Slice(v) +} + +// Float64 creates a KeyValue with a FLOAT64 Value type. +func Float64(k string, v float64) KeyValue { + return Key(k).Float64(v) +} + +// Float64Slice creates a KeyValue with a FLOAT64SLICE Value type. +func Float64Slice(k string, v []float64) KeyValue { + return Key(k).Float64Slice(v) +} + +// String creates a KeyValue with a STRING Value type. +func String(k, v string) KeyValue { + return Key(k).String(v) +} + +// StringSlice creates a KeyValue with a STRINGSLICE Value type. +func StringSlice(k string, v []string) KeyValue { + return Key(k).StringSlice(v) +} + +// Stringer creates a new key-value pair with a passed name and a string +// value generated by the passed Stringer interface. 
+func Stringer(k string, v fmt.Stringer) KeyValue { + return Key(k).String(v.String()) +} diff --git a/vendor/go.opentelemetry.io/otel/attribute/set.go b/vendor/go.opentelemetry.io/otel/attribute/set.go new file mode 100644 index 0000000000..a28f1435cb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/attribute/set.go @@ -0,0 +1,435 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package attribute // import "go.opentelemetry.io/otel/attribute" + +import ( + "encoding/json" + "reflect" + "sort" +) + +type ( + // Set is the representation for a distinct label set. It + // manages an immutable set of labels, with an internal cache + // for storing label encodings. + // + // This type supports the `Equivalent` method of comparison + // using values of type `Distinct`. + // + // This type is used to implement: + // 1. Metric labels + // 2. Resource sets + // 3. Correlation map (TODO) + Set struct { + equivalent Distinct + } + + // Distinct wraps a variable-size array of `KeyValue`, + // constructed with keys in sorted order. This can be used as + // a map key or for equality checking between Sets. + Distinct struct { + iface interface{} + } + + // Filter supports removing certain labels from label sets. + // When the filter returns true, the label will be kept in + // the filtered label set. When the filter returns false, the + // label is excluded from the filtered label set, and the + // label instead appears in the `removed` list of excluded labels. + Filter func(KeyValue) bool + + // Sortable implements `sort.Interface`, used for sorting + // `KeyValue`. This is an exported type to support a + // memory optimization. A pointer to one of these is needed + // for the call to `sort.Stable()`, which the caller may + // provide in order to avoid an allocation. See + // `NewSetWithSortable()`. + Sortable []KeyValue +) + +var ( + // keyValueType is used in `computeDistinctReflect`. + keyValueType = reflect.TypeOf(KeyValue{}) + + // emptySet is returned for empty label sets. + emptySet = &Set{ + equivalent: Distinct{ + iface: [0]KeyValue{}, + }, + } +) + +// EmptySet returns a reference to a Set with no elements. +// +// This is a convenience provided for optimized calling utility. +func EmptySet() *Set { + return emptySet +} + +// reflect abbreviates `reflect.ValueOf`. +func (d Distinct) reflect() reflect.Value { + return reflect.ValueOf(d.iface) +} + +// Valid returns true if this value refers to a valid `*Set`. +func (d Distinct) Valid() bool { + return d.iface != nil +} + +// Len returns the number of labels in this set. +func (l *Set) Len() int { + if l == nil || !l.equivalent.Valid() { + return 0 + } + return l.equivalent.reflect().Len() +} + +// Get returns the KeyValue at ordered position `idx` in this set. 
+func (l *Set) Get(idx int) (KeyValue, bool) { + if l == nil { + return KeyValue{}, false + } + value := l.equivalent.reflect() + + if idx >= 0 && idx < value.Len() { + // Note: The Go compiler successfully avoids an allocation for + // the interface{} conversion here: + return value.Index(idx).Interface().(KeyValue), true + } + + return KeyValue{}, false +} + +// Value returns the value of a specified key in this set. +func (l *Set) Value(k Key) (Value, bool) { + if l == nil { + return Value{}, false + } + rValue := l.equivalent.reflect() + vlen := rValue.Len() + + idx := sort.Search(vlen, func(idx int) bool { + return rValue.Index(idx).Interface().(KeyValue).Key >= k + }) + if idx >= vlen { + return Value{}, false + } + keyValue := rValue.Index(idx).Interface().(KeyValue) + if k == keyValue.Key { + return keyValue.Value, true + } + return Value{}, false +} + +// HasValue tests whether a key is defined in this set. +func (l *Set) HasValue(k Key) bool { + if l == nil { + return false + } + _, ok := l.Value(k) + return ok +} + +// Iter returns an iterator for visiting the labels in this set. +func (l *Set) Iter() Iterator { + return Iterator{ + storage: l, + idx: -1, + } +} + +// ToSlice returns the set of labels belonging to this set, sorted, +// where keys appear no more than once. +func (l *Set) ToSlice() []KeyValue { + iter := l.Iter() + return iter.ToSlice() +} + +// Equivalent returns a value that may be used as a map key. The +// Distinct type guarantees that the result will equal the equivalent +// Distinct value of any label set with the same elements as this, +// where sets are made unique by choosing the last value in the input +// for any given key. +func (l *Set) Equivalent() Distinct { + if l == nil || !l.equivalent.Valid() { + return emptySet.equivalent + } + return l.equivalent +} + +// Equals returns true if the argument set is equivalent to this set. +func (l *Set) Equals(o *Set) bool { + return l.Equivalent() == o.Equivalent() +} + +// Encoded returns the encoded form of this set, according to +// `encoder`. +func (l *Set) Encoded(encoder Encoder) string { + if l == nil || encoder == nil { + return "" + } + + return encoder.Encode(l.Iter()) +} + +func empty() Set { + return Set{ + equivalent: emptySet.equivalent, + } +} + +// NewSet returns a new `Set`. See the documentation for +// `NewSetWithSortableFiltered` for more details. +// +// Except for empty sets, this method adds an additional allocation +// compared with calls that include a `*Sortable`. +func NewSet(kvs ...KeyValue) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + s, _ := NewSetWithSortableFiltered(kvs, new(Sortable), nil) + return s +} + +// NewSetWithSortable returns a new `Set`. See the documentation for +// `NewSetWithSortableFiltered` for more details. +// +// This call includes a `*Sortable` option as a memory optimization. +func NewSetWithSortable(kvs []KeyValue, tmp *Sortable) Set { + // Check for empty set. + if len(kvs) == 0 { + return empty() + } + s, _ := NewSetWithSortableFiltered(kvs, tmp, nil) + return s +} + +// NewSetWithFiltered returns a new `Set`. See the documentation for +// `NewSetWithSortableFiltered` for more details. +// +// This call includes a `Filter` to include/exclude label keys from +// the return value. Excluded keys are returned as a slice of label +// values. +func NewSetWithFiltered(kvs []KeyValue, filter Filter) (Set, []KeyValue) { + // Check for empty set. 
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+	return NewSetWithSortableFiltered(kvs, new(Sortable), filter)
+}
+
+// NewSetWithSortableFiltered returns a new `Set`.
+//
+// Duplicate keys are eliminated by taking the last value. This
+// re-orders the input slice so that unique last-values are contiguous
+// at the end of the slice.
+//
+// This ensures the following:
+//
+// - Last-value-wins semantics
+// - Caller sees the reordering, but doesn't lose values
+// - Repeated calls preserve last-value-wins semantics.
+//
+// Note that methods are defined on `*Set`, although this returns `Set`.
+// Callers can avoid memory allocations by:
+//
+// - allocating a `Sortable` for use as a temporary in this method
+// - allocating a `Set` for storing the return value of this
+//   constructor.
+//
+// The result maintains a cache of encoded labels, by attribute.EncoderID.
+// This value should not be copied after its first use.
+//
+// The second `[]KeyValue` return value is a list of labels that were
+// excluded by the Filter (if non-nil).
+func NewSetWithSortableFiltered(kvs []KeyValue, tmp *Sortable, filter Filter) (Set, []KeyValue) {
+	// Check for empty set.
+	if len(kvs) == 0 {
+		return empty(), nil
+	}
+
+	*tmp = kvs
+
+	// Stable sort so the following de-duplication can implement
+	// last-value-wins semantics.
+	sort.Stable(tmp)
+
+	*tmp = nil
+
+	position := len(kvs) - 1
+	offset := position - 1
+
+	// The requirements stated above require that the stable
+	// result be placed in the end of the input slice, while
+	// overwritten values are swapped to the beginning.
+	//
+	// De-duplicate with last-value-wins semantics. Preserve
+	// duplicate values at the beginning of the input slice.
+	for ; offset >= 0; offset-- {
+		if kvs[offset].Key == kvs[position].Key {
+			continue
+		}
+		position--
+		kvs[offset], kvs[position] = kvs[position], kvs[offset]
+	}
+	if filter != nil {
+		return filterSet(kvs[position:], filter)
+	}
+	return Set{
+		equivalent: computeDistinct(kvs[position:]),
+	}, nil
+}
+
+// filterSet reorders `kvs` so that included keys are contiguous at
+// the end of the slice, while excluded keys precede the included keys.
+func filterSet(kvs []KeyValue, filter Filter) (Set, []KeyValue) {
+	var excluded []KeyValue
+
+	// Move labels that do not match the filter so
+	// they're adjacent before calling computeDistinct().
+	distinctPosition := len(kvs)
+
+	// Swap indistinct keys forward and distinct keys toward the
+	// end of the slice.
+	offset := len(kvs) - 1
+	for ; offset >= 0; offset-- {
+		if filter(kvs[offset]) {
+			distinctPosition--
+			kvs[offset], kvs[distinctPosition] = kvs[distinctPosition], kvs[offset]
+			continue
+		}
+	}
+	excluded = kvs[:distinctPosition]
+
+	return Set{
+		equivalent: computeDistinct(kvs[distinctPosition:]),
+	}, excluded
+}
+
+// Filter returns a filtered copy of this `Set`. See the
+// documentation for `NewSetWithSortableFiltered` for more details.
+func (l *Set) Filter(re Filter) (Set, []KeyValue) {
+	if re == nil {
+		return Set{
+			equivalent: l.equivalent,
+		}, nil
+	}
+
+	// Note: This could be refactored to avoid the temporary slice
+	// allocation, if it proves to be expensive.
+	return filterSet(l.ToSlice(), re)
+}
+
+// computeDistinct returns a `Distinct` using either the fixed- or
+// reflect-oriented code path, depending on the size of the input.
+// The input slice is assumed to already be sorted and de-duplicated.
+func computeDistinct(kvs []KeyValue) Distinct {
+	iface := computeDistinctFixed(kvs)
+	if iface == nil {
+		iface = computeDistinctReflect(kvs)
+	}
+	return Distinct{
+		iface: iface,
+	}
+}
+
+// computeDistinctFixed computes a `Distinct` for small slices. It
+// returns nil if the input is too large for this code path.
+func computeDistinctFixed(kvs []KeyValue) interface{} {
+	switch len(kvs) {
+	case 1:
+		ptr := new([1]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 2:
+		ptr := new([2]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 3:
+		ptr := new([3]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 4:
+		ptr := new([4]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 5:
+		ptr := new([5]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 6:
+		ptr := new([6]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 7:
+		ptr := new([7]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 8:
+		ptr := new([8]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 9:
+		ptr := new([9]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	case 10:
+		ptr := new([10]KeyValue)
+		copy((*ptr)[:], kvs)
+		return *ptr
+	default:
+		return nil
+	}
+}
+
+// computeDistinctReflect computes a `Distinct` using reflection,
+// working for any size input.
+func computeDistinctReflect(kvs []KeyValue) interface{} {
+	at := reflect.New(reflect.ArrayOf(len(kvs), keyValueType)).Elem()
+	for i, keyValue := range kvs {
+		*(at.Index(i).Addr().Interface().(*KeyValue)) = keyValue
+	}
+	return at.Interface()
+}
+
+// MarshalJSON returns the JSON encoding of the `*Set`.
+func (l *Set) MarshalJSON() ([]byte, error) {
+	return json.Marshal(l.equivalent.iface)
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Set.
+func (l Set) MarshalLog() interface{} {
+	kvs := make(map[string]string)
+	for _, kv := range l.ToSlice() {
+		kvs[string(kv.Key)] = kv.Value.Emit()
+	}
+	return kvs
+}
+
+// Len implements `sort.Interface`.
+func (l *Sortable) Len() int {
+	return len(*l)
+}
+
+// Swap implements `sort.Interface`.
+func (l *Sortable) Swap(i, j int) {
+	(*l)[i], (*l)[j] = (*l)[j], (*l)[i]
+}
+
+// Less implements `sort.Interface`.
+func (l *Sortable) Less(i, j int) bool {
+	return (*l)[i].Key < (*l)[j].Key
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/type_string.go b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
new file mode 100644
index 0000000000..e584b24776
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/type_string.go
@@ -0,0 +1,31 @@
+// Code generated by "stringer -type=Type"; DO NOT EDIT.
+
+package attribute
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[INVALID-0]
+	_ = x[BOOL-1]
+	_ = x[INT64-2]
+	_ = x[FLOAT64-3]
+	_ = x[STRING-4]
+	_ = x[BOOLSLICE-5]
+	_ = x[INT64SLICE-6]
+	_ = x[FLOAT64SLICE-7]
+	_ = x[STRINGSLICE-8]
+}
+
+const _Type_name = "INVALIDBOOLINT64FLOAT64STRINGBOOLSLICEINT64SLICEFLOAT64SLICESTRINGSLICE"
+
+var _Type_index = [...]uint8{0, 7, 11, 16, 23, 29, 38, 48, 60, 71}
+
+func (i Type) String() string {
+	if i < 0 || i >= Type(len(_Type_index)-1) {
+		return "Type(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Type_name[_Type_index[i]:_Type_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/attribute/value.go b/vendor/go.opentelemetry.io/otel/attribute/value.go
new file mode 100644
index 0000000000..6ec5cb290d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/attribute/value.go
@@ -0,0 +1,271 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package attribute // import "go.opentelemetry.io/otel/attribute"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+
+	"go.opentelemetry.io/otel/internal"
+)
+
+//go:generate stringer -type=Type
+
+// Type describes the type of the data Value holds.
+type Type int
+
+// Value represents the value part in key-value pairs.
+type Value struct {
+	vtype    Type
+	numeric  uint64
+	stringly string
+	slice    interface{}
+}
+
+const (
+	// INVALID is used for a Value with no value set.
+	INVALID Type = iota
+	// BOOL is a boolean Type Value.
+	BOOL
+	// INT64 is a 64-bit signed integral Type Value.
+	INT64
+	// FLOAT64 is a 64-bit floating point Type Value.
+	FLOAT64
+	// STRING is a string Type Value.
+	STRING
+	// BOOLSLICE is a slice of booleans Type Value.
+	BOOLSLICE
+	// INT64SLICE is a slice of 64-bit signed integral numbers Type Value.
+	INT64SLICE
+	// FLOAT64SLICE is a slice of 64-bit floating point numbers Type Value.
+	FLOAT64SLICE
+	// STRINGSLICE is a slice of strings Type Value.
+	STRINGSLICE
+)
+
+// BoolValue creates a BOOL Value.
+func BoolValue(v bool) Value {
+	return Value{
+		vtype:   BOOL,
+		numeric: internal.BoolToRaw(v),
+	}
+}
+
+// BoolSliceValue creates a BOOLSLICE Value.
+func BoolSliceValue(v []bool) Value {
+	cp := make([]bool, len(v))
+	copy(cp, v)
+	return Value{
+		vtype: BOOLSLICE,
+		slice: &cp,
+	}
+}
+
+// IntValue creates an INT64 Value.
+func IntValue(v int) Value {
+	return Int64Value(int64(v))
+}
+
+// IntSliceValue creates an INT64SLICE Value from a slice of int.
+func IntSliceValue(v []int) Value {
+	cp := make([]int64, 0, len(v))
+	for _, i := range v {
+		cp = append(cp, int64(i))
+	}
+	return Value{
+		vtype: INT64SLICE,
+		slice: &cp,
+	}
+}
+
+// Int64Value creates an INT64 Value.
+func Int64Value(v int64) Value {
+	return Value{
+		vtype:   INT64,
+		numeric: internal.Int64ToRaw(v),
+	}
+}
+
+// Int64SliceValue creates an INT64SLICE Value.
+func Int64SliceValue(v []int64) Value {
+	cp := make([]int64, len(v))
+	copy(cp, v)
+	return Value{
+		vtype: INT64SLICE,
+		slice: &cp,
+	}
+}
+
+// Float64Value creates a FLOAT64 Value.
+func Float64Value(v float64) Value {
+	return Value{
+		vtype:   FLOAT64,
+		numeric: internal.Float64ToRaw(v),
+	}
+}
+
+// Float64SliceValue creates a FLOAT64SLICE Value.
+func Float64SliceValue(v []float64) Value {
+	cp := make([]float64, len(v))
+	copy(cp, v)
+	return Value{
+		vtype: FLOAT64SLICE,
+		slice: &cp,
+	}
+}
+
+// StringValue creates a STRING Value.
+func StringValue(v string) Value {
+	return Value{
+		vtype:    STRING,
+		stringly: v,
+	}
+}
+
+// StringSliceValue creates a STRINGSLICE Value.
+func StringSliceValue(v []string) Value {
+	cp := make([]string, len(v))
+	copy(cp, v)
+	return Value{
+		vtype: STRINGSLICE,
+		slice: &cp,
+	}
+}
+
+// Type returns the type of the Value.
+func (v Value) Type() Type {
+	return v.vtype
+}
+
+// AsBool returns the bool value. Make sure that the Value's type is
+// BOOL.
+func (v Value) AsBool() bool {
+	return internal.RawToBool(v.numeric)
+}
+
+// AsBoolSlice returns the []bool value. Make sure that the Value's type is
+// BOOLSLICE.
+func (v Value) AsBoolSlice() []bool {
+	if s, ok := v.slice.(*[]bool); ok {
+		return *s
+	}
+	return nil
+}
+
+// AsInt64 returns the int64 value. Make sure that the Value's type is
+// INT64.
+func (v Value) AsInt64() int64 {
+	return internal.RawToInt64(v.numeric)
+}
+
+// AsInt64Slice returns the []int64 value. Make sure that the Value's type is
+// INT64SLICE.
+func (v Value) AsInt64Slice() []int64 {
+	if s, ok := v.slice.(*[]int64); ok {
+		return *s
+	}
+	return nil
+}
+
+// AsFloat64 returns the float64 value. Make sure that the Value's
+// type is FLOAT64.
+func (v Value) AsFloat64() float64 {
+	return internal.RawToFloat64(v.numeric)
+}
+
+// AsFloat64Slice returns the []float64 value. Make sure that the Value's type is
+// FLOAT64SLICE.
+func (v Value) AsFloat64Slice() []float64 {
+	if s, ok := v.slice.(*[]float64); ok {
+		return *s
+	}
+	return nil
+}
+
+// AsString returns the string value. Make sure that the Value's type
+// is STRING.
+func (v Value) AsString() string {
+	return v.stringly
+}
+
+// AsStringSlice returns the []string value. Make sure that the Value's type is
+// STRINGSLICE.
+func (v Value) AsStringSlice() []string {
+	if s, ok := v.slice.(*[]string); ok {
+		return *s
+	}
+	return nil
+}
+
+type unknownValueType struct{}
+
+// AsInterface returns Value's data as interface{}.
+func (v Value) AsInterface() interface{} {
+	switch v.Type() {
+	case BOOL:
+		return v.AsBool()
+	case BOOLSLICE:
+		return v.AsBoolSlice()
+	case INT64:
+		return v.AsInt64()
+	case INT64SLICE:
+		return v.AsInt64Slice()
+	case FLOAT64:
+		return v.AsFloat64()
+	case FLOAT64SLICE:
+		return v.AsFloat64Slice()
+	case STRING:
+		return v.stringly
+	case STRINGSLICE:
+		return v.AsStringSlice()
+	}
+	return unknownValueType{}
+}
+
+// Emit returns a string representation of Value's data.
+func (v Value) Emit() string {
+	switch v.Type() {
+	case BOOLSLICE:
+		return fmt.Sprint(*(v.slice.(*[]bool)))
+	case BOOL:
+		return strconv.FormatBool(v.AsBool())
+	case INT64SLICE:
+		return fmt.Sprint(*(v.slice.(*[]int64)))
+	case INT64:
+		return strconv.FormatInt(v.AsInt64(), 10)
+	case FLOAT64SLICE:
+		return fmt.Sprint(*(v.slice.(*[]float64)))
+	case FLOAT64:
+		return fmt.Sprint(v.AsFloat64())
+	case STRINGSLICE:
+		return fmt.Sprint(*(v.slice.(*[]string)))
+	case STRING:
+		return v.stringly
+	default:
+		return "unknown"
+	}
+}
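+
+// A minimal illustrative sketch of Emit and MarshalJSON output (using only
+// the constructors defined above):
+//
+//	v := StringValue("x")
+//	_ = v.Emit()            // "x"
+//	b, _ := v.MarshalJSON() // string(b) == `{"Type":"STRING","Value":"x"}`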
+
+// MarshalJSON returns the JSON encoding of the Value.
+func (v Value) MarshalJSON() ([]byte, error) {
+	var jsonVal struct {
+		Type  string
+		Value interface{}
+	}
+	jsonVal.Type = v.Type().String()
+	jsonVal.Value = v.AsInterface()
+	return json.Marshal(jsonVal)
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
new file mode 100644
index 0000000000..824c67b27a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/baggage.go
@@ -0,0 +1,556 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"errors"
+	"fmt"
+	"net/url"
+	"regexp"
+	"strings"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+const (
+	maxMembers               = 180
+	maxBytesPerMembers       = 4096
+	maxBytesPerBaggageString = 8192
+
+	listDelimiter     = ","
+	keyValueDelimiter = "="
+	propertyDelimiter = ";"
+
+	keyDef      = `([\x21\x23-\x27\x2A\x2B\x2D\x2E\x30-\x39\x41-\x5a\x5e-\x7a\x7c\x7e]+)`
+	valueDef    = `([\x21\x23-\x2b\x2d-\x3a\x3c-\x5B\x5D-\x7e]*)`
+	keyValueDef = `\s*` + keyDef + `\s*` + keyValueDelimiter + `\s*` + valueDef + `\s*`
+)
+
+var (
+	keyRe      = regexp.MustCompile(`^` + keyDef + `$`)
+	valueRe    = regexp.MustCompile(`^` + valueDef + `$`)
+	propertyRe = regexp.MustCompile(`^(?:\s*` + keyDef + `\s*|` + keyValueDef + `)$`)
+)
+
+var (
+	errInvalidKey      = errors.New("invalid key")
+	errInvalidValue    = errors.New("invalid value")
+	errInvalidProperty = errors.New("invalid baggage list-member property")
+	errInvalidMember   = errors.New("invalid baggage list-member")
+	errMemberNumber    = errors.New("too many list-members in baggage-string")
+	errMemberBytes     = errors.New("list-member too large")
+	errBaggageBytes    = errors.New("baggage-string too large")
+)
+
+// Property is an additional metadata entry for a baggage list-member.
+type Property struct {
+	key, value string
+
+	// hasValue indicates whether a zero-value value means the property does
+	// not have a value, or whether it was explicitly set to the zero-value.
+	hasValue bool
+
+	// hasData indicates whether the created property contains data or not.
+	// Properties that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewKeyProperty returns a new Property for key. An error is returned if key
+// is invalid according to the W3C Baggage specification.
+func NewKeyProperty(key string) (Property, error) {
+	if !keyRe.MatchString(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+
+	p := Property{key: key, hasData: true}
+	return p, nil
+}
+
+// NewKeyValueProperty returns a new Property for key with value. An error is
+// returned if key or value are invalid according to the W3C Baggage
+// specification.
+func NewKeyValueProperty(key, value string) (Property, error) {
+	if !keyRe.MatchString(key) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidKey, key)
+	}
+	if !valueRe.MatchString(value) {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidValue, value)
+	}
+
+	p := Property{
+		key:      key,
+		value:    value,
+		hasValue: true,
+		hasData:  true,
+	}
+	return p, nil
+}
+
+func newInvalidProperty() Property {
+	return Property{}
+}
+
+// parseProperty attempts to decode a Property from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+func parseProperty(property string) (Property, error) {
+	if property == "" {
+		return newInvalidProperty(), nil
+	}
+
+	match := propertyRe.FindStringSubmatch(property)
+	if len(match) != 4 {
+		return newInvalidProperty(), fmt.Errorf("%w: %q", errInvalidProperty, property)
+	}
+
+	p := Property{hasData: true}
+	if match[1] != "" {
+		p.key = match[1]
+	} else {
+		p.key = match[2]
+		p.value = match[3]
+		p.hasValue = true
+	}
+
+	return p, nil
+}
+
+// validate ensures p conforms to the W3C Baggage specification, returning an
+// error otherwise.
+func (p Property) validate() error {
+	errFunc := func(err error) error {
+		return fmt.Errorf("invalid property: %w", err)
+	}
+
+	if !p.hasData {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidProperty, p))
+	}
+
+	if !keyRe.MatchString(p.key) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidKey, p.key))
+	}
+	if p.hasValue && !valueRe.MatchString(p.value) {
+		return errFunc(fmt.Errorf("%w: %q", errInvalidValue, p.value))
+	}
+	if !p.hasValue && p.value != "" {
+		return errFunc(errors.New("inconsistent value"))
+	}
+	return nil
+}
+
+// Key returns the Property key.
+func (p Property) Key() string {
+	return p.key
+}
+
+// Value returns the Property value. Additionally, a boolean is returned
+// indicating whether the value is set, so that an empty returned string can
+// be distinguished from a value that was never set.
+func (p Property) Value() (string, bool) {
+	return p.value, p.hasValue
+}
+
+// String encodes Property into a string compliant with the W3C Baggage
+// specification.
+func (p Property) String() string {
+	if p.hasValue {
+		return fmt.Sprintf("%s%s%v", p.key, keyValueDelimiter, p.value)
+	}
+	return p.key
+}
+
+type properties []Property
+
+func fromInternalProperties(iProps []baggage.Property) properties {
+	if len(iProps) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(iProps))
+	for i, p := range iProps {
+		props[i] = Property{
+			key:      p.Key,
+			value:    p.Value,
+			hasValue: p.HasValue,
+		}
+	}
+	return props
+}
+
+func (p properties) asInternal() []baggage.Property {
+	if len(p) == 0 {
+		return nil
+	}
+
+	iProps := make([]baggage.Property, len(p))
+	for i, prop := range p {
+		iProps[i] = baggage.Property{
+			Key:      prop.key,
+			Value:    prop.value,
+			HasValue: prop.hasValue,
+		}
+	}
+	return iProps
+}
+
+func (p properties) Copy() properties {
+	if len(p) == 0 {
+		return nil
+	}
+
+	props := make(properties, len(p))
+	copy(props, p)
+	return props
+}
+
+// validate ensures each Property in p conforms to the W3C Baggage
+// specification, returning an error otherwise.
+func (p properties) validate() error {
+	for _, prop := range p {
+		if err := prop.validate(); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// String encodes properties into a string compliant with the W3C Baggage
+// specification.
+func (p properties) String() string {
+	props := make([]string, len(p))
+	for i, prop := range p {
+		props[i] = prop.String()
+	}
+	return strings.Join(props, propertyDelimiter)
+}
+
+// Member is a list-member of a baggage-string as defined by the W3C Baggage
+// specification.
+type Member struct {
+	key, value string
+	properties properties
+
+	// hasData indicates whether the created member contains data or not.
+	// Members that do not contain data are invalid with no other check
+	// required.
+	hasData bool
+}
+
+// NewMember returns a new Member from the passed arguments.
An error is +// returned if the created Member would be invalid according to the W3C +// Baggage specification. +func NewMember(key, value string, props ...Property) (Member, error) { + m := Member{ + key: key, + value: value, + properties: properties(props).Copy(), + hasData: true, + } + if err := m.validate(); err != nil { + return newInvalidMember(), err + } + + return m, nil +} + +func newInvalidMember() Member { + return Member{} +} + +// parseMember attempts to decode a Member from the passed string. It returns +// an error if the input is invalid according to the W3C Baggage +// specification. +func parseMember(member string) (Member, error) { + if n := len(member); n > maxBytesPerMembers { + return newInvalidMember(), fmt.Errorf("%w: %d", errMemberBytes, n) + } + + var ( + key, value string + props properties + ) + + parts := strings.SplitN(member, propertyDelimiter, 2) + switch len(parts) { + case 2: + // Parse the member properties. + for _, pStr := range strings.Split(parts[1], propertyDelimiter) { + p, err := parseProperty(pStr) + if err != nil { + return newInvalidMember(), err + } + props = append(props, p) + } + fallthrough + case 1: + // Parse the member key/value pair. + + // Take into account a value can contain equal signs (=). + kv := strings.SplitN(parts[0], keyValueDelimiter, 2) + if len(kv) != 2 { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidMember, member) + } + // "Leading and trailing whitespaces are allowed but MUST be trimmed + // when converting the header into a data structure." + key = strings.TrimSpace(kv[0]) + var err error + value, err = url.QueryUnescape(strings.TrimSpace(kv[1])) + if err != nil { + return newInvalidMember(), fmt.Errorf("%w: %q", err, value) + } + if !keyRe.MatchString(key) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidKey, key) + } + if !valueRe.MatchString(value) { + return newInvalidMember(), fmt.Errorf("%w: %q", errInvalidValue, value) + } + default: + // This should never happen unless a developer has changed the string + // splitting somehow. Panic instead of failing silently and allowing + // the bug to slip past the CI checks. + panic("failed to parse baggage member") + } + + return Member{key: key, value: value, properties: props, hasData: true}, nil +} + +// validate ensures m conforms to the W3C Baggage specification, returning an +// error otherwise. +func (m Member) validate() error { + if !m.hasData { + return fmt.Errorf("%w: %q", errInvalidMember, m) + } + + if !keyRe.MatchString(m.key) { + return fmt.Errorf("%w: %q", errInvalidKey, m.key) + } + if !valueRe.MatchString(m.value) { + return fmt.Errorf("%w: %q", errInvalidValue, m.value) + } + return m.properties.validate() +} + +// Key returns the Member key. +func (m Member) Key() string { return m.key } + +// Value returns the Member value. +func (m Member) Value() string { return m.value } + +// Properties returns a copy of the Member properties. +func (m Member) Properties() []Property { return m.properties.Copy() } + +// String encodes Member into a string compliant with the W3C Baggage +// specification. +func (m Member) String() string { + // A key is just an ASCII string, but a value is URL encoded UTF-8. + s := fmt.Sprintf("%s%s%s", m.key, keyValueDelimiter, url.QueryEscape(m.value)) + if len(m.properties) > 0 { + s = fmt.Sprintf("%s%s%s", s, propertyDelimiter, m.properties.String()) + } + return s +} + +// Baggage is a list of baggage members representing the baggage-string as +// defined by the W3C Baggage specification. 
+type Baggage struct { //nolint:golint
+	list baggage.List
+}
+
+// New returns a new valid Baggage. It returns an error if it results in a
+// Baggage exceeding the limits set in the W3C Baggage specification.
+//
+// It expects all the provided members to have already been validated.
+func New(members ...Member) (Baggage, error) {
+	if len(members) == 0 {
+		return Baggage{}, nil
+	}
+
+	b := make(baggage.List)
+	for _, m := range members {
+		if !m.hasData {
+			return Baggage{}, errInvalidMember
+		}
+
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// Check member numbers after deduplicating.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	bag := Baggage{b}
+	if n := len(bag.String()); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	return bag, nil
+}
+
+// Parse attempts to decode a baggage-string from the passed string. It
+// returns an error if the input is invalid according to the W3C Baggage
+// specification.
+//
+// If there are duplicate list-members contained in baggage, the last one
+// defined (reading left-to-right) will be the only one kept. This diverges
+// from the W3C Baggage specification which allows duplicate list-members,
+// but conforms to the OpenTelemetry Baggage specification.
+func Parse(bStr string) (Baggage, error) {
+	if bStr == "" {
+		return Baggage{}, nil
+	}
+
+	if n := len(bStr); n > maxBytesPerBaggageString {
+		return Baggage{}, fmt.Errorf("%w: %d", errBaggageBytes, n)
+	}
+
+	b := make(baggage.List)
+	for _, memberStr := range strings.Split(bStr, listDelimiter) {
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return Baggage{}, err
+		}
+		// OpenTelemetry resolves duplicates by last-one-wins.
+		b[m.key] = baggage.Item{
+			Value:      m.value,
+			Properties: m.properties.asInternal(),
+		}
+	}
+
+	// OpenTelemetry does not allow for duplicate list-members, but the W3C
+	// specification does. Now that we have deduplicated, ensure the baggage
+	// does not exceed list-member limits.
+	if len(b) > maxMembers {
+		return Baggage{}, errMemberNumber
+	}
+
+	return Baggage{b}, nil
+}
+
+// Member returns the baggage list-member identified by key.
+//
+// If there is no list-member matching the passed key the returned Member
+// will be a zero-value Member.
+// The returned member is not validated, as we assume the validation happened
+// when it was added to the Baggage.
+func (b Baggage) Member(key string) Member {
+	v, ok := b.list[key]
+	if !ok {
+		// We do not need to worry about distinguishing between the situation
+		// where a zero-valued Member is included in the Baggage and where
+		// one is not, because a zero-valued Member is invalid according to
+		// the W3C Baggage specification (it has an empty key).
+		return newInvalidMember()
+	}
+
+	return Member{
+		key:        key,
+		value:      v.Value,
+		properties: fromInternalProperties(v.Properties),
+	}
+}
+
+// Members returns all the baggage list-members.
+// The order of the returned list-members does not have significance.
+//
+// The returned members are not validated, as we assume the validation
+// happened when they were added to the Baggage.
+func (b Baggage) Members() []Member {
+	if len(b.list) == 0 {
+		return nil
+	}
+
+	members := make([]Member, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		})
+	}
+	return members
+}
+
+// SetMember returns a copy of the Baggage with the member included. If the
+// baggage contains a Member with the same key the existing Member is
+// replaced.
+//
+// If member is invalid according to the W3C Baggage specification, an error
+// is returned with the original Baggage.
+func (b Baggage) SetMember(member Member) (Baggage, error) {
+	if !member.hasData {
+		return b, errInvalidMember
+	}
+
+	n := len(b.list)
+	if _, ok := b.list[member.key]; !ok {
+		n++
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		// Do not copy if we are just going to overwrite.
+		if k == member.key {
+			continue
+		}
+		list[k] = v
+	}
+
+	list[member.key] = baggage.Item{
+		Value:      member.value,
+		Properties: member.properties.asInternal(),
+	}
+
+	return Baggage{list: list}, nil
+}
+
+// DeleteMember returns a copy of the Baggage with the list-member identified
+// by key removed.
+func (b Baggage) DeleteMember(key string) Baggage {
+	n := len(b.list)
+	if _, ok := b.list[key]; ok {
+		n--
+	}
+	list := make(baggage.List, n)
+
+	for k, v := range b.list {
+		if k == key {
+			continue
+		}
+		list[k] = v
+	}
+
+	return Baggage{list: list}
+}
+
+// Len returns the number of list-members in the Baggage.
+func (b Baggage) Len() int {
+	return len(b.list)
+}
+
+// String encodes Baggage into a string compliant with the W3C Baggage
+// specification. The returned string will be invalid if the Baggage contains
+// any invalid list-members.
+func (b Baggage) String() string {
+	members := make([]string, 0, len(b.list))
+	for k, v := range b.list {
+		members = append(members, Member{
+			key:        k,
+			value:      v.Value,
+			properties: fromInternalProperties(v.Properties),
+		}.String())
+	}
+	return strings.Join(members, listDelimiter)
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/context.go b/vendor/go.opentelemetry.io/otel/baggage/context.go
new file mode 100644
index 0000000000..24b34b7564
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/context.go
@@ -0,0 +1,39 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/baggage"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/internal/baggage"
+)
+
+// ContextWithBaggage returns a copy of parent with baggage.
+func ContextWithBaggage(parent context.Context, b Baggage) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, b.list)
+}
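+
+// A minimal usage sketch (illustrative; it uses only the API defined in this
+// package):
+//
+//	m, _ := NewMember("user_id", "42")
+//	bag, _ := New(m)
+//	ctx := ContextWithBaggage(context.Background(), bag)
+//	_ = FromContext(ctx).Member("user_id").Value() // "42"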
+
+// ContextWithoutBaggage returns a copy of parent with no baggage.
+func ContextWithoutBaggage(parent context.Context) context.Context {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return baggage.ContextWithList(parent, nil)
+}
+
+// FromContext returns the baggage contained in ctx.
+func FromContext(ctx context.Context) Baggage {
+	// Delegate so any hooks for the OpenTracing bridge are handled.
+	return Baggage{list: baggage.ListFromContext(ctx)}
+}
diff --git a/vendor/go.opentelemetry.io/otel/baggage/doc.go b/vendor/go.opentelemetry.io/otel/baggage/doc.go
new file mode 100644
index 0000000000..4545100df6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/baggage/doc.go
@@ -0,0 +1,20 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides functionality for storing and retrieving
+baggage items in Go context. For propagating the baggage, see the
+go.opentelemetry.io/otel/propagation package.
+*/
+package baggage // import "go.opentelemetry.io/otel/baggage"
diff --git a/vendor/go.opentelemetry.io/otel/codes/codes.go b/vendor/go.opentelemetry.io/otel/codes/codes.go
new file mode 100644
index 0000000000..064a9279fd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/codes/codes.go
@@ -0,0 +1,106 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package codes // import "go.opentelemetry.io/otel/codes"
+
+import (
+	"encoding/json"
+	"fmt"
+	"strconv"
+)
+
+const (
+	// Unset is the default status code.
+	Unset Code = 0
+	// Error indicates the operation contains an error.
+	Error Code = 1
+	// Ok indicates the operation has been validated by an application
+	// developer or operator to have completed successfully, or to contain
+	// no error.
+	Ok Code = 2
+
+	maxCode = 3
+)
+
+// Code is a 32-bit representation of a status state.
+type Code uint32
+
+var codeToStr = map[Code]string{
+	Unset: "Unset",
+	Error: "Error",
+	Ok:    "Ok",
+}
+
+var strToCode = map[string]Code{
+	`"Unset"`: Unset,
+	`"Error"`: Error,
+	`"Ok"`:    Ok,
+}
+
+// String returns the Code as a string.
+func (c Code) String() string {
+	return codeToStr[c]
+}
+
+// UnmarshalJSON unmarshals b into the Code.
+//
+// This is based on the functionality in the gRPC codes package:
+// https://github.com/grpc/grpc-go/blob/bb64fee312b46ebee26be43364a7a966033521b1/codes/codes.go#L218-L244
+func (c *Code) UnmarshalJSON(b []byte) error {
+	// From json.Unmarshaler: By convention, to approximate the behavior of
+	// Unmarshal itself, Unmarshalers implement UnmarshalJSON([]byte("null")) as
+	// a no-op.
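+	//
+	// Illustratively, both the quoted name and the bare number decode to
+	// the same Code: json.Unmarshal([]byte(`"Error"`), &c) and
+	// json.Unmarshal([]byte(`1`), &c) each set c to Error.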
+ if string(b) == "null" { + return nil + } + if c == nil { + return fmt.Errorf("nil receiver passed to UnmarshalJSON") + } + + var x interface{} + if err := json.Unmarshal(b, &x); err != nil { + return err + } + switch x.(type) { + case string: + if jc, ok := strToCode[string(b)]; ok { + *c = jc + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + case float64: + if ci, err := strconv.ParseUint(string(b), 10, 32); err == nil { + if ci >= maxCode { + return fmt.Errorf("invalid code: %q", ci) + } + + *c = Code(ci) + return nil + } + return fmt.Errorf("invalid code: %q", string(b)) + default: + return fmt.Errorf("invalid code: %q", string(b)) + } +} + +// MarshalJSON returns c as the JSON encoding of c. +func (c *Code) MarshalJSON() ([]byte, error) { + if c == nil { + return []byte("null"), nil + } + str, ok := codeToStr[*c] + if !ok { + return nil, fmt.Errorf("invalid code: %d", *c) + } + return []byte(fmt.Sprintf("%q", str)), nil +} diff --git a/vendor/go.opentelemetry.io/otel/codes/doc.go b/vendor/go.opentelemetry.io/otel/codes/doc.go new file mode 100644 index 0000000000..df3e0f1b62 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/codes/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package codes defines the canonical error codes used by OpenTelemetry. + +It conforms to [the OpenTelemetry +specification](https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#statuscanonicalcode). +*/ +package codes // import "go.opentelemetry.io/otel/codes" diff --git a/vendor/go.opentelemetry.io/otel/doc.go b/vendor/go.opentelemetry.io/otel/doc.go new file mode 100644 index 0000000000..daa36c89dc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/doc.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package otel provides global access to the OpenTelemetry API. The subpackages of +the otel package provide an implementation of the OpenTelemetry API. + +The provided API is used to instrument code and measure data about that code's +performance and operation. The measured data, by default, is not processed or +transmitted anywhere. An implementation of the OpenTelemetry SDK, like the +default SDK implementation (go.opentelemetry.io/otel/sdk), and associated +exporters are used to process and transport this data. 
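+
+A brief illustrative sketch of instrumenting code with the global trace API
+(assuming the Tracer function provided by this package and the trace.Span it
+returns):
+
+	tracer := otel.Tracer("example/instrumentation")
+	ctx, span := tracer.Start(ctx, "operation")
+	defer span.End()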
+ +To read the getting started guide, see https://opentelemetry.io/docs/go/getting-started/. + +To read more about tracing, see go.opentelemetry.io/otel/trace. + +To read more about metrics, see go.opentelemetry.io/otel/metric. + +To read more about propagation, see go.opentelemetry.io/otel/propagation and +go.opentelemetry.io/otel/baggage. +*/ +package otel // import "go.opentelemetry.io/otel" diff --git a/vendor/go.opentelemetry.io/otel/error_handler.go b/vendor/go.opentelemetry.io/otel/error_handler.go new file mode 100644 index 0000000000..72fad85412 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/error_handler.go @@ -0,0 +1,38 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// ErrorHandler handles irremediable events. +type ErrorHandler interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Handle handles any error deemed irremediable by an OpenTelemetry + // component. + Handle(error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// ErrorHandlerFunc is a convenience adapter to allow the use of a function +// as an ErrorHandler. +type ErrorHandlerFunc func(error) + +var _ ErrorHandler = ErrorHandlerFunc(nil) + +// Handle handles the irremediable error by calling the ErrorHandlerFunc itself. +func (f ErrorHandlerFunc) Handle(err error) { + f(err) +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
new file mode 100644
index 0000000000..8a40a86a24
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/README.md
@@ -0,0 +1,49 @@
+# OpenTelemetry-Go OTLP Span Exporter
+
+[![Go Reference](https://pkg.go.dev/badge/go.opentelemetry.io/otel/exporters/otlp/otlptrace.svg)](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
+
+[OpenTelemetry Protocol Exporter](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.5.0/specification/protocol/exporter.md) implementation.
+
+## Installation
+
+```
+go get -u go.opentelemetry.io/otel/exporters/otlp/otlptrace
+```
+
+## Examples
+
+- [Exporter setup and examples](./otlptracehttp/example_test.go)
+- [Full example sending telemetry to a local collector](../../../example/otel-collector)
+
+## [`otlptrace`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace)
+
+The `otlptrace` package provides an exporter implementing the OTel span exporter interface.
+This exporter is configured using a client satisfying the `otlptrace.Client` interface.
+This client handles the transformation of data into wire format and the transmission of that data to the collector.
+
+## [`otlptracegrpc`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc)
+
+The `otlptracegrpc` package implements a client for the span exporter that sends trace telemetry data to the collector using gRPC with protobuf-encoded payloads.
+
+## [`otlptracehttp`](https://pkg.go.dev/go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp)
+
+The `otlptracehttp` package implements a client for the span exporter that sends trace telemetry data to the collector using HTTP with protobuf-encoded payloads.
+
+## Configuration
+
+### Environment Variables
+
+The following environment variables can be used (instead of options objects) to
+override the default configuration. For more information about how each of
+these environment variables is interpreted, see [the OpenTelemetry
+specification](https://github.com/open-telemetry/opentelemetry-specification/blob/v1.8.0/specification/protocol/exporter.md).
+
+| Environment variable                                                      | Option                        | Default value            |
+| ------------------------------------------------------------------------ | ----------------------------- | ------------------------ |
+| `OTEL_EXPORTER_OTLP_ENDPOINT` `OTEL_EXPORTER_OTLP_TRACES_ENDPOINT`        | `WithEndpoint` `WithInsecure` | `https://localhost:4317` |
+| `OTEL_EXPORTER_OTLP_CERTIFICATE` `OTEL_EXPORTER_OTLP_TRACES_CERTIFICATE`  | `WithTLSClientConfig`         |                          |
+| `OTEL_EXPORTER_OTLP_HEADERS` `OTEL_EXPORTER_OTLP_TRACES_HEADERS`          | `WithHeaders`                 |                          |
+| `OTEL_EXPORTER_OTLP_COMPRESSION` `OTEL_EXPORTER_OTLP_TRACES_COMPRESSION`  | `WithCompression`             |                          |
+| `OTEL_EXPORTER_OTLP_TIMEOUT` `OTEL_EXPORTER_OTLP_TRACES_TIMEOUT`          | `WithTimeout`                 | `10s`                    |
+
+Configuration using options has precedence over the environment variables.
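+
+### Example
+
+A minimal sketch wiring a gRPC client into the exporter. It assumes the
+`otlptracegrpc.NewClient` constructor and the `WithEndpoint`/`WithInsecure`
+options referenced in the table above:
+
+```go
+package main
+
+import (
+	"context"
+	"log"
+
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace"
+	"go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc"
+)
+
+func main() {
+	ctx := context.Background()
+	// Options take precedence over the corresponding environment variables.
+	client := otlptracegrpc.NewClient(
+		otlptracegrpc.WithEndpoint("localhost:4317"),
+		otlptracegrpc.WithInsecure(),
+	)
+	exporter, err := otlptrace.New(ctx, client)
+	if err != nil {
+		log.Fatalf("creating OTLP trace exporter: %v", err)
+	}
+	defer func() { _ = exporter.Shutdown(ctx) }()
+}
+```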
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go new file mode 100644 index 0000000000..dbb40cf582 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/clients.go @@ -0,0 +1,54 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// Client manages connections to the collector, handles the +// transformation of data into wire format, and the transmission of that +// data to the collector. +type Client interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Start should establish connection(s) to endpoint(s). It is + // called just once by the exporter, so the implementation + // does not need to worry about idempotence and locking. + Start(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Stop should close the connections. The function is called + // only once by the exporter, so the implementation does not + // need to worry about idempotence, but it may be called + // concurrently with UploadTraces, so proper + // locking is required. The function serves as a + // synchronization point - after the function returns, the + // process of closing connections is assumed to be finished. + Stop(ctx context.Context) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // UploadTraces should transform the passed traces to the wire + // format and send it to the collector. May be called + // concurrently. + UploadTraces(ctx context.Context, protoSpans []*tracepb.ResourceSpans) error + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go new file mode 100644 index 0000000000..7e9bb6c47a --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/exporter.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package otlptrace // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace" + +import ( + "context" + "errors" + "sync" + + "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + tracesdk "go.opentelemetry.io/otel/sdk/trace" +) + +var ( + errAlreadyStarted = errors.New("already started") +) + +// Exporter exports trace data in the OTLP wire format. +type Exporter struct { + client Client + + mu sync.RWMutex + started bool + + startOnce sync.Once + stopOnce sync.Once +} + +// ExportSpans exports a batch of spans. +func (e *Exporter) ExportSpans(ctx context.Context, ss []tracesdk.ReadOnlySpan) error { + protoSpans := tracetransform.Spans(ss) + if len(protoSpans) == 0 { + return nil + } + + return e.client.UploadTraces(ctx, protoSpans) +} + +// Start establishes a connection to the receiving endpoint. +func (e *Exporter) Start(ctx context.Context) error { + var err = errAlreadyStarted + e.startOnce.Do(func() { + e.mu.Lock() + e.started = true + e.mu.Unlock() + err = e.client.Start(ctx) + }) + + return err +} + +// Shutdown flushes all exports and closes all connections to the receiving endpoint. +func (e *Exporter) Shutdown(ctx context.Context) error { + e.mu.RLock() + started := e.started + e.mu.RUnlock() + + if !started { + return nil + } + + var err error + + e.stopOnce.Do(func() { + err = e.client.Stop(ctx) + e.mu.Lock() + e.started = false + e.mu.Unlock() + }) + + return err +} + +var _ tracesdk.SpanExporter = (*Exporter)(nil) + +// New constructs a new Exporter and starts it. +func New(ctx context.Context, client Client) (*Exporter, error) { + exp := NewUnstarted(client) + if err := exp.Start(ctx); err != nil { + return nil, err + } + return exp, nil +} + +// NewUnstarted constructs a new Exporter and does not start it. +func NewUnstarted(client Client) *Exporter { + return &Exporter{ + client: client, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go new file mode 100644 index 0000000000..d9086a390d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/attribute.go @@ -0,0 +1,158 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/sdk/resource" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +// KeyValues transforms a slice of attribute KeyValues into OTLP key-values. 
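+// For example, a minimal sketch (assuming attribute.String from this
+// module's attribute package):
+//
+//	kvs := []attribute.KeyValue{attribute.String("service.name", "api")}
+//	pbKVs := KeyValues(kvs) // one *commonpb.KeyValue holding a string AnyValue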
+func KeyValues(attrs []attribute.KeyValue) []*commonpb.KeyValue {
+	if len(attrs) == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, len(attrs))
+	for _, kv := range attrs {
+		out = append(out, KeyValue(kv))
+	}
+	return out
+}
+
+// Iterator transforms an attribute iterator into OTLP key-values.
+func Iterator(iter attribute.Iterator) []*commonpb.KeyValue {
+	l := iter.Len()
+	if l == 0 {
+		return nil
+	}
+
+	out := make([]*commonpb.KeyValue, 0, l)
+	for iter.Next() {
+		out = append(out, KeyValue(iter.Attribute()))
+	}
+	return out
+}
+
+// ResourceAttributes transforms a Resource into OTLP key-values.
+func ResourceAttributes(resource *resource.Resource) []*commonpb.KeyValue {
+	return Iterator(resource.Iter())
+}
+
+// KeyValue transforms an attribute KeyValue into an OTLP key-value.
+func KeyValue(kv attribute.KeyValue) *commonpb.KeyValue {
+	return &commonpb.KeyValue{Key: string(kv.Key), Value: Value(kv.Value)}
+}
+
+// Value transforms an attribute Value into an OTLP AnyValue.
+func Value(v attribute.Value) *commonpb.AnyValue {
+	av := new(commonpb.AnyValue)
+	switch v.Type() {
+	case attribute.BOOL:
+		av.Value = &commonpb.AnyValue_BoolValue{
+			BoolValue: v.AsBool(),
+		}
+	case attribute.BOOLSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: boolSliceValues(v.AsBoolSlice()),
+			},
+		}
+	case attribute.INT64:
+		av.Value = &commonpb.AnyValue_IntValue{
+			IntValue: v.AsInt64(),
+		}
+	case attribute.INT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: int64SliceValues(v.AsInt64Slice()),
+			},
+		}
+	case attribute.FLOAT64:
+		av.Value = &commonpb.AnyValue_DoubleValue{
+			DoubleValue: v.AsFloat64(),
+		}
+	case attribute.FLOAT64SLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: float64SliceValues(v.AsFloat64Slice()),
+			},
+		}
+	case attribute.STRING:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: v.AsString(),
+		}
+	case attribute.STRINGSLICE:
+		av.Value = &commonpb.AnyValue_ArrayValue{
+			ArrayValue: &commonpb.ArrayValue{
+				Values: stringSliceValues(v.AsStringSlice()),
+			},
+		}
+	default:
+		av.Value = &commonpb.AnyValue_StringValue{
+			StringValue: "INVALID",
+		}
+	}
+	return av
+}
+
+func boolSliceValues(vals []bool) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_BoolValue{
+				BoolValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func int64SliceValues(vals []int64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_IntValue{
+				IntValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func float64SliceValues(vals []float64) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_DoubleValue{
+				DoubleValue: v,
+			},
+		}
+	}
+	return converted
+}
+
+func stringSliceValues(vals []string) []*commonpb.AnyValue {
+	converted := make([]*commonpb.AnyValue, len(vals))
+	for i, v := range vals {
+		converted[i] = &commonpb.AnyValue{
+			Value: &commonpb.AnyValue_StringValue{
+				StringValue: v,
+			},
+		}
+	}
+	return converted
+}
diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go
b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go new file mode 100644 index 0000000000..6246b17f57 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/instrumentation.go @@ -0,0 +1,30 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/instrumentation" + commonpb "go.opentelemetry.io/proto/otlp/common/v1" +) + +func InstrumentationLibrary(il instrumentation.Library) *commonpb.InstrumentationLibrary { + if il == (instrumentation.Library{}) { + return nil + } + return &commonpb.InstrumentationLibrary{ + Name: il.Name, + Version: il.Version, + } +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go new file mode 100644 index 0000000000..05a1f78adb --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/resource.go @@ -0,0 +1,28 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform" + +import ( + "go.opentelemetry.io/otel/sdk/resource" + resourcepb "go.opentelemetry.io/proto/otlp/resource/v1" +) + +// Resource transforms a Resource into an OTLP Resource. +func Resource(r *resource.Resource) *resourcepb.Resource { + if r == nil { + return nil + } + return &resourcepb.Resource{Attributes: ResourceAttributes(r)} +} diff --git a/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go new file mode 100644 index 0000000000..2f0f5eacb7 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform/span.go @@ -0,0 +1,221 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package tracetransform // import "go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	tracesdk "go.opentelemetry.io/otel/sdk/trace"
+	"go.opentelemetry.io/otel/trace"
+	tracepb "go.opentelemetry.io/proto/otlp/trace/v1"
+)
+
+const (
+	maxEventsPerSpan = 128
+)
+
+// Spans transforms a slice of OpenTelemetry spans into a slice of OTLP
+// ResourceSpans.
+func Spans(sdl []tracesdk.ReadOnlySpan) []*tracepb.ResourceSpans {
+	if len(sdl) == 0 {
+		return nil
+	}
+
+	rsm := make(map[attribute.Distinct]*tracepb.ResourceSpans)
+
+	type ilsKey struct {
+		r  attribute.Distinct
+		il instrumentation.Library
+	}
+	ilsm := make(map[ilsKey]*tracepb.InstrumentationLibrarySpans)
+
+	var resources int
+	for _, sd := range sdl {
+		if sd == nil {
+			continue
+		}
+
+		rKey := sd.Resource().Equivalent()
+		iKey := ilsKey{
+			r:  rKey,
+			il: sd.InstrumentationLibrary(),
+		}
+		ils, iOk := ilsm[iKey]
+		if !iOk {
+			// Either the resource or instrumentation library was unknown.
+			ils = &tracepb.InstrumentationLibrarySpans{
+				InstrumentationLibrary: InstrumentationLibrary(sd.InstrumentationLibrary()),
+				Spans:                  []*tracepb.Span{},
+				SchemaUrl:              sd.InstrumentationLibrary().SchemaURL,
+			}
+		}
+		ils.Spans = append(ils.Spans, span(sd))
+		ilsm[iKey] = ils
+
+		rs, rOk := rsm[rKey]
+		if !rOk {
+			resources++
+			// The resource was unknown.
+			rs = &tracepb.ResourceSpans{
+				Resource:                    Resource(sd.Resource()),
+				InstrumentationLibrarySpans: []*tracepb.InstrumentationLibrarySpans{ils},
+				SchemaUrl:                   sd.Resource().SchemaURL(),
+			}
+			rsm[rKey] = rs
+			continue
+		}
+
+		// The resource has been seen before. Check if the instrumentation
+		// library lookup was unknown because if so we need to add it to the
+		// ResourceSpans. Otherwise, the instrumentation library has already
+		// been seen and the append we did above is already included in the
+		// InstrumentationLibrarySpans reference.
+		if !iOk {
+			rs.InstrumentationLibrarySpans = append(rs.InstrumentationLibrarySpans, ils)
+		}
+	}
+
+	// Transform the categorized map into a slice
+	rss := make([]*tracepb.ResourceSpans, 0, resources)
+	for _, rs := range rsm {
+		rss = append(rss, rs)
+	}
+	return rss
+}
+
+// span transforms a Span into an OTLP span.
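+// It returns nil if sd is nil.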
+func span(sd tracesdk.ReadOnlySpan) *tracepb.Span {
+	if sd == nil {
+		return nil
+	}
+
+	tid := sd.SpanContext().TraceID()
+	sid := sd.SpanContext().SpanID()
+
+	s := &tracepb.Span{
+		TraceId:                tid[:],
+		SpanId:                 sid[:],
+		TraceState:             sd.SpanContext().TraceState().String(),
+		Status:                 status(sd.Status().Code, sd.Status().Description),
+		StartTimeUnixNano:      uint64(sd.StartTime().UnixNano()),
+		EndTimeUnixNano:        uint64(sd.EndTime().UnixNano()),
+		Links:                  links(sd.Links()),
+		Kind:                   spanKind(sd.SpanKind()),
+		Name:                   sd.Name(),
+		Attributes:             KeyValues(sd.Attributes()),
+		Events:                 spanEvents(sd.Events()),
+		DroppedAttributesCount: uint32(sd.DroppedAttributes()),
+		DroppedEventsCount:     uint32(sd.DroppedEvents()),
+		DroppedLinksCount:      uint32(sd.DroppedLinks()),
+	}
+
+	if psid := sd.Parent().SpanID(); psid.IsValid() {
+		s.ParentSpanId = psid[:]
+	}
+
+	return s
+}
+
+// status transforms a span code and message into an OTLP span status.
+func status(status codes.Code, message string) *tracepb.Status {
+	var c tracepb.Status_StatusCode
+	switch status {
+	case codes.Ok:
+		c = tracepb.Status_STATUS_CODE_OK
+	case codes.Error:
+		c = tracepb.Status_STATUS_CODE_ERROR
+	default:
+		c = tracepb.Status_STATUS_CODE_UNSET
+	}
+	return &tracepb.Status{
+		Code:    c,
+		Message: message,
+	}
+}
+
+// links transforms span Links to OTLP span links.
+func links(links []tracesdk.Link) []*tracepb.Span_Link {
+	if len(links) == 0 {
+		return nil
+	}
+
+	sl := make([]*tracepb.Span_Link, 0, len(links))
+	for _, otLink := range links {
+		// This redefinition is necessary to prevent otLink.*ID[:] copies
+		// being reused -- in short we need a new otLink per iteration.
+		otLink := otLink
+
+		tid := otLink.SpanContext.TraceID()
+		sid := otLink.SpanContext.SpanID()
+
+		sl = append(sl, &tracepb.Span_Link{
+			TraceId:    tid[:],
+			SpanId:     sid[:],
+			Attributes: KeyValues(otLink.Attributes),
+		})
+	}
+	return sl
+}
+
+// spanEvents transforms span Events to OTLP span events.
+func spanEvents(es []tracesdk.Event) []*tracepb.Span_Event {
+	if len(es) == 0 {
+		return nil
+	}
+
+	evCount := len(es)
+	if evCount > maxEventsPerSpan {
+		evCount = maxEventsPerSpan
+	}
+	events := make([]*tracepb.Span_Event, 0, evCount)
+	nEvents := 0
+
+	// Transform message events
+	for _, e := range es {
+		if nEvents >= maxEventsPerSpan {
+			break
+		}
+		nEvents++
+		events = append(events,
+			&tracepb.Span_Event{
+				Name:         e.Name,
+				TimeUnixNano: uint64(e.Time.UnixNano()),
+				Attributes:   KeyValues(e.Attributes),
+				// TODO (rghetia) : Add Drop Counts when supported.
+			},
+		)
+	}
+
+	return events
+}
+
+// spanKind transforms a SpanKind to an OTLP span kind.
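+// Span kinds that have no OTLP equivalent map to SPAN_KIND_UNSPECIFIED.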
+func spanKind(kind trace.SpanKind) tracepb.Span_SpanKind {
+	switch kind {
+	case trace.SpanKindInternal:
+		return tracepb.Span_SPAN_KIND_INTERNAL
+	case trace.SpanKindClient:
+		return tracepb.Span_SPAN_KIND_CLIENT
+	case trace.SpanKindServer:
+		return tracepb.Span_SPAN_KIND_SERVER
+	case trace.SpanKindProducer:
+		return tracepb.Span_SPAN_KIND_PRODUCER
+	case trace.SpanKindConsumer:
+		return tracepb.Span_SPAN_KIND_CONSUMER
+	default:
+		return tracepb.Span_SPAN_KIND_UNSPECIFIED
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
new file mode 100644
index 0000000000..9a58fb1d37
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/get_main_pkgs.sh
@@ -0,0 +1,41 @@
+#!/usr/bin/env bash
+
+# Copyright The OpenTelemetry Authors
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+set -euo pipefail
+
+top_dir='.'
+if [[ $# -gt 0 ]]; then
+    top_dir="${1}"
+fi
+
+p=$(pwd)
+mod_dirs=()
+
+# Note `mapfile` does not exist in older bash versions:
+# https://stackoverflow.com/questions/41475261/need-alternative-to-readarray-mapfile-for-script-on-older-version-of-bash
+
+while IFS= read -r line; do
+    mod_dirs+=("$line")
+done < <(find "${top_dir}" -type f -name 'go.mod' -exec dirname {} \; | sort)
+
+for mod_dir in "${mod_dirs[@]}"; do
+    cd "${mod_dir}"
+
+    while IFS= read -r line; do
+        echo ".${line#${p}}"
+    done < <(go list --find -f '{{.Name}}|{{.Dir}}' ./... | grep '^main|' | cut -f 2- -d '|')
+    cd "${p}"
+done
diff --git a/vendor/go.opentelemetry.io/otel/handler.go b/vendor/go.opentelemetry.io/otel/handler.go
new file mode 100644
index 0000000000..35263e01ac
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/handler.go
@@ -0,0 +1,98 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"log"
+	"os"
+	"sync"
+)
+
+var (
+	// globalErrorHandler provides an ErrorHandler that can be used
+	// throughout an OpenTelemetry instrumented project. When a
+	// user-specified ErrorHandler is registered (`SetErrorHandler`) all
+	// calls to `Handle` will be delegated to the registered ErrorHandler.
+	globalErrorHandler = defaultErrorHandler()
+
+	// Compile-time check that delegator implements ErrorHandler.
+	_ ErrorHandler = (*delegator)(nil)
+	// Compile-time check that errLogger implements ErrorHandler.
+	_ ErrorHandler = (*errLogger)(nil)
+)
+
+type delegator struct {
+	lock *sync.RWMutex
+	eh   ErrorHandler
+}
+
+func (d *delegator) Handle(err error) {
+	d.lock.RLock()
+	defer d.lock.RUnlock()
+	d.eh.Handle(err)
+}
+
+// setDelegate sets the ErrorHandler delegate.
+func (d *delegator) setDelegate(eh ErrorHandler) {
+	d.lock.Lock()
+	defer d.lock.Unlock()
+	d.eh = eh
+}
+
+func defaultErrorHandler() *delegator {
+	return &delegator{
+		lock: &sync.RWMutex{},
+		eh:   &errLogger{l: log.New(os.Stderr, "", log.LstdFlags)},
+	}
+}
+
+// errLogger logs errors if no delegate is set, otherwise they are delegated.
+type errLogger struct {
+	l *log.Logger
+}
+
+// Handle logs err if no delegate is set, otherwise it is delegated.
+func (h *errLogger) Handle(err error) {
+	h.l.Print(err)
+}
+
+// GetErrorHandler returns the global ErrorHandler instance.
+//
+// The default ErrorHandler instance returned will log all errors to STDERR
+// until an override ErrorHandler is set with SetErrorHandler. All
+// ErrorHandlers returned prior to this will automatically forward errors to
+// the set instance instead of logging.
+//
+// Subsequent calls to SetErrorHandler after the first will not forward errors
+// to the new ErrorHandler for previously returned instances.
+func GetErrorHandler() ErrorHandler {
+	return globalErrorHandler
+}
+
+// SetErrorHandler sets the global ErrorHandler to h.
+//
+// The first time this is called all ErrorHandlers previously returned from
+// GetErrorHandler will send errors to h instead of the default logging
+// ErrorHandler. Subsequent calls will set the global ErrorHandler, but not
+// delegate errors to h.
+func SetErrorHandler(h ErrorHandler) {
+	globalErrorHandler.setDelegate(h)
+}
+
+// Handle is a convenience function for GetErrorHandler().Handle(err).
+func Handle(err error) {
+	GetErrorHandler().Handle(err)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
new file mode 100644
index 0000000000..b96e5408e6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/baggage.go
@@ -0,0 +1,43 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package baggage provides base types and functionality to store and retrieve
+baggage in Go context. This package exists because the OpenTracing bridge to
+OpenTelemetry needs to synchronize state whenever baggage for a context is
+modified and that context contains an OpenTracing span. If it were not for
+this need this package would not need to exist and the
+`go.opentelemetry.io/otel/baggage` package would be the singular place where
+W3C baggage is handled.
+*/
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+// List is the collection of baggage members. The W3C allows for duplicates,
+// but OpenTelemetry does not; therefore, this is represented as a map.
+type List map[string]Item
+
+// Item is the value and metadata properties part of a list-member.
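+// For example, the W3C list-member "foo=1;p=2" is stored under the key
+// "foo" as Item{Value: "1", Properties: []Property{{Key: "p", Value: "2",
+// HasValue: true}}}.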
+type Item struct {
+	Value      string
+	Properties []Property
+}
+
+// Property is a metadata entry for a list-member.
+type Property struct {
+	Key, Value string
+
+	// HasValue indicates whether the property has a value, distinguishing a
+	// property with an explicit zero-value from one with no value at all.
+	HasValue bool
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/baggage/context.go b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
new file mode 100644
index 0000000000..3c2784eea3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/baggage/context.go
@@ -0,0 +1,95 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package baggage // import "go.opentelemetry.io/otel/internal/baggage"
+
+import "context"
+
+type baggageContextKeyType int
+
+const baggageKey baggageContextKeyType = iota
+
+// SetHookFunc is a callback called when storing baggage in the context.
+type SetHookFunc func(context.Context, List) context.Context
+
+// GetHookFunc is a callback called when getting baggage from the context.
+type GetHookFunc func(context.Context, List) List
+
+type baggageState struct {
+	list List
+
+	setHook SetHookFunc
+	getHook GetHookFunc
+}
+
+// ContextWithSetHook returns a copy of parent with hook configured to be
+// invoked every time ContextWithBaggage is called.
+//
+// Passing nil SetHookFunc creates a context with no set hook to call.
+func ContextWithSetHook(parent context.Context, hook SetHookFunc) context.Context {
+	var s baggageState
+	switch v := parent.Value(baggageKey).(type) {
+	case baggageState:
+		s = v
+	}
+
+	s.setHook = hook
+	return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithGetHook returns a copy of parent with hook configured to be
+// invoked every time FromContext is called.
+//
+// Passing nil GetHookFunc creates a context with no get hook to call.
+func ContextWithGetHook(parent context.Context, hook GetHookFunc) context.Context {
+	var s baggageState
+	switch v := parent.Value(baggageKey).(type) {
+	case baggageState:
+		s = v
+	}
+
+	s.getHook = hook
+	return context.WithValue(parent, baggageKey, s)
+}
+
+// ContextWithList returns a copy of parent with baggage. Passing nil list
+// returns a context without any baggage.
+func ContextWithList(parent context.Context, list List) context.Context {
+	var s baggageState
+	switch v := parent.Value(baggageKey).(type) {
+	case baggageState:
+		s = v
+	}
+
+	s.list = list
+	ctx := context.WithValue(parent, baggageKey, s)
+	if s.setHook != nil {
+		ctx = s.setHook(ctx, list)
+	}
+
+	return ctx
+}
+
+// ListFromContext returns the baggage contained in ctx.
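+// When a get hook is set on ctx, the hook is applied to the stored List and
+// its result is returned.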
+func ListFromContext(ctx context.Context) List {
+	switch v := ctx.Value(baggageKey).(type) {
+	case baggageState:
+		if v.getHook != nil {
+			return v.getHook(ctx, v.list)
+		}
+		return v.list
+	default:
+		return nil
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
new file mode 100644
index 0000000000..0a378476b0
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/internal_logging.go
@@ -0,0 +1,63 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"log"
+	"os"
+	"sync"
+
+	"github.com/go-logr/logr"
+	"github.com/go-logr/stdr"
+)
+
+// globalLogger is the logging interface used within the otel api and sdk to
+// provide details of the internals.
+//
+// The default logger uses stdr which is backed by the standard `log.Logger`
+// interface. This logger will only show messages at the Error Level.
+var globalLogger logr.Logger = stdr.New(log.New(os.Stderr, "", log.LstdFlags|log.Lshortfile))
+var globalLoggerLock = &sync.RWMutex{}
+
+// SetLogger overrides the globalLogger with l.
+//
+// To see Info messages use a logger with `l.V(1).Enabled() == true`
+// To see Debug messages use a logger with `l.V(5).Enabled() == true`
+func SetLogger(l logr.Logger) {
+	globalLoggerLock.Lock()
+	defer globalLoggerLock.Unlock()
+	globalLogger = l
+}
+
+// Info prints messages about the general state of the API or SDK.
+// This should usually be less than 5 messages a minute.
+func Info(msg string, keysAndValues ...interface{}) {
+	globalLoggerLock.RLock()
+	defer globalLoggerLock.RUnlock()
+	globalLogger.V(1).Info(msg, keysAndValues...)
+}
+
+// Error prints messages about exceptional states of the API or SDK.
+func Error(err error, msg string, keysAndValues ...interface{}) {
+	globalLoggerLock.RLock()
+	defer globalLoggerLock.RUnlock()
+	globalLogger.Error(err, msg, keysAndValues...)
+}
+
+// Debug prints messages about all internal changes in the API or SDK.
+func Debug(msg string, keysAndValues ...interface{}) {
+	globalLoggerLock.RLock()
+	defer globalLoggerLock.RUnlock()
+	globalLogger.V(5).Info(msg, keysAndValues...)
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/propagator.go b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
new file mode 100644
index 0000000000..06bac35c2f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/propagator.go
@@ -0,0 +1,82 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// textMapPropagator is a default TextMapPropagator that delegates calls to a
+// registered delegate if one is set, otherwise it delegates the calls to the
+// default no-op propagation.TextMapPropagator.
+type textMapPropagator struct {
+	mtx      sync.Mutex
+	once     sync.Once
+	delegate propagation.TextMapPropagator
+	noop     propagation.TextMapPropagator
+}
+
+// Compile-time guarantee that textMapPropagator implements the
+// propagation.TextMapPropagator interface.
+var _ propagation.TextMapPropagator = (*textMapPropagator)(nil)
+
+func newTextMapPropagator() *textMapPropagator {
+	return &textMapPropagator{
+		noop: propagation.NewCompositeTextMapPropagator(),
+	}
+}
+
+// SetDelegate sets a delegate propagation.TextMapPropagator that all calls are
+// forwarded to. Delegation can only be performed once; all subsequent calls
+// perform no delegation.
+func (p *textMapPropagator) SetDelegate(delegate propagation.TextMapPropagator) {
+	if delegate == nil {
+		return
+	}
+
+	p.mtx.Lock()
+	p.once.Do(func() { p.delegate = delegate })
+	p.mtx.Unlock()
+}
+
+// effectiveDelegate returns the current delegate of p if one is set,
+// otherwise the default noop TextMapPropagator is returned. This method
+// can be called concurrently.
+func (p *textMapPropagator) effectiveDelegate() propagation.TextMapPropagator {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+	if p.delegate != nil {
+		return p.delegate
+	}
+	return p.noop
+}
+
+// Inject sets cross-cutting concerns from the Context into the carrier.
+func (p *textMapPropagator) Inject(ctx context.Context, carrier propagation.TextMapCarrier) {
+	p.effectiveDelegate().Inject(ctx, carrier)
+}
+
+// Extract reads cross-cutting concerns from the carrier into a Context.
+func (p *textMapPropagator) Extract(ctx context.Context, carrier propagation.TextMapCarrier) context.Context {
+	return p.effectiveDelegate().Extract(ctx, carrier)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (p *textMapPropagator) Fields() []string {
+	return p.effectiveDelegate().Fields()
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/global/state.go b/vendor/go.opentelemetry.io/otel/internal/global/state.go
new file mode 100644
index 0000000000..d6b3e900cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/global/state.go
@@ -0,0 +1,106 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+ +package global // import "go.opentelemetry.io/otel/internal/global" + +import ( + "sync" + "sync/atomic" + + "go.opentelemetry.io/otel/propagation" + "go.opentelemetry.io/otel/trace" +) + +type ( + tracerProviderHolder struct { + tp trace.TracerProvider + } + + propagatorsHolder struct { + tm propagation.TextMapPropagator + } +) + +var ( + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + + delegateTraceOnce sync.Once + delegateTextMapPropagatorOnce sync.Once +) + +// TracerProvider is the internal implementation for global.TracerProvider. +func TracerProvider() trace.TracerProvider { + return globalTracer.Load().(tracerProviderHolder).tp +} + +// SetTracerProvider is the internal implementation for global.SetTracerProvider. +func SetTracerProvider(tp trace.TracerProvider) { + delegateTraceOnce.Do(func() { + current := TracerProvider() + if current == tp { + // Setting the provider to the prior default is nonsense, panic. + // Panic is acceptable because we are likely still early in the + // process lifetime. + panic("invalid TracerProvider, the global instance cannot be reinstalled") + } else if def, ok := current.(*tracerProvider); ok { + def.setDelegate(tp) + } + + }) + globalTracer.Store(tracerProviderHolder{tp: tp}) +} + +// TextMapPropagator is the internal implementation for global.TextMapPropagator. +func TextMapPropagator() propagation.TextMapPropagator { + return globalPropagators.Load().(propagatorsHolder).tm +} + +// SetTextMapPropagator is the internal implementation for global.SetTextMapPropagator. +func SetTextMapPropagator(p propagation.TextMapPropagator) { + // For the textMapPropagator already returned by TextMapPropagator + // delegate to p. + delegateTextMapPropagatorOnce.Do(func() { + if current := TextMapPropagator(); current == p { + // Setting the provider to the prior default is nonsense, panic. + // Panic is acceptable because we are likely still early in the + // process lifetime. + panic("invalid TextMapPropagator, the global instance cannot be reinstalled") + } else if def, ok := current.(*textMapPropagator); ok { + def.SetDelegate(p) + } + }) + // Return p when subsequent calls to TextMapPropagator are made. + globalPropagators.Store(propagatorsHolder{tm: p}) +} + +func defaultTracerValue() *atomic.Value { + v := &atomic.Value{} + v.Store(tracerProviderHolder{tp: &tracerProvider{}}) + return v +} + +func defaultPropagatorsValue() *atomic.Value { + v := &atomic.Value{} + v.Store(propagatorsHolder{tm: newTextMapPropagator()}) + return v +} + +// ResetForTest restores the initial global state, for testing purposes. +func ResetForTest() { + globalTracer = defaultTracerValue() + globalPropagators = defaultPropagatorsValue() + delegateTraceOnce = sync.Once{} + delegateTextMapPropagatorOnce = sync.Once{} +} diff --git a/vendor/go.opentelemetry.io/otel/internal/global/trace.go b/vendor/go.opentelemetry.io/otel/internal/global/trace.go new file mode 100644 index 0000000000..5f008d0982 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/global/trace.go @@ -0,0 +1,192 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package global // import "go.opentelemetry.io/otel/internal/global"
+
+/*
+This file contains the forwarding implementation of the TracerProvider used as
+the default global instance. Prior to initialization of an SDK, Tracers
+returned by the global TracerProvider will provide no-op functionality. This
+means that all Spans created prior to initialization are no-op Spans.
+
+Once an SDK has been initialized, all provided no-op Tracers are swapped for
+Tracers provided by the SDK defined TracerProvider. However, any Span started
+prior to this initialization does not change its behavior; it remains a no-op
+Span.
+
+The implementation to track and swap Tracers locks all new Tracer creation
+until the swap is complete. This assumes that this operation is not
+performance-critical. If that assumption is incorrect, be sure to configure an
+SDK prior to any Tracer creation.
+*/
+
+import (
+	"context"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/codes"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// tracerProvider is a placeholder for a configured SDK TracerProvider.
+//
+// All TracerProvider functionality is forwarded to a delegate once
+// configured.
+type tracerProvider struct {
+	mtx      sync.Mutex
+	tracers  map[il]*tracer
+	delegate trace.TracerProvider
+}
+
+// Compile-time guarantee that tracerProvider implements the TracerProvider
+// interface.
+var _ trace.TracerProvider = &tracerProvider{}
+
+// setDelegate configures p to delegate all TracerProvider functionality to
+// provider.
+//
+// All Tracers provided prior to this function call are switched out to be
+// Tracers provided by provider.
+//
+// It is guaranteed by the caller that this happens only once.
+func (p *tracerProvider) setDelegate(provider trace.TracerProvider) {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	p.delegate = provider
+
+	if len(p.tracers) == 0 {
+		return
+	}
+
+	for _, t := range p.tracers {
+		t.setDelegate(provider)
+	}
+
+	p.tracers = nil
+}
+
+// Tracer implements TracerProvider.
+func (p *tracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer {
+	p.mtx.Lock()
+	defer p.mtx.Unlock()
+
+	if p.delegate != nil {
+		return p.delegate.Tracer(name, opts...)
+	}
+
+	// At this moment it is guaranteed that no SDK is installed; save the
+	// tracer in the tracers map.
+
+	c := trace.NewTracerConfig(opts...)
+	key := il{
+		name:    name,
+		version: c.InstrumentationVersion(),
+	}
+
+	if p.tracers == nil {
+		p.tracers = make(map[il]*tracer)
+	}
+
+	if val, ok := p.tracers[key]; ok {
+		return val
+	}
+
+	t := &tracer{name: name, opts: opts, provider: p}
+	p.tracers[key] = t
+	return t
+}
+
+type il struct {
+	name    string
+	version string
+}
+
+// tracer is a placeholder for a trace.Tracer.
+//
+// All Tracer functionality is forwarded to a delegate once configured.
+// Otherwise, all functionality is forwarded to a NoopTracer.
+type tracer struct {
+	name     string
+	opts     []trace.TracerOption
+	provider *tracerProvider
+
+	delegate atomic.Value
+}
+
+// Compile-time guarantee that tracer implements the trace.Tracer interface.
+var _ trace.Tracer = &tracer{} + +// setDelegate configures t to delegate all Tracer functionality to Tracers +// created by provider. +// +// All subsequent calls to the Tracer methods will be passed to the delegate. +// +// It is guaranteed by the caller that this happens only once. +func (t *tracer) setDelegate(provider trace.TracerProvider) { + t.delegate.Store(provider.Tracer(t.name, t.opts...)) +} + +// Start implements trace.Tracer by forwarding the call to t.delegate if +// set, otherwise it forwards the call to a NoopTracer. +func (t *tracer) Start(ctx context.Context, name string, opts ...trace.SpanStartOption) (context.Context, trace.Span) { + delegate := t.delegate.Load() + if delegate != nil { + return delegate.(trace.Tracer).Start(ctx, name, opts...) + } + + s := nonRecordingSpan{sc: trace.SpanContextFromContext(ctx), tracer: t} + ctx = trace.ContextWithSpan(ctx, s) + return ctx, s +} + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext. +type nonRecordingSpan struct { + sc trace.SpanContext + tracer *tracer +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE b/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. 
+ + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. 
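The two files that follow vendor the global meter delegation machinery. As a rough, self-contained sketch of the forwarding pattern they implement (all names below are illustrative stand-ins, not the vendored API): a delegating provider hands out inert placeholder instruments, remembers them, and swaps real implementations in once an SDK registers.

```go
package main

import (
	"fmt"
	"sync"
)

// counter is a placeholder instrument: it is a no-op until an SDK-provided
// recording function is swapped in.
type counter struct {
	name string

	mu     sync.Mutex
	record func(int64)
}

func (c *counter) Add(v int64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	if c.record != nil {
		c.record(v) // forwarded to the installed "SDK"
	}
	// Otherwise the measurement is dropped, like the global placeholders.
}

// provider hands out placeholder counters and remembers them so they can be
// delegated later.
type provider struct {
	mu      sync.Mutex
	factory func(name string) func(int64)
	pending []*counter
}

func (p *provider) Counter(name string) *counter {
	p.mu.Lock()
	defer p.mu.Unlock()
	if p.factory != nil {
		return &counter{name: name, record: p.factory(name)}
	}
	c := &counter{name: name}
	p.pending = append(p.pending, c)
	return c
}

// SetDelegate installs the real implementation and swaps every placeholder.
func (p *provider) SetDelegate(factory func(name string) func(int64)) {
	p.mu.Lock()
	defer p.mu.Unlock()
	p.factory = factory
	for _, c := range p.pending {
		c.mu.Lock()
		c.record = factory(c.name)
		c.mu.Unlock()
	}
	p.pending = nil
}

func main() {
	p := &provider{}
	c := p.Counter("requests")
	c.Add(1) // dropped: no SDK installed yet

	p.SetDelegate(func(name string) func(int64) {
		return func(v int64) { fmt.Printf("%s += %d\n", name, v) }
	})
	c.Add(2) // prints "requests += 2"
}
```

The vendored code below follows the same shape, with sdkapi interfaces, instrument-uniqueness checking, and atomic pointers in place of the simplified callbacks used here.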
diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go new file mode 100644 index 0000000000..77781ead11 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/metric/global/meter.go @@ -0,0 +1,287 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/internal/metric/global" + +import ( + "context" + "sync" + "sync/atomic" + "unsafe" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/internal/metric/registry" + "go.opentelemetry.io/otel/metric" + "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" +) + +// This file contains the forwarding implementation of MeterProvider used as +// the default global instance. Metric events using instruments provided by +// this implementation are no-ops until the first Meter implementation is set +// as the global provider. +// +// The implementation here uses Mutexes to maintain a list of active Meters in +// the MeterProvider and Instruments in each Meter, under the assumption that +// these interfaces are not performance-critical. +// +// We have the invariant that setDelegate() will be called before a new +// MeterProvider implementation is registered as the global provider. Mutexes +// in the MeterProvider and Meters ensure that each instrument has a delegate +// before the global provider is set. +// +// Metric uniqueness checking is implemented by calling the exported +// methods of the api/metric/registry package. + +type meterKey struct { + InstrumentationName string + InstrumentationVersion string + SchemaURL string +} + +type meterProvider struct { + delegate metric.MeterProvider + + // lock protects `delegate` and `meters`. + lock sync.Mutex + + // meters maintains a unique entry for every named Meter + // that has been registered through the global instance. 
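+	// Entries are handed their delegate when an SDK MeterProvider is
+	// installed, after which the map is cleared.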
+ meters map[meterKey]*meterEntry +} + +type meterImpl struct { + delegate unsafe.Pointer // (*metric.MeterImpl) + + lock sync.Mutex + syncInsts []*syncImpl + asyncInsts []*asyncImpl +} + +type meterEntry struct { + unique sdkapi.MeterImpl + impl meterImpl +} + +type instrument struct { + descriptor sdkapi.Descriptor +} + +type syncImpl struct { + delegate unsafe.Pointer // (*sdkapi.SyncImpl) + + instrument +} + +type asyncImpl struct { + delegate unsafe.Pointer // (*sdkapi.AsyncImpl) + + instrument + + runner sdkapi.AsyncRunner +} + +var _ metric.MeterProvider = &meterProvider{} +var _ sdkapi.MeterImpl = &meterImpl{} +var _ sdkapi.InstrumentImpl = &syncImpl{} +var _ sdkapi.AsyncImpl = &asyncImpl{} + +func (inst *instrument) Descriptor() sdkapi.Descriptor { + return inst.descriptor +} + +// MeterProvider interface and delegation + +func newMeterProvider() *meterProvider { + return &meterProvider{ + meters: map[meterKey]*meterEntry{}, + } +} + +func (p *meterProvider) setDelegate(provider metric.MeterProvider) { + p.lock.Lock() + defer p.lock.Unlock() + + p.delegate = provider + for key, entry := range p.meters { + entry.impl.setDelegate(key, provider) + } + p.meters = nil +} + +func (p *meterProvider) Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { + p.lock.Lock() + defer p.lock.Unlock() + + if p.delegate != nil { + return p.delegate.Meter(instrumentationName, opts...) + } + + cfg := metric.NewMeterConfig(opts...) + key := meterKey{ + InstrumentationName: instrumentationName, + InstrumentationVersion: cfg.InstrumentationVersion(), + SchemaURL: cfg.SchemaURL(), + } + entry, ok := p.meters[key] + if !ok { + entry = &meterEntry{} + // Note: This code implements its own MeterProvider + // name-uniqueness logic because there is + // synchronization required at the moment of + // delegation. We use the same instrument-uniqueness + // checking the real SDK uses here: + entry.unique = registry.NewUniqueInstrumentMeterImpl(&entry.impl) + p.meters[key] = entry + } + return metric.WrapMeterImpl(entry.unique) +} + +// Meter interface and delegation + +func (m *meterImpl) setDelegate(key meterKey, provider metric.MeterProvider) { + m.lock.Lock() + defer m.lock.Unlock() + + d := new(sdkapi.MeterImpl) + *d = provider.Meter( + key.InstrumentationName, + metric.WithInstrumentationVersion(key.InstrumentationVersion), + metric.WithSchemaURL(key.SchemaURL), + ).MeterImpl() + m.delegate = unsafe.Pointer(d) + + for _, inst := range m.syncInsts { + inst.setDelegate(*d) + } + m.syncInsts = nil + for _, obs := range m.asyncInsts { + obs.setDelegate(*d) + } + m.asyncInsts = nil +} + +func (m *meterImpl) NewSyncInstrument(desc sdkapi.Descriptor) (sdkapi.SyncImpl, error) { + m.lock.Lock() + defer m.lock.Unlock() + + if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { + return (*meterPtr).NewSyncInstrument(desc) + } + + inst := &syncImpl{ + instrument: instrument{ + descriptor: desc, + }, + } + m.syncInsts = append(m.syncInsts, inst) + return inst, nil +} + +// Synchronous delegation + +func (inst *syncImpl) setDelegate(d sdkapi.MeterImpl) { + implPtr := new(sdkapi.SyncImpl) + + var err error + *implPtr, err = d.NewSyncInstrument(inst.descriptor) + + if err != nil { + // TODO: There is no standard way to deliver this error to the user. + // See https://github.com/open-telemetry/opentelemetry-go/issues/514 + // Note that the default SDK will not generate any errors yet, this is + // only for added safety. 
+ panic(err) + } + + atomic.StorePointer(&inst.delegate, unsafe.Pointer(implPtr)) +} + +func (inst *syncImpl) Implementation() interface{} { + if implPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); implPtr != nil { + return (*implPtr).Implementation() + } + return inst +} + +// Async delegation + +func (m *meterImpl) NewAsyncInstrument( + desc sdkapi.Descriptor, + runner sdkapi.AsyncRunner, +) (sdkapi.AsyncImpl, error) { + + m.lock.Lock() + defer m.lock.Unlock() + + if meterPtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); meterPtr != nil { + return (*meterPtr).NewAsyncInstrument(desc, runner) + } + + inst := &asyncImpl{ + instrument: instrument{ + descriptor: desc, + }, + runner: runner, + } + m.asyncInsts = append(m.asyncInsts, inst) + return inst, nil +} + +func (obs *asyncImpl) Implementation() interface{} { + if implPtr := (*sdkapi.AsyncImpl)(atomic.LoadPointer(&obs.delegate)); implPtr != nil { + return (*implPtr).Implementation() + } + return obs +} + +func (obs *asyncImpl) setDelegate(d sdkapi.MeterImpl) { + implPtr := new(sdkapi.AsyncImpl) + + var err error + *implPtr, err = d.NewAsyncInstrument(obs.descriptor, obs.runner) + + if err != nil { + // TODO: There is no standard way to deliver this error to the user. + // See https://github.com/open-telemetry/opentelemetry-go/issues/514 + // Note that the default SDK will not generate any errors yet, this is + // only for added safety. + panic(err) + } + + atomic.StorePointer(&obs.delegate, unsafe.Pointer(implPtr)) +} + +// Metric updates + +func (m *meterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurements ...sdkapi.Measurement) { + if delegatePtr := (*sdkapi.MeterImpl)(atomic.LoadPointer(&m.delegate)); delegatePtr != nil { + (*delegatePtr).RecordBatch(ctx, labels, measurements...) + } +} + +func (inst *syncImpl) RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue) { + if instPtr := (*sdkapi.SyncImpl)(atomic.LoadPointer(&inst.delegate)); instPtr != nil { + (*instPtr).RecordOne(ctx, number, labels) + } +} + +func AtomicFieldOffsets() map[string]uintptr { + return map[string]uintptr{ + "meterProvider.delegate": unsafe.Offsetof(meterProvider{}.delegate), + "meterImpl.delegate": unsafe.Offsetof(meterImpl{}.delegate), + "syncImpl.delegate": unsafe.Offsetof(syncImpl{}.delegate), + "asyncImpl.delegate": unsafe.Offsetof(asyncImpl{}.delegate), + } +} diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go new file mode 100644 index 0000000000..896c6dd1c3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/metric/global/metric.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package global // import "go.opentelemetry.io/otel/internal/metric/global"
+
+import (
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/metric"
+)
+
+type meterProviderHolder struct {
+	mp metric.MeterProvider
+}
+
+var (
+	globalMeter = defaultMeterValue()
+
+	delegateMeterOnce sync.Once
+)
+
+// MeterProvider is the internal implementation for global.MeterProvider.
+func MeterProvider() metric.MeterProvider {
+	return globalMeter.Load().(meterProviderHolder).mp
+}
+
+// SetMeterProvider is the internal implementation for global.SetMeterProvider.
+func SetMeterProvider(mp metric.MeterProvider) {
+	delegateMeterOnce.Do(func() {
+		current := MeterProvider()
+
+		if current == mp {
+			// Setting the provider to the prior default is nonsense, panic.
+			// Panic is acceptable because we are likely still early in the
+			// process lifetime.
+			panic("invalid MeterProvider, the global instance cannot be reinstalled")
+		} else if def, ok := current.(*meterProvider); ok {
+			def.setDelegate(mp)
+		}
+	})
+	globalMeter.Store(meterProviderHolder{mp: mp})
+}
+
+func defaultMeterValue() *atomic.Value {
+	v := &atomic.Value{}
+	v.Store(meterProviderHolder{mp: newMeterProvider()})
+	return v
+}
+
+// ResetForTest restores the initial global state, for testing purposes.
+func ResetForTest() {
+	globalMeter = defaultMeterValue()
+	delegateMeterOnce = sync.Once{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go
new file mode 100644
index 0000000000..2f17562f0e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/metric/registry/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package registry provides a non-standalone implementation of
+MeterProvider that adds uniqueness checking for instrument descriptors
+on top of the MeterProvider it wraps.
+
+This package is currently in a pre-GA phase. Backwards incompatible changes
+may be introduced in subsequent minor version releases as we work to track the
+evolving OpenTelemetry specification and user feedback.
+*/
+package registry // import "go.opentelemetry.io/otel/internal/metric/registry"
diff --git a/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go b/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go
new file mode 100644
index 0000000000..c929bf45c8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/internal/metric/registry/registry.go
@@ -0,0 +1,139 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package registry // import "go.opentelemetry.io/otel/internal/metric/registry" + +import ( + "context" + "fmt" + "sync" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/sdkapi" +) + +// UniqueInstrumentMeterImpl implements the metric.MeterImpl interface, adding +// uniqueness checking for instrument descriptors. +type UniqueInstrumentMeterImpl struct { + lock sync.Mutex + impl sdkapi.MeterImpl + state map[string]sdkapi.InstrumentImpl +} + +var _ sdkapi.MeterImpl = (*UniqueInstrumentMeterImpl)(nil) + +// ErrMetricKindMismatch is the standard error for mismatched metric +// instrument definitions. +var ErrMetricKindMismatch = fmt.Errorf( + "a metric was already registered by this name with another kind or number type") + +// NewUniqueInstrumentMeterImpl returns a wrapped metric.MeterImpl +// with the addition of instrument name uniqueness checking. +func NewUniqueInstrumentMeterImpl(impl sdkapi.MeterImpl) *UniqueInstrumentMeterImpl { + return &UniqueInstrumentMeterImpl{ + impl: impl, + state: map[string]sdkapi.InstrumentImpl{}, + } +} + +// MeterImpl gives the caller access to the underlying MeterImpl +// used by this UniqueInstrumentMeterImpl. +func (u *UniqueInstrumentMeterImpl) MeterImpl() sdkapi.MeterImpl { + return u.impl +} + +// RecordBatch implements sdkapi.MeterImpl. +func (u *UniqueInstrumentMeterImpl) RecordBatch(ctx context.Context, labels []attribute.KeyValue, ms ...sdkapi.Measurement) { + u.impl.RecordBatch(ctx, labels, ms...) +} + +// NewMetricKindMismatchError formats an error that describes a +// mismatched metric instrument definition. +func NewMetricKindMismatchError(desc sdkapi.Descriptor) error { + return fmt.Errorf("metric %s registered as %s %s: %w", + desc.Name(), + desc.NumberKind(), + desc.InstrumentKind(), + ErrMetricKindMismatch) +} + +// Compatible determines whether two sdkapi.Descriptors are considered +// the same for the purpose of uniqueness checking. +func Compatible(candidate, existing sdkapi.Descriptor) bool { + return candidate.InstrumentKind() == existing.InstrumentKind() && + candidate.NumberKind() == existing.NumberKind() +} + +// checkUniqueness returns an ErrMetricKindMismatch error if there is +// a conflict between a descriptor that was already registered and the +// `descriptor` argument. If there is an existing compatible +// registration, this returns the already-registered instrument. If +// there is no conflict and no prior registration, returns (nil, nil). +func (u *UniqueInstrumentMeterImpl) checkUniqueness(descriptor sdkapi.Descriptor) (sdkapi.InstrumentImpl, error) { + impl, ok := u.state[descriptor.Name()] + if !ok { + return nil, nil + } + + if !Compatible(descriptor, impl.Descriptor()) { + return nil, NewMetricKindMismatchError(impl.Descriptor()) + } + + return impl, nil +} + +// NewSyncInstrument implements sdkapi.MeterImpl. 
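+// If a compatible instrument is already registered under the requested name,
+// that instrument is returned instead of a new one being created.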
+func (u *UniqueInstrumentMeterImpl) NewSyncInstrument(descriptor sdkapi.Descriptor) (sdkapi.SyncImpl, error) { + u.lock.Lock() + defer u.lock.Unlock() + + impl, err := u.checkUniqueness(descriptor) + + if err != nil { + return nil, err + } else if impl != nil { + return impl.(sdkapi.SyncImpl), nil + } + + syncInst, err := u.impl.NewSyncInstrument(descriptor) + if err != nil { + return nil, err + } + u.state[descriptor.Name()] = syncInst + return syncInst, nil +} + +// NewAsyncInstrument implements sdkapi.MeterImpl. +func (u *UniqueInstrumentMeterImpl) NewAsyncInstrument( + descriptor sdkapi.Descriptor, + runner sdkapi.AsyncRunner, +) (sdkapi.AsyncImpl, error) { + u.lock.Lock() + defer u.lock.Unlock() + + impl, err := u.checkUniqueness(descriptor) + + if err != nil { + return nil, err + } else if impl != nil { + return impl.(sdkapi.AsyncImpl), nil + } + + asyncInst, err := u.impl.NewAsyncInstrument(descriptor, runner) + if err != nil { + return nil, err + } + u.state[descriptor.Name()] = asyncInst + return asyncInst, nil +} diff --git a/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go new file mode 100644 index 0000000000..ce7afaa188 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal/rawhelpers.go @@ -0,0 +1,55 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/internal" + +import ( + "math" + "unsafe" +) + +func BoolToRaw(b bool) uint64 { + if b { + return 1 + } + return 0 +} + +func RawToBool(r uint64) bool { + return r != 0 +} + +func Int64ToRaw(i int64) uint64 { + return uint64(i) +} + +func RawToInt64(r uint64) int64 { + return int64(r) +} + +func Float64ToRaw(f float64) uint64 { + return math.Float64bits(f) +} + +func RawToFloat64(r uint64) float64 { + return math.Float64frombits(r) +} + +func RawPtrToFloat64Ptr(r *uint64) *float64 { + return (*float64)(unsafe.Pointer(r)) +} + +func RawPtrToInt64Ptr(r *uint64) *int64 { + return (*int64)(unsafe.Pointer(r)) +} diff --git a/vendor/go.opentelemetry.io/otel/internal_logging.go b/vendor/go.opentelemetry.io/otel/internal_logging.go new file mode 100644 index 0000000000..c4f8acd5d8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/internal_logging.go @@ -0,0 +1,26 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
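The raw helpers in rawhelpers.go above encode every supported value kind into a uint64 bit pattern, letting the SDK store any measurement in one word; the pointer casts additionally rely on uint64, int64, and float64 sharing size and alignment. A quick round-trip check using only the standard-library calls the helpers wrap:

package main

import (
	"fmt"
	"math"
)

func main() {
	// Float64ToRaw / RawToFloat64 wrap these two stdlib calls.
	raw := math.Float64bits(3.14)
	fmt.Println(math.Float64frombits(raw)) // 3.14, lossless

	// Int64ToRaw / RawToInt64 are plain two's-complement reinterpretations.
	r := uint64(int64(-42))
	fmt.Println(int64(r)) // -42

	// BoolToRaw / RawToBool: zero is false, anything else is true.
	fmt.Println(uint64(1) != 0) // true
}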
+ +package otel // import "go.opentelemetry.io/otel" + +import ( + "github.com/go-logr/logr" + + "go.opentelemetry.io/otel/internal/global" +) + +// SetLogger configures the logger used internally to opentelemetry. +func SetLogger(logger logr.Logger) { + global.SetLogger(logger) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/LICENSE b/vendor/go.opentelemetry.io/otel/metric/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/metric/config.go b/vendor/go.opentelemetry.io/otel/metric/config.go new file mode 100644 index 0000000000..3f722344fa --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/config.go @@ -0,0 +1,128 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "go.opentelemetry.io/otel/metric/unit" +) + +// InstrumentConfig contains options for metric instrument descriptors. +type InstrumentConfig struct { + description string + unit unit.Unit +} + +// Description describes the instrument in human-readable terms. +func (cfg *InstrumentConfig) Description() string { + return cfg.description +} + +// Unit describes the measurement unit for a instrument. +func (cfg *InstrumentConfig) Unit() unit.Unit { + return cfg.unit +} + +// InstrumentOption is an interface for applying metric instrument options. +type InstrumentOption interface { + // ApplyMeter is used to set a InstrumentOption value of a + // InstrumentConfig. + applyInstrument(InstrumentConfig) InstrumentConfig +} + +// NewInstrumentConfig creates a new InstrumentConfig +// and applies all the given options. +func NewInstrumentConfig(opts ...InstrumentOption) InstrumentConfig { + var config InstrumentConfig + for _, o := range opts { + config = o.applyInstrument(config) + } + return config +} + +type instrumentOptionFunc func(InstrumentConfig) InstrumentConfig + +func (fn instrumentOptionFunc) applyInstrument(cfg InstrumentConfig) InstrumentConfig { + return fn(cfg) +} + +// WithDescription applies provided description. 
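config.go defines two small functional-option sets, one per instrument and one per Meter. A usage sketch, assuming the unit constants (e.g. unit.Milliseconds) shipped in the vendored go.opentelemetry.io/otel/metric/unit package:

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/unit"
)

func main() {
	// Instrument-level options: description and unit.
	icfg := metric.NewInstrumentConfig(
		metric.WithDescription("request latency"),
		metric.WithUnit(unit.Milliseconds),
	)
	fmt.Println(icfg.Description(), icfg.Unit())

	// Meter-level options: instrumentation version and schema URL.
	mcfg := metric.NewMeterConfig(
		metric.WithInstrumentationVersion("0.25.0"),
		metric.WithSchemaURL("https://opentelemetry.io/schemas/1.7.0"),
	)
	fmt.Println(mcfg.InstrumentationVersion(), mcfg.SchemaURL())
}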
+func WithDescription(desc string) InstrumentOption { + return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { + cfg.description = desc + return cfg + }) +} + +// WithUnit applies provided unit. +func WithUnit(unit unit.Unit) InstrumentOption { + return instrumentOptionFunc(func(cfg InstrumentConfig) InstrumentConfig { + cfg.unit = unit + return cfg + }) +} + +// MeterConfig contains options for Meters. +type MeterConfig struct { + instrumentationVersion string + schemaURL string +} + +// InstrumentationVersion is the version of the library providing instrumentation. +func (cfg *MeterConfig) InstrumentationVersion() string { + return cfg.instrumentationVersion +} + +// SchemaURL is the schema_url of the library providing instrumentation. +func (cfg *MeterConfig) SchemaURL() string { + return cfg.schemaURL +} + +// MeterOption is an interface for applying Meter options. +type MeterOption interface { + // ApplyMeter is used to set a MeterOption value of a MeterConfig. + applyMeter(MeterConfig) MeterConfig +} + +// NewMeterConfig creates a new MeterConfig and applies +// all the given options. +func NewMeterConfig(opts ...MeterOption) MeterConfig { + var config MeterConfig + for _, o := range opts { + config = o.applyMeter(config) + } + return config +} + +type meterOptionFunc func(MeterConfig) MeterConfig + +func (fn meterOptionFunc) applyMeter(cfg MeterConfig) MeterConfig { + return fn(cfg) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.instrumentationVersion = version + return config + }) +} + +// WithSchemaURL sets the schema URL. +func WithSchemaURL(schemaURL string) MeterOption { + return meterOptionFunc(func(config MeterConfig) MeterConfig { + config.schemaURL = schemaURL + return config + }) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/doc.go b/vendor/go.opentelemetry.io/otel/metric/doc.go new file mode 100644 index 0000000000..4baf0719fc --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/doc.go @@ -0,0 +1,67 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package metric provides an implementation of the metrics part of the +OpenTelemetry API. + +This package is currently in a pre-GA phase. Backwards incompatible changes +may be introduced in subsequent minor version releases as we work to track the +evolving OpenTelemetry specification and user feedback. + +Measurements can be made about an operation being performed or the state of a +system in general. These measurements can be crucial to the reliable operation +of code and provide valuable insights about the inner workings of a system. + +Measurements are made using instruments provided by this package. The type of +instrument used will depend on the type of measurement being made and of what +part of a system is being measured. 
+ +Instruments are categorized as Synchronous or Asynchronous and independently +as Adding or Grouping. Synchronous instruments are called by the user with a +Context. Asynchronous instruments are called by the SDK during collection. +Adding instruments are semantically intended for capturing a sum. Grouping +instruments are intended for capturing a distribution. + +Adding instruments may be monotonic, in which case they are non-decreasing +and naturally define a rate. + +The synchronous instrument names are: + + Counter: adding, monotonic + UpDownCounter: adding + Histogram: grouping + +and the asynchronous instruments are: + + CounterObserver: adding, monotonic + UpDownCounterObserver: adding + GaugeObserver: grouping + +All instruments are provided with support for either float64 or int64 input +values. + +An instrument is created using a Meter. Additionally, a Meter is used to +record batches of synchronous measurements or asynchronous observations. A +Meter is obtained using a MeterProvider. A Meter, like a Tracer, is unique to +the instrumentation it instruments and must be named and versioned when +created with a MeterProvider with the name and version of the instrumentation +library. + +Instrumentation should be designed to accept a MeterProvider from which it can +create its own unique Meter. Alternatively, the registered global +MeterProvider from the go.opentelemetry.io/otel package can be used as a +default. +*/ +package metric // import "go.opentelemetry.io/otel/metric" diff --git a/vendor/go.opentelemetry.io/otel/metric/global/metric.go b/vendor/go.opentelemetry.io/otel/metric/global/metric.go new file mode 100644 index 0000000000..14ba862002 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/global/metric.go @@ -0,0 +1,49 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package global // import "go.opentelemetry.io/otel/metric/global" + +import ( + "go.opentelemetry.io/otel/internal/metric/global" + "go.opentelemetry.io/otel/metric" +) + +// Meter creates an implementation of the Meter interface from the global +// MeterProvider. The instrumentationName must be the name of the library +// providing instrumentation. This name may be the same as the instrumented +// code only if that code provides built-in instrumentation. If the +// instrumentationName is empty, then a implementation defined default name +// will be used instead. +// +// This is short for MeterProvider().Meter(name) +func Meter(instrumentationName string, opts ...metric.MeterOption) metric.Meter { + return GetMeterProvider().Meter(instrumentationName, opts...) +} + +// GetMeterProvider returns the registered global meter provider. If +// none is registered then a default meter provider is returned that +// forwards the Meter interface to the first registered Meter. +// +// Use the meter provider to create a named meter. E.g. 
+// meter := global.MeterProvider().Meter("example.com/foo") +// or +// meter := global.Meter("example.com/foo") +func GetMeterProvider() metric.MeterProvider { + return global.MeterProvider() +} + +// SetMeterProvider registers `mp` as the global meter provider. +func SetMeterProvider(mp metric.MeterProvider) { + global.SetMeterProvider(mp) +} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric.go b/vendor/go.opentelemetry.io/otel/metric/metric.go new file mode 100644 index 0000000000..d8c5a6b3f3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/metric.go @@ -0,0 +1,538 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" +) + +// MeterProvider supports named Meter instances. +type MeterProvider interface { + // Meter creates an implementation of the Meter interface. + // The instrumentationName must be the name of the library providing + // instrumentation. This name may be the same as the instrumented code + // only if that code provides built-in instrumentation. If the + // instrumentationName is empty, then a implementation defined default + // name will be used instead. + Meter(instrumentationName string, opts ...MeterOption) Meter +} + +// Meter is the creator of metric instruments. +// +// An uninitialized Meter is a no-op implementation. +type Meter struct { + impl sdkapi.MeterImpl +} + +// WrapMeterImpl constructs a `Meter` implementation from a +// `MeterImpl` implementation. +func WrapMeterImpl(impl sdkapi.MeterImpl) Meter { + return Meter{ + impl: impl, + } +} + +// Measurement is used for reporting a synchronous batch of metric +// values. Instances of this type should be created by synchronous +// instruments (e.g., Int64Counter.Measurement()). +// +// Note: This is an alias because it is a first-class member of the +// API but is also part of the lower-level sdkapi interface. +type Measurement = sdkapi.Measurement + +// Observation is used for reporting an asynchronous batch of metric +// values. Instances of this type should be created by asynchronous +// instruments (e.g., Int64GaugeObserver.Observation()). +// +// Note: This is an alias because it is a first-class member of the +// API but is also part of the lower-level sdkapi interface. +type Observation = sdkapi.Observation + +// RecordBatch atomically records a batch of measurements. +func (m Meter) RecordBatch(ctx context.Context, ls []attribute.KeyValue, ms ...Measurement) { + if m.impl == nil { + return + } + m.impl.RecordBatch(ctx, ls, ms...) +} + +// NewBatchObserver creates a new BatchObserver that supports +// making batches of observations for multiple instruments. 
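Putting these pieces together, a minimal sketch of the intended call path: fetch a Meter from the registered global provider, create an instrument, and record. With no SDK installed the default provider is a no-op, so this compiles and runs but exports nothing:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	// Obtain a named Meter from the registered global MeterProvider.
	meter := global.Meter("example.com/foo")

	counter, err := meter.NewInt64Counter("requests.total")
	if err != nil {
		panic(err)
	}
	counter.Add(context.Background(), 1, attribute.String("route", "/home"))
}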
+func (m Meter) NewBatchObserver(callback BatchObserverFunc) BatchObserver { + return BatchObserver{ + meter: m, + runner: newBatchAsyncRunner(callback), + } +} + +// NewInt64Counter creates a new integer Counter instrument with the +// given name, customized with options. May return an error if the +// name is invalid (e.g., empty) or improperly registered (e.g., +// duplicate registration). +func (m Meter) NewInt64Counter(name string, options ...InstrumentOption) (Int64Counter, error) { + return wrapInt64CounterInstrument( + m.newSync(name, sdkapi.CounterInstrumentKind, number.Int64Kind, options)) +} + +// NewFloat64Counter creates a new floating point Counter with the +// given name, customized with options. May return an error if the +// name is invalid (e.g., empty) or improperly registered (e.g., +// duplicate registration). +func (m Meter) NewFloat64Counter(name string, options ...InstrumentOption) (Float64Counter, error) { + return wrapFloat64CounterInstrument( + m.newSync(name, sdkapi.CounterInstrumentKind, number.Float64Kind, options)) +} + +// NewInt64UpDownCounter creates a new integer UpDownCounter instrument with the +// given name, customized with options. May return an error if the +// name is invalid (e.g., empty) or improperly registered (e.g., +// duplicate registration). +func (m Meter) NewInt64UpDownCounter(name string, options ...InstrumentOption) (Int64UpDownCounter, error) { + return wrapInt64UpDownCounterInstrument( + m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Int64Kind, options)) +} + +// NewFloat64UpDownCounter creates a new floating point UpDownCounter with the +// given name, customized with options. May return an error if the +// name is invalid (e.g., empty) or improperly registered (e.g., +// duplicate registration). +func (m Meter) NewFloat64UpDownCounter(name string, options ...InstrumentOption) (Float64UpDownCounter, error) { + return wrapFloat64UpDownCounterInstrument( + m.newSync(name, sdkapi.UpDownCounterInstrumentKind, number.Float64Kind, options)) +} + +// NewInt64Histogram creates a new integer Histogram instrument with the +// given name, customized with options. May return an error if the +// name is invalid (e.g., empty) or improperly registered (e.g., +// duplicate registration). +func (m Meter) NewInt64Histogram(name string, opts ...InstrumentOption) (Int64Histogram, error) { + return wrapInt64HistogramInstrument( + m.newSync(name, sdkapi.HistogramInstrumentKind, number.Int64Kind, opts)) +} + +// NewFloat64Histogram creates a new floating point Histogram with the +// given name, customized with options. May return an error if the +// name is invalid (e.g., empty) or improperly registered (e.g., +// duplicate registration). +func (m Meter) NewFloat64Histogram(name string, opts ...InstrumentOption) (Float64Histogram, error) { + return wrapFloat64HistogramInstrument( + m.newSync(name, sdkapi.HistogramInstrumentKind, number.Float64Kind, opts)) +} + +// NewInt64GaugeObserver creates a new integer GaugeObserver instrument +// with the given name, running a given callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). 
+func (m Meter) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64GaugeObserver, error) { + if callback == nil { + return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapInt64GaugeObserverInstrument( + m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, + newInt64AsyncRunner(callback))) +} + +// NewFloat64GaugeObserver creates a new floating point GaugeObserver with +// the given name, running a given callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (m Meter) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64GaugeObserver, error) { + if callback == nil { + return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapFloat64GaugeObserverInstrument( + m.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, + newFloat64AsyncRunner(callback))) +} + +// NewInt64CounterObserver creates a new integer CounterObserver instrument +// with the given name, running a given callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (m Meter) NewInt64CounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64CounterObserver, error) { + if callback == nil { + return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapInt64CounterObserverInstrument( + m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, + newInt64AsyncRunner(callback))) +} + +// NewFloat64CounterObserver creates a new floating point CounterObserver with +// the given name, running a given callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (m Meter) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64CounterObserver, error) { + if callback == nil { + return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapFloat64CounterObserverInstrument( + m.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, + newFloat64AsyncRunner(callback))) +} + +// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument +// with the given name, running a given callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (m Meter) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { + if callback == nil { + return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapInt64UpDownCounterObserverInstrument( + m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, + newInt64AsyncRunner(callback))) +} + +// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with +// the given name, running a given callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). 
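The observer constructors above take the callback up front; the SDK, not the application, invokes it at collection time. A sketch of a single-instrument gauge, where countOpenFDs is a hypothetical measurement source:

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

// countOpenFDs is a hypothetical stand-in for a real measurement source.
func countOpenFDs() int64 { return 42 }

func main() {
	meter := global.Meter("example.com/foo")

	// The callback runs during collection; a no-op global provider
	// simply never calls it.
	_, err := meter.NewInt64GaugeObserver("process.open_fds",
		func(_ context.Context, result metric.Int64ObserverResult) {
			result.Observe(countOpenFDs())
		},
		metric.WithDescription("open file descriptors"),
	)
	if err != nil {
		panic(err)
	}
}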
+func (m Meter) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { + if callback == nil { + return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapFloat64UpDownCounterObserverInstrument( + m.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, + newFloat64AsyncRunner(callback))) +} + +// NewInt64GaugeObserver creates a new integer GaugeObserver instrument +// with the given name, running in a batch callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (b BatchObserver) NewInt64GaugeObserver(name string, opts ...InstrumentOption) (Int64GaugeObserver, error) { + if b.runner == nil { + return wrapInt64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapInt64GaugeObserverInstrument( + b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Int64Kind, opts, b.runner)) +} + +// NewFloat64GaugeObserver creates a new floating point GaugeObserver with +// the given name, running in a batch callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (b BatchObserver) NewFloat64GaugeObserver(name string, opts ...InstrumentOption) (Float64GaugeObserver, error) { + if b.runner == nil { + return wrapFloat64GaugeObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapFloat64GaugeObserverInstrument( + b.meter.newAsync(name, sdkapi.GaugeObserverInstrumentKind, number.Float64Kind, opts, + b.runner)) +} + +// NewInt64CounterObserver creates a new integer CounterObserver instrument +// with the given name, running in a batch callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (b BatchObserver) NewInt64CounterObserver(name string, opts ...InstrumentOption) (Int64CounterObserver, error) { + if b.runner == nil { + return wrapInt64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapInt64CounterObserverInstrument( + b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) +} + +// NewFloat64CounterObserver creates a new floating point CounterObserver with +// the given name, running in a batch callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (b BatchObserver) NewFloat64CounterObserver(name string, opts ...InstrumentOption) (Float64CounterObserver, error) { + if b.runner == nil { + return wrapFloat64CounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapFloat64CounterObserverInstrument( + b.meter.newAsync(name, sdkapi.CounterObserverInstrumentKind, number.Float64Kind, opts, + b.runner)) +} + +// NewInt64UpDownCounterObserver creates a new integer UpDownCounterObserver instrument +// with the given name, running in a batch callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). 
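Batch observers invert that flow: instruments are created from the BatchObserver and one callback reports them together, so related values come from a single snapshot. A sketch with hypothetical instrument names:

package main

import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	meter := global.Meter("example.com/foo")

	var used, limit metric.Int64GaugeObserver

	// The closure sees the instruments assigned below; the SDK runs it
	// at collection time, after registration has completed.
	batch := meter.NewBatchObserver(func(_ context.Context, result metric.BatchObserverResult) {
		result.Observe(
			[]attribute.KeyValue{attribute.String("pool", "default")},
			used.Observation(128),
			limit.Observation(512),
		)
	})

	var err error
	if used, err = batch.NewInt64GaugeObserver("pool.used"); err != nil {
		panic(err)
	}
	if limit, err = batch.NewInt64GaugeObserver("pool.limit"); err != nil {
		panic(err)
	}
}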
+func (b BatchObserver) NewInt64UpDownCounterObserver(name string, opts ...InstrumentOption) (Int64UpDownCounterObserver, error) { + if b.runner == nil { + return wrapInt64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapInt64UpDownCounterObserverInstrument( + b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Int64Kind, opts, b.runner)) +} + +// NewFloat64UpDownCounterObserver creates a new floating point UpDownCounterObserver with +// the given name, running in a batch callback, and customized with +// options. May return an error if the name is invalid (e.g., empty) +// or improperly registered (e.g., duplicate registration). +func (b BatchObserver) NewFloat64UpDownCounterObserver(name string, opts ...InstrumentOption) (Float64UpDownCounterObserver, error) { + if b.runner == nil { + return wrapFloat64UpDownCounterObserverInstrument(sdkapi.NewNoopAsyncInstrument(), nil) + } + return wrapFloat64UpDownCounterObserverInstrument( + b.meter.newAsync(name, sdkapi.UpDownCounterObserverInstrumentKind, number.Float64Kind, opts, + b.runner)) +} + +// MeterImpl returns the underlying MeterImpl of this Meter. +func (m Meter) MeterImpl() sdkapi.MeterImpl { + return m.impl +} + +// newAsync constructs one new asynchronous instrument. +func (m Meter) newAsync( + name string, + mkind sdkapi.InstrumentKind, + nkind number.Kind, + opts []InstrumentOption, + runner sdkapi.AsyncRunner, +) ( + sdkapi.AsyncImpl, + error, +) { + if m.impl == nil { + return sdkapi.NewNoopAsyncInstrument(), nil + } + cfg := NewInstrumentConfig(opts...) + desc := sdkapi.NewDescriptor(name, mkind, nkind, cfg.description, cfg.unit) + return m.impl.NewAsyncInstrument(desc, runner) +} + +// newSync constructs one new synchronous instrument. +func (m Meter) newSync( + name string, + metricKind sdkapi.InstrumentKind, + numberKind number.Kind, + opts []InstrumentOption, +) ( + sdkapi.SyncImpl, + error, +) { + if m.impl == nil { + return sdkapi.NewNoopSyncInstrument(), nil + } + cfg := NewInstrumentConfig(opts...) + desc := sdkapi.NewDescriptor(name, metricKind, numberKind, cfg.description, cfg.unit) + return m.impl.NewSyncInstrument(desc) +} + +// MeterMust is a wrapper for Meter interfaces that panics when any +// instrument constructor encounters an error. +type MeterMust struct { + meter Meter +} + +// BatchObserverMust is a wrapper for BatchObserver that panics when +// any instrument constructor encounters an error. +type BatchObserverMust struct { + batch BatchObserver +} + +// Must constructs a MeterMust implementation from a Meter, allowing +// the application to panic when any instrument constructor yields an +// error. +func Must(meter Meter) MeterMust { + return MeterMust{meter: meter} +} + +// NewInt64Counter calls `Meter.NewInt64Counter` and returns the +// instrument, panicking if it encounters an error. +func (mm MeterMust) NewInt64Counter(name string, cos ...InstrumentOption) Int64Counter { + if inst, err := mm.meter.NewInt64Counter(name, cos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64Counter calls `Meter.NewFloat64Counter` and returns the +// instrument, panicking if it encounters an error. 
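Must, defined above, trades the error returns for panics, which fits instruments built once at startup. A short sketch:

package main

import (
	"context"

	"go.opentelemetry.io/otel/metric"
	"go.opentelemetry.io/otel/metric/global"
)

func main() {
	// Constructor errors become panics; suitable for package-level init.
	must := metric.Must(global.Meter("example.com/foo"))

	requests := must.NewInt64Counter("requests.total")
	inflight := must.NewInt64UpDownCounter("requests.inflight")

	ctx := context.Background()
	requests.Add(ctx, 1)
	inflight.Add(ctx, 1)  // request starts
	inflight.Add(ctx, -1) // request ends
}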
+func (mm MeterMust) NewFloat64Counter(name string, cos ...InstrumentOption) Float64Counter { + if inst, err := mm.meter.NewFloat64Counter(name, cos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64UpDownCounter calls `Meter.NewInt64UpDownCounter` and returns the +// instrument, panicking if it encounters an error. +func (mm MeterMust) NewInt64UpDownCounter(name string, cos ...InstrumentOption) Int64UpDownCounter { + if inst, err := mm.meter.NewInt64UpDownCounter(name, cos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64UpDownCounter calls `Meter.NewFloat64UpDownCounter` and returns the +// instrument, panicking if it encounters an error. +func (mm MeterMust) NewFloat64UpDownCounter(name string, cos ...InstrumentOption) Float64UpDownCounter { + if inst, err := mm.meter.NewFloat64UpDownCounter(name, cos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64Histogram calls `Meter.NewInt64Histogram` and returns the +// instrument, panicking if it encounters an error. +func (mm MeterMust) NewInt64Histogram(name string, mos ...InstrumentOption) Int64Histogram { + if inst, err := mm.meter.NewInt64Histogram(name, mos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64Histogram calls `Meter.NewFloat64Histogram` and returns the +// instrument, panicking if it encounters an error. +func (mm MeterMust) NewFloat64Histogram(name string, mos ...InstrumentOption) Float64Histogram { + if inst, err := mm.meter.NewFloat64Histogram(name, mos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64GaugeObserver calls `Meter.NewInt64GaugeObserver` and +// returns the instrument, panicking if it encounters an error. +func (mm MeterMust) NewInt64GaugeObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64GaugeObserver { + if inst, err := mm.meter.NewInt64GaugeObserver(name, callback, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64GaugeObserver calls `Meter.NewFloat64GaugeObserver` and +// returns the instrument, panicking if it encounters an error. +func (mm MeterMust) NewFloat64GaugeObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64GaugeObserver { + if inst, err := mm.meter.NewFloat64GaugeObserver(name, callback, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64CounterObserver calls `Meter.NewInt64CounterObserver` and +// returns the instrument, panicking if it encounters an error. +func (mm MeterMust) NewInt64CounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64CounterObserver { + if inst, err := mm.meter.NewInt64CounterObserver(name, callback, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64CounterObserver calls `Meter.NewFloat64CounterObserver` and +// returns the instrument, panicking if it encounters an error. +func (mm MeterMust) NewFloat64CounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64CounterObserver { + if inst, err := mm.meter.NewFloat64CounterObserver(name, callback, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64UpDownCounterObserver calls `Meter.NewInt64UpDownCounterObserver` and +// returns the instrument, panicking if it encounters an error. 
+func (mm MeterMust) NewInt64UpDownCounterObserver(name string, callback Int64ObserverFunc, oos ...InstrumentOption) Int64UpDownCounterObserver { + if inst, err := mm.meter.NewInt64UpDownCounterObserver(name, callback, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64UpDownCounterObserver calls `Meter.NewFloat64UpDownCounterObserver` and +// returns the instrument, panicking if it encounters an error. +func (mm MeterMust) NewFloat64UpDownCounterObserver(name string, callback Float64ObserverFunc, oos ...InstrumentOption) Float64UpDownCounterObserver { + if inst, err := mm.meter.NewFloat64UpDownCounterObserver(name, callback, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewBatchObserver returns a wrapper around BatchObserver that panics +// when any instrument constructor returns an error. +func (mm MeterMust) NewBatchObserver(callback BatchObserverFunc) BatchObserverMust { + return BatchObserverMust{ + batch: mm.meter.NewBatchObserver(callback), + } +} + +// NewInt64GaugeObserver calls `BatchObserver.NewInt64GaugeObserver` and +// returns the instrument, panicking if it encounters an error. +func (bm BatchObserverMust) NewInt64GaugeObserver(name string, oos ...InstrumentOption) Int64GaugeObserver { + if inst, err := bm.batch.NewInt64GaugeObserver(name, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64GaugeObserver calls `BatchObserver.NewFloat64GaugeObserver` and +// returns the instrument, panicking if it encounters an error. +func (bm BatchObserverMust) NewFloat64GaugeObserver(name string, oos ...InstrumentOption) Float64GaugeObserver { + if inst, err := bm.batch.NewFloat64GaugeObserver(name, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64CounterObserver calls `BatchObserver.NewInt64CounterObserver` and +// returns the instrument, panicking if it encounters an error. +func (bm BatchObserverMust) NewInt64CounterObserver(name string, oos ...InstrumentOption) Int64CounterObserver { + if inst, err := bm.batch.NewInt64CounterObserver(name, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64CounterObserver calls `BatchObserver.NewFloat64CounterObserver` and +// returns the instrument, panicking if it encounters an error. +func (bm BatchObserverMust) NewFloat64CounterObserver(name string, oos ...InstrumentOption) Float64CounterObserver { + if inst, err := bm.batch.NewFloat64CounterObserver(name, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewInt64UpDownCounterObserver calls `BatchObserver.NewInt64UpDownCounterObserver` and +// returns the instrument, panicking if it encounters an error. +func (bm BatchObserverMust) NewInt64UpDownCounterObserver(name string, oos ...InstrumentOption) Int64UpDownCounterObserver { + if inst, err := bm.batch.NewInt64UpDownCounterObserver(name, oos...); err != nil { + panic(err) + } else { + return inst + } +} + +// NewFloat64UpDownCounterObserver calls `BatchObserver.NewFloat64UpDownCounterObserver` and +// returns the instrument, panicking if it encounters an error. 
+func (bm BatchObserverMust) NewFloat64UpDownCounterObserver(name string, oos ...InstrumentOption) Float64UpDownCounterObserver { + if inst, err := bm.batch.NewFloat64UpDownCounterObserver(name, oos...); err != nil { + panic(err) + } else { + return inst + } +} diff --git a/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go b/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go new file mode 100644 index 0000000000..2da24c8f21 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/metric_instrument.go @@ -0,0 +1,464 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package metric // import "go.opentelemetry.io/otel/metric" + +import ( + "context" + "errors" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/number" + "go.opentelemetry.io/otel/metric/sdkapi" +) + +// ErrSDKReturnedNilImpl is returned when a new `MeterImpl` returns nil. +var ErrSDKReturnedNilImpl = errors.New("SDK returned a nil implementation") + +// Int64ObserverFunc is a type of callback that integral +// observers run. +type Int64ObserverFunc func(context.Context, Int64ObserverResult) + +// Float64ObserverFunc is a type of callback that floating point +// observers run. +type Float64ObserverFunc func(context.Context, Float64ObserverResult) + +// BatchObserverFunc is a callback argument for use with any +// Observer instrument that will be reported as a batch of +// observations. +type BatchObserverFunc func(context.Context, BatchObserverResult) + +// Int64ObserverResult is passed to an observer callback to capture +// observations for one asynchronous integer metric instrument. +type Int64ObserverResult struct { + instrument sdkapi.AsyncImpl + function func([]attribute.KeyValue, ...Observation) +} + +// Float64ObserverResult is passed to an observer callback to capture +// observations for one asynchronous floating point metric instrument. +type Float64ObserverResult struct { + instrument sdkapi.AsyncImpl + function func([]attribute.KeyValue, ...Observation) +} + +// BatchObserverResult is passed to a batch observer callback to +// capture observations for multiple asynchronous instruments. +type BatchObserverResult struct { + function func([]attribute.KeyValue, ...Observation) +} + +// Observe captures a single integer value from the associated +// instrument callback, with the given labels. +func (ir Int64ObserverResult) Observe(value int64, labels ...attribute.KeyValue) { + ir.function(labels, sdkapi.NewObservation(ir.instrument, number.NewInt64Number(value))) +} + +// Observe captures a single floating point value from the associated +// instrument callback, with the given labels. +func (fr Float64ObserverResult) Observe(value float64, labels ...attribute.KeyValue) { + fr.function(labels, sdkapi.NewObservation(fr.instrument, number.NewFloat64Number(value))) +} + +// Observe captures a multiple observations from the associated batch +// instrument callback, with the given labels. 
+func (br BatchObserverResult) Observe(labels []attribute.KeyValue, obs ...Observation) { + br.function(labels, obs...) +} + +var _ sdkapi.AsyncSingleRunner = (*Int64ObserverFunc)(nil) +var _ sdkapi.AsyncSingleRunner = (*Float64ObserverFunc)(nil) +var _ sdkapi.AsyncBatchRunner = (*BatchObserverFunc)(nil) + +// newInt64AsyncRunner returns a single-observer callback for integer Observer instruments. +func newInt64AsyncRunner(c Int64ObserverFunc) sdkapi.AsyncSingleRunner { + return &c +} + +// newFloat64AsyncRunner returns a single-observer callback for floating point Observer instruments. +func newFloat64AsyncRunner(c Float64ObserverFunc) sdkapi.AsyncSingleRunner { + return &c +} + +// newBatchAsyncRunner returns a batch-observer callback use with multiple Observer instruments. +func newBatchAsyncRunner(c BatchObserverFunc) sdkapi.AsyncBatchRunner { + return &c +} + +// AnyRunner implements AsyncRunner. +func (*Int64ObserverFunc) AnyRunner() {} + +// AnyRunner implements AsyncRunner. +func (*Float64ObserverFunc) AnyRunner() {} + +// AnyRunner implements AsyncRunner. +func (*BatchObserverFunc) AnyRunner() {} + +// Run implements AsyncSingleRunner. +func (i *Int64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { + (*i)(ctx, Int64ObserverResult{ + instrument: impl, + function: function, + }) +} + +// Run implements AsyncSingleRunner. +func (f *Float64ObserverFunc) Run(ctx context.Context, impl sdkapi.AsyncImpl, function func([]attribute.KeyValue, ...Observation)) { + (*f)(ctx, Float64ObserverResult{ + instrument: impl, + function: function, + }) +} + +// Run implements AsyncBatchRunner. +func (b *BatchObserverFunc) Run(ctx context.Context, function func([]attribute.KeyValue, ...Observation)) { + (*b)(ctx, BatchObserverResult{ + function: function, + }) +} + +// wrapInt64GaugeObserverInstrument converts an AsyncImpl into Int64GaugeObserver. +func wrapInt64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64GaugeObserver, error) { + common, err := checkNewAsync(asyncInst, err) + return Int64GaugeObserver{asyncInstrument: common}, err +} + +// wrapFloat64GaugeObserverInstrument converts an AsyncImpl into Float64GaugeObserver. +func wrapFloat64GaugeObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64GaugeObserver, error) { + common, err := checkNewAsync(asyncInst, err) + return Float64GaugeObserver{asyncInstrument: common}, err +} + +// wrapInt64CounterObserverInstrument converts an AsyncImpl into Int64CounterObserver. +func wrapInt64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64CounterObserver, error) { + common, err := checkNewAsync(asyncInst, err) + return Int64CounterObserver{asyncInstrument: common}, err +} + +// wrapFloat64CounterObserverInstrument converts an AsyncImpl into Float64CounterObserver. +func wrapFloat64CounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64CounterObserver, error) { + common, err := checkNewAsync(asyncInst, err) + return Float64CounterObserver{asyncInstrument: common}, err +} + +// wrapInt64UpDownCounterObserverInstrument converts an AsyncImpl into Int64UpDownCounterObserver. +func wrapInt64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Int64UpDownCounterObserver, error) { + common, err := checkNewAsync(asyncInst, err) + return Int64UpDownCounterObserver{asyncInstrument: common}, err +} + +// wrapFloat64UpDownCounterObserverInstrument converts an AsyncImpl into Float64UpDownCounterObserver. 
+func wrapFloat64UpDownCounterObserverInstrument(asyncInst sdkapi.AsyncImpl, err error) (Float64UpDownCounterObserver, error) { + common, err := checkNewAsync(asyncInst, err) + return Float64UpDownCounterObserver{asyncInstrument: common}, err +} + +// BatchObserver represents an Observer callback that can report +// observations for multiple instruments. +type BatchObserver struct { + meter Meter + runner sdkapi.AsyncBatchRunner +} + +// Int64GaugeObserver is a metric that captures a set of int64 values at a +// point in time. +type Int64GaugeObserver struct { + asyncInstrument +} + +// Float64GaugeObserver is a metric that captures a set of float64 values +// at a point in time. +type Float64GaugeObserver struct { + asyncInstrument +} + +// Int64CounterObserver is a metric that captures a precomputed sum of +// int64 values at a point in time. +type Int64CounterObserver struct { + asyncInstrument +} + +// Float64CounterObserver is a metric that captures a precomputed sum of +// float64 values at a point in time. +type Float64CounterObserver struct { + asyncInstrument +} + +// Int64UpDownCounterObserver is a metric that captures a precomputed sum of +// int64 values at a point in time. +type Int64UpDownCounterObserver struct { + asyncInstrument +} + +// Float64UpDownCounterObserver is a metric that captures a precomputed sum of +// float64 values at a point in time. +type Float64UpDownCounterObserver struct { + asyncInstrument +} + +// Observation returns an Observation, a BatchObserverFunc +// argument, for an asynchronous integer instrument. +// This returns an implementation-level object for use by the SDK, +// users should not refer to this. +func (i Int64GaugeObserver) Observation(v int64) Observation { + return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) +} + +// Observation returns an Observation, a BatchObserverFunc +// argument, for an asynchronous integer instrument. +// This returns an implementation-level object for use by the SDK, +// users should not refer to this. +func (f Float64GaugeObserver) Observation(v float64) Observation { + return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) +} + +// Observation returns an Observation, a BatchObserverFunc +// argument, for an asynchronous integer instrument. +// This returns an implementation-level object for use by the SDK, +// users should not refer to this. +func (i Int64CounterObserver) Observation(v int64) Observation { + return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) +} + +// Observation returns an Observation, a BatchObserverFunc +// argument, for an asynchronous integer instrument. +// This returns an implementation-level object for use by the SDK, +// users should not refer to this. +func (f Float64CounterObserver) Observation(v float64) Observation { + return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v)) +} + +// Observation returns an Observation, a BatchObserverFunc +// argument, for an asynchronous integer instrument. +// This returns an implementation-level object for use by the SDK, +// users should not refer to this. +func (i Int64UpDownCounterObserver) Observation(v int64) Observation { + return sdkapi.NewObservation(i.instrument, number.NewInt64Number(v)) +} + +// Observation returns an Observation, a BatchObserverFunc +// argument, for an asynchronous integer instrument. +// This returns an implementation-level object for use by the SDK, +// users should not refer to this. 
+func (f Float64UpDownCounterObserver) Observation(v float64) Observation {
+	return sdkapi.NewObservation(f.instrument, number.NewFloat64Number(v))
+}
+
+// syncInstrument contains a SyncImpl.
+type syncInstrument struct {
+	instrument sdkapi.SyncImpl
+}
+
+// asyncInstrument contains an AsyncImpl.
+type asyncInstrument struct {
+	instrument sdkapi.AsyncImpl
+}
+
+// AsyncImpl returns the implementation object for asynchronous instruments.
+func (a asyncInstrument) AsyncImpl() sdkapi.AsyncImpl {
+	return a.instrument
+}
+
+// SyncImpl returns the implementation object for synchronous instruments.
+func (s syncInstrument) SyncImpl() sdkapi.SyncImpl {
+	return s.instrument
+}
+
+func (s syncInstrument) float64Measurement(value float64) Measurement {
+	return sdkapi.NewMeasurement(s.instrument, number.NewFloat64Number(value))
+}
+
+func (s syncInstrument) int64Measurement(value int64) Measurement {
+	return sdkapi.NewMeasurement(s.instrument, number.NewInt64Number(value))
+}
+
+func (s syncInstrument) directRecord(ctx context.Context, number number.Number, labels []attribute.KeyValue) {
+	s.instrument.RecordOne(ctx, number, labels)
+}
+
+// checkNewAsync receives an AsyncImpl and a potential
+// error, and returns the same types, checking for and ensuring that
+// the returned interface is not nil.
+func checkNewAsync(instrument sdkapi.AsyncImpl, err error) (asyncInstrument, error) {
+	if instrument == nil {
+		if err == nil {
+			err = ErrSDKReturnedNilImpl
+		}
+		instrument = sdkapi.NewNoopAsyncInstrument()
+	}
+	return asyncInstrument{
+		instrument: instrument,
+	}, err
+}
+
+// checkNewSync receives a SyncImpl and a potential
+// error, and returns the same types, checking for and ensuring that
+// the returned interface is not nil.
+func checkNewSync(instrument sdkapi.SyncImpl, err error) (syncInstrument, error) {
+	if instrument == nil {
+		if err == nil {
+			err = ErrSDKReturnedNilImpl
+		}
+		// Note: an alternate behavior would be to synthesize a new name
+		// or group all duplicately-named instruments of a certain type
+		// together and use a tag for the original name, e.g.,
+		//   name = 'invalid.counter.int64'
+		//   label = 'original-name=duplicate-counter-name'
+		instrument = sdkapi.NewNoopSyncInstrument()
+	}
+	return syncInstrument{
+		instrument: instrument,
+	}, err
+}
+
+// wrapInt64CounterInstrument converts a SyncImpl into Int64Counter.
+func wrapInt64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Counter, error) {
+	common, err := checkNewSync(syncInst, err)
+	return Int64Counter{syncInstrument: common}, err
+}
+
+// wrapFloat64CounterInstrument converts a SyncImpl into Float64Counter.
+func wrapFloat64CounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Counter, error) {
+	common, err := checkNewSync(syncInst, err)
+	return Float64Counter{syncInstrument: common}, err
+}
+
+// wrapInt64UpDownCounterInstrument converts a SyncImpl into Int64UpDownCounter.
+func wrapInt64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Int64UpDownCounter, error) {
+	common, err := checkNewSync(syncInst, err)
+	return Int64UpDownCounter{syncInstrument: common}, err
+}
+
+// wrapFloat64UpDownCounterInstrument converts a SyncImpl into Float64UpDownCounter.
+func wrapFloat64UpDownCounterInstrument(syncInst sdkapi.SyncImpl, err error) (Float64UpDownCounter, error) {
+	common, err := checkNewSync(syncInst, err)
+	return Float64UpDownCounter{syncInstrument: common}, err
+}
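+// Illustrative sketch (editor's addition): because checkNewSync and
+// checkNewAsync substitute a no-op implementation whenever the SDK hands
+// back nil, the wrap helpers never return an unusable instrument:
+//
+//	c, err := wrapInt64CounterInstrument(nil, nil)
+//	// err == ErrSDKReturnedNilImpl, but c is backed by a no-op
+//	// SyncImpl, so calling c.Add is still safe.
+
+// wrapInt64HistogramInstrument converts a SyncImpl into Int64Histogram.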
+func wrapInt64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Int64Histogram, error) {
+	common, err := checkNewSync(syncInst, err)
+	return Int64Histogram{syncInstrument: common}, err
+}
+
+// wrapFloat64HistogramInstrument converts a SyncImpl into Float64Histogram.
+func wrapFloat64HistogramInstrument(syncInst sdkapi.SyncImpl, err error) (Float64Histogram, error) {
+	common, err := checkNewSync(syncInst, err)
+	return Float64Histogram{syncInstrument: common}, err
+}
+
+// Float64Counter is a metric that accumulates float64 values.
+type Float64Counter struct {
+	syncInstrument
+}
+
+// Int64Counter is a metric that accumulates int64 values.
+type Int64Counter struct {
+	syncInstrument
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Float64Counter) Measurement(value float64) Measurement {
+	return c.float64Measurement(value)
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Int64Counter) Measurement(value int64) Measurement {
+	return c.int64Measurement(value)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Float64Counter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) {
+	c.directRecord(ctx, number.NewFloat64Number(value), labels)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Int64Counter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) {
+	c.directRecord(ctx, number.NewInt64Number(value), labels)
+}
+
+// Float64UpDownCounter is a metric instrument that sums floating
+// point values.
+type Float64UpDownCounter struct {
+	syncInstrument
+}
+
+// Int64UpDownCounter is a metric instrument that sums integer values.
+type Int64UpDownCounter struct {
+	syncInstrument
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Float64UpDownCounter) Measurement(value float64) Measurement {
+	return c.float64Measurement(value)
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Int64UpDownCounter) Measurement(value int64) Measurement {
+	return c.int64Measurement(value)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Float64UpDownCounter) Add(ctx context.Context, value float64, labels ...attribute.KeyValue) {
+	c.directRecord(ctx, number.NewFloat64Number(value), labels)
+}
+
+// Add adds the value to the counter's sum. The labels should contain
+// the keys and values to be associated with this value.
+func (c Int64UpDownCounter) Add(ctx context.Context, value int64, labels ...attribute.KeyValue) {
+	c.directRecord(ctx, number.NewInt64Number(value), labels)
+}
+
+// Float64Histogram is a metric that records float64 values.
+type Float64Histogram struct {
+	syncInstrument
+}
+
+// Int64Histogram is a metric that records int64 values.
+type Int64Histogram struct {
+	syncInstrument
+}
+
+// Measurement creates a Measurement object to use with batch
+// recording.
+func (c Float64Histogram) Measurement(value float64) Measurement {
+	return c.float64Measurement(value)
+}
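+// Illustrative sketch (editor's addition): the synchronous instruments
+// above record directly. Assuming `requests` is an Int64Counter and
+// `latency` a Float64Histogram obtained from a Meter (not shown here):
+//
+//	requests.Add(ctx, 1, attribute.String("method", "GET"))
+//	latency.Record(ctx, 12.5, attribute.String("method", "GET"))
+
+// Measurement creates a Measurement object to use with batch
+// recording.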
+func (c Int64Histogram) Measurement(value int64) Measurement {
+	return c.int64Measurement(value)
+}
+
+// Record adds a new value to the Histogram's distribution. The
+// labels should contain the keys and values to be associated with
+// this value.
+func (c Float64Histogram) Record(ctx context.Context, value float64, labels ...attribute.KeyValue) {
+	c.directRecord(ctx, number.NewFloat64Number(value), labels)
+}
+
+// Record adds a new value to the Histogram's distribution. The
+// labels should contain the keys and values to be associated with
+// this value.
+func (c Int64Histogram) Record(ctx context.Context, value int64, labels ...attribute.KeyValue) {
+	c.directRecord(ctx, number.NewInt64Number(value), labels)
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/noop.go b/vendor/go.opentelemetry.io/otel/metric/noop.go
new file mode 100644
index 0000000000..37c653f51a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/noop.go
@@ -0,0 +1,30 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package metric // import "go.opentelemetry.io/otel/metric"
+
+type noopMeterProvider struct{}
+
+// NewNoopMeterProvider returns an implementation of MeterProvider that
+// performs no operations. The Meter and Instrument created from the returned
+// MeterProvider also perform no operations.
+func NewNoopMeterProvider() MeterProvider {
+	return noopMeterProvider{}
+}
+
+var _ MeterProvider = noopMeterProvider{}
+
+func (noopMeterProvider) Meter(instrumentationName string, opts ...MeterOption) Meter {
+	return Meter{}
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/number/doc.go b/vendor/go.opentelemetry.io/otel/metric/number/doc.go
new file mode 100644
index 0000000000..0649ff875e
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/number/doc.go
@@ -0,0 +1,23 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package number provides a number abstraction for instruments that
+either support int64 or float64 input values.
+
+This package is currently in a pre-GA phase. Backwards incompatible changes
+may be introduced in subsequent minor version releases as we work to track the
+evolving OpenTelemetry specification and user feedback.
+*/
+package number // import "go.opentelemetry.io/otel/metric/number"
diff --git a/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go b/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go
new file mode 100644
index 0000000000..6288c7ea29
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/number/kind_string.go
@@ -0,0 +1,24 @@
+// Code generated by "stringer -type=Kind"; DO NOT EDIT.
+
+package number
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[Int64Kind-0]
+	_ = x[Float64Kind-1]
+}
+
+const _Kind_name = "Int64KindFloat64Kind"
+
+var _Kind_index = [...]uint8{0, 9, 20}
+
+func (i Kind) String() string {
+	if i < 0 || i >= Kind(len(_Kind_index)-1) {
+		return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/number/number.go b/vendor/go.opentelemetry.io/otel/metric/number/number.go
new file mode 100644
index 0000000000..3ec95e2014
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/number/number.go
@@ -0,0 +1,538 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package number // import "go.opentelemetry.io/otel/metric/number"
+
+//go:generate stringer -type=Kind
+
+import (
+	"fmt"
+	"math"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel/internal"
+)
+
+// Kind describes the data type of the Number.
+type Kind int8
+
+const (
+	// Int64Kind means that the Number stores int64.
+	Int64Kind Kind = iota
+	// Float64Kind means that the Number stores float64.
+	Float64Kind
+)
+
+// Zero returns a zero value for a given Kind.
+func (k Kind) Zero() Number {
+	switch k {
+	case Int64Kind:
+		return NewInt64Number(0)
+	case Float64Kind:
+		return NewFloat64Number(0.)
+	default:
+		return Number(0)
+	}
+}
+
+// Minimum returns the minimum representable value
+// for a given Kind.
+func (k Kind) Minimum() Number {
+	switch k {
+	case Int64Kind:
+		return NewInt64Number(math.MinInt64)
+	case Float64Kind:
+		return NewFloat64Number(-1. * math.MaxFloat64)
+	default:
+		return Number(0)
+	}
+}
+
+// Maximum returns the maximum representable value
+// for a given Kind.
+func (k Kind) Maximum() Number {
+	switch k {
+	case Int64Kind:
+		return NewInt64Number(math.MaxInt64)
+	case Float64Kind:
+		return NewFloat64Number(math.MaxFloat64)
+	default:
+		return Number(0)
+	}
+}
+
+// Number represents either an integral or a floating point value. It
+// needs to be accompanied by a Kind that describes
+// the actual type of the value stored within Number.
+type Number uint64
+
+// - constructors
+
+// NewNumberFromRaw creates a new Number from a raw value.
+func NewNumberFromRaw(r uint64) Number {
+	return Number(r)
+}
+
+// NewInt64Number creates an integral Number.
+func NewInt64Number(i int64) Number {
+	return NewNumberFromRaw(internal.Int64ToRaw(i))
+}
+
+// NewFloat64Number creates a floating point Number.
+func NewFloat64Number(f float64) Number {
+	return NewNumberFromRaw(internal.Float64ToRaw(f))
+}
+
+// NewNumberSignChange returns a number with the same magnitude and
+// the opposite sign. `kind` must describe the kind of number in `nn`.
+func NewNumberSignChange(kind Kind, nn Number) Number {
+	switch kind {
+	case Int64Kind:
+		return NewInt64Number(-nn.AsInt64())
+	case Float64Kind:
+		return NewFloat64Number(-nn.AsFloat64())
+	}
+	return nn
+}
+
+// - as x
+
+// AsNumber gets the Number.
+func (n *Number) AsNumber() Number {
+	return *n
+}
+
+// AsRaw gets the uninterpreted raw value. Might be useful for some
+// atomic operations.
+func (n *Number) AsRaw() uint64 {
+	return uint64(*n)
+}
+
+// AsInt64 assumes that the value contains an int64 and returns it as
+// such.
+func (n *Number) AsInt64() int64 {
+	return internal.RawToInt64(n.AsRaw())
+}
+
+// AsFloat64 assumes that the measurement value contains a float64 and
+// returns it as such.
+func (n *Number) AsFloat64() float64 {
+	return internal.RawToFloat64(n.AsRaw())
+}
+
+// - as x atomic
+
+// AsNumberAtomic gets the Number atomically.
+func (n *Number) AsNumberAtomic() Number {
+	return NewNumberFromRaw(n.AsRawAtomic())
+}
+
+// AsRawAtomic gets the uninterpreted raw value atomically. Might be
+// useful for some atomic operations.
+func (n *Number) AsRawAtomic() uint64 {
+	return atomic.LoadUint64(n.AsRawPtr())
+}
+
+// AsInt64Atomic assumes that the number contains an int64 and returns
+// it as such atomically.
+func (n *Number) AsInt64Atomic() int64 {
+	return atomic.LoadInt64(n.AsInt64Ptr())
+}
+
+// AsFloat64Atomic assumes that the measurement value contains a
+// float64 and returns it as such atomically.
+func (n *Number) AsFloat64Atomic() float64 {
+	return internal.RawToFloat64(n.AsRawAtomic())
+}
+
+// - as x ptr
+
+// AsRawPtr gets the pointer to the raw, uninterpreted
+// value. Might be useful for some atomic operations.
+func (n *Number) AsRawPtr() *uint64 {
+	return (*uint64)(n)
+}
+
+// AsInt64Ptr assumes that the number contains an int64 and returns a
+// pointer to it.
+func (n *Number) AsInt64Ptr() *int64 {
+	return internal.RawPtrToInt64Ptr(n.AsRawPtr())
+}
+
+// AsFloat64Ptr assumes that the number contains a float64 and returns a
+// pointer to it.
+func (n *Number) AsFloat64Ptr() *float64 {
+	return internal.RawPtrToFloat64Ptr(n.AsRawPtr())
+}
+
+// - coerce
+
+// CoerceToInt64 casts the number to int64. May result in
+// data/precision loss.
+func (n *Number) CoerceToInt64(kind Kind) int64 {
+	switch kind {
+	case Int64Kind:
+		return n.AsInt64()
+	case Float64Kind:
+		return int64(n.AsFloat64())
+	default:
+		// you get what you deserve
+		return 0
+	}
+}
+
+// CoerceToFloat64 casts the number to float64. May result in
+// data/precision loss.
+func (n *Number) CoerceToFloat64(kind Kind) float64 {
+	switch kind {
+	case Int64Kind:
+		return float64(n.AsInt64())
+	case Float64Kind:
+		return n.AsFloat64()
+	default:
+		// you get what you deserve
+		return 0
+	}
+}
+
+// - set
+
+// SetNumber sets the number to the passed number. Both should be of
+// the same kind.
+func (n *Number) SetNumber(nn Number) {
+	*n.AsRawPtr() = nn.AsRaw()
+}
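+// Illustrative sketch (editor's addition): a Number is a bare 64-bit
+// cell and the Kind travels separately, so the caller picks the
+// interpretation; the As* accessors reinterpret bits and do not convert:
+//
+//	n := NewFloat64Number(1.5)
+//	_ = n.AsFloat64()                // 1.5
+//	_ = n.CoerceToInt64(Float64Kind) // 1 (converted)
+//	_ = n.AsInt64()                  // raw float bits misread as int64
+
+// SetRaw sets the number to the passed raw value. Both number and the
+// raw number should represent the same kind.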
+func (n *Number) SetRaw(r uint64) {
+	*n.AsRawPtr() = r
+}
+
+// SetInt64 assumes that the number contains an int64 and sets it to
+// the passed value.
+func (n *Number) SetInt64(i int64) {
+	*n.AsInt64Ptr() = i
+}
+
+// SetFloat64 assumes that the number contains a float64 and sets it
+// to the passed value.
+func (n *Number) SetFloat64(f float64) {
+	*n.AsFloat64Ptr() = f
+}
+
+// - set atomic
+
+// SetNumberAtomic sets the number to the passed number
+// atomically. Both should be of the same kind.
+func (n *Number) SetNumberAtomic(nn Number) {
+	atomic.StoreUint64(n.AsRawPtr(), nn.AsRaw())
+}
+
+// SetRawAtomic sets the number to the passed raw value
+// atomically. Both number and the raw number should represent the
+// same kind.
+func (n *Number) SetRawAtomic(r uint64) {
+	atomic.StoreUint64(n.AsRawPtr(), r)
+}
+
+// SetInt64Atomic assumes that the number contains an int64 and sets
+// it to the passed value atomically.
+func (n *Number) SetInt64Atomic(i int64) {
+	atomic.StoreInt64(n.AsInt64Ptr(), i)
+}
+
+// SetFloat64Atomic assumes that the number contains a float64 and
+// sets it to the passed value atomically.
+func (n *Number) SetFloat64Atomic(f float64) {
+	atomic.StoreUint64(n.AsRawPtr(), internal.Float64ToRaw(f))
+}
+
+// - swap
+
+// SwapNumber sets the number to the passed number and returns the old
+// number. Both this number and the passed number should be of the
+// same kind.
+func (n *Number) SwapNumber(nn Number) Number {
+	old := *n
+	n.SetNumber(nn)
+	return old
+}
+
+// SwapRaw sets the number to the passed raw value and returns the old
+// raw value. Both number and the raw number should represent the same
+// kind.
+func (n *Number) SwapRaw(r uint64) uint64 {
+	old := n.AsRaw()
+	n.SetRaw(r)
+	return old
+}
+
+// SwapInt64 assumes that the number contains an int64, sets it to the
+// passed value and returns the old int64 value.
+func (n *Number) SwapInt64(i int64) int64 {
+	old := n.AsInt64()
+	n.SetInt64(i)
+	return old
+}
+
+// SwapFloat64 assumes that the number contains a float64, sets it to
+// the passed value and returns the old float64 value.
+func (n *Number) SwapFloat64(f float64) float64 {
+	old := n.AsFloat64()
+	n.SetFloat64(f)
+	return old
+}
+
+// - swap atomic
+
+// SwapNumberAtomic sets the number to the passed number and returns
+// the old number atomically. Both this number and the passed number
+// should be of the same kind.
+func (n *Number) SwapNumberAtomic(nn Number) Number {
+	return NewNumberFromRaw(atomic.SwapUint64(n.AsRawPtr(), nn.AsRaw()))
+}
+
+// SwapRawAtomic sets the number to the passed raw value and returns
+// the old raw value atomically. Both number and the raw number should
+// represent the same kind.
+func (n *Number) SwapRawAtomic(r uint64) uint64 {
+	return atomic.SwapUint64(n.AsRawPtr(), r)
+}
+
+// SwapInt64Atomic assumes that the number contains an int64, sets it
+// to the passed value and returns the old int64 value atomically.
+func (n *Number) SwapInt64Atomic(i int64) int64 {
+	return atomic.SwapInt64(n.AsInt64Ptr(), i)
+}
+
+// SwapFloat64Atomic assumes that the number contains a float64, sets
+// it to the passed value and returns the old float64 value
+// atomically.
+func (n *Number) SwapFloat64Atomic(f float64) float64 {
+	return internal.RawToFloat64(atomic.SwapUint64(n.AsRawPtr(), internal.Float64ToRaw(f)))
+}
+
+// - add
+
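+// Illustrative sketch (editor's addition): the *Atomic variants make a
+// Number usable as a lock-free accumulator shared between goroutines:
+//
+//	var n Number           // zero value is ready to use
+//	go n.AddInt64Atomic(1) // concurrent update
+//	_ = n.AsInt64Atomic()  // atomic read
+
+// AddNumber assumes that this and the passed number are of the passed
+// kind and adds the passed number to this number.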
+func (n *Number) AddNumber(kind Kind, nn Number) { + switch kind { + case Int64Kind: + n.AddInt64(nn.AsInt64()) + case Float64Kind: + n.AddFloat64(nn.AsFloat64()) + } +} + +// AddRaw assumes that this number and the passed raw value are of the +// passed kind and adds the passed raw value to this number. +func (n *Number) AddRaw(kind Kind, r uint64) { + n.AddNumber(kind, NewNumberFromRaw(r)) +} + +// AddInt64 assumes that the number contains an int64 and adds the +// passed int64 to it. +func (n *Number) AddInt64(i int64) { + *n.AsInt64Ptr() += i +} + +// AddFloat64 assumes that the number contains a float64 and adds the +// passed float64 to it. +func (n *Number) AddFloat64(f float64) { + *n.AsFloat64Ptr() += f +} + +// - add atomic + +// AddNumberAtomic assumes that this and the passed number are of the +// passed kind and adds the passed number to this number atomically. +func (n *Number) AddNumberAtomic(kind Kind, nn Number) { + switch kind { + case Int64Kind: + n.AddInt64Atomic(nn.AsInt64()) + case Float64Kind: + n.AddFloat64Atomic(nn.AsFloat64()) + } +} + +// AddRawAtomic assumes that this number and the passed raw value are +// of the passed kind and adds the passed raw value to this number +// atomically. +func (n *Number) AddRawAtomic(kind Kind, r uint64) { + n.AddNumberAtomic(kind, NewNumberFromRaw(r)) +} + +// AddInt64Atomic assumes that the number contains an int64 and adds +// the passed int64 to it atomically. +func (n *Number) AddInt64Atomic(i int64) { + atomic.AddInt64(n.AsInt64Ptr(), i) +} + +// AddFloat64Atomic assumes that the number contains a float64 and +// adds the passed float64 to it atomically. +func (n *Number) AddFloat64Atomic(f float64) { + for { + o := n.AsFloat64Atomic() + if n.CompareAndSwapFloat64(o, o+f) { + break + } + } +} + +// - compare and swap (atomic only) + +// CompareAndSwapNumber does the atomic CAS operation on this +// number. This number and passed old and new numbers should be of the +// same kind. +func (n *Number) CompareAndSwapNumber(on, nn Number) bool { + return atomic.CompareAndSwapUint64(n.AsRawPtr(), on.AsRaw(), nn.AsRaw()) +} + +// CompareAndSwapRaw does the atomic CAS operation on this +// number. This number and passed old and new raw values should be of +// the same kind. +func (n *Number) CompareAndSwapRaw(or, nr uint64) bool { + return atomic.CompareAndSwapUint64(n.AsRawPtr(), or, nr) +} + +// CompareAndSwapInt64 assumes that this number contains an int64 and +// does the atomic CAS operation on it. +func (n *Number) CompareAndSwapInt64(oi, ni int64) bool { + return atomic.CompareAndSwapInt64(n.AsInt64Ptr(), oi, ni) +} + +// CompareAndSwapFloat64 assumes that this number contains a float64 and +// does the atomic CAS operation on it. +func (n *Number) CompareAndSwapFloat64(of, nf float64) bool { + return atomic.CompareAndSwapUint64(n.AsRawPtr(), internal.Float64ToRaw(of), internal.Float64ToRaw(nf)) +} + +// - compare + +// CompareNumber compares two Numbers given their kind. Both numbers +// should have the same kind. 
This returns: +// 0 if the numbers are equal +// -1 if the subject `n` is less than the argument `nn` +// +1 if the subject `n` is greater than the argument `nn` +func (n *Number) CompareNumber(kind Kind, nn Number) int { + switch kind { + case Int64Kind: + return n.CompareInt64(nn.AsInt64()) + case Float64Kind: + return n.CompareFloat64(nn.AsFloat64()) + default: + // you get what you deserve + return 0 + } +} + +// CompareRaw compares two numbers, where one is input as a raw +// uint64, interpreting both values as a `kind` of number. +func (n *Number) CompareRaw(kind Kind, r uint64) int { + return n.CompareNumber(kind, NewNumberFromRaw(r)) +} + +// CompareInt64 assumes that the Number contains an int64 and performs +// a comparison between the value and the other value. It returns the +// typical result of the compare function: -1 if the value is less +// than the other, 0 if both are equal, 1 if the value is greater than +// the other. +func (n *Number) CompareInt64(i int64) int { + this := n.AsInt64() + if this < i { + return -1 + } else if this > i { + return 1 + } + return 0 +} + +// CompareFloat64 assumes that the Number contains a float64 and +// performs a comparison between the value and the other value. It +// returns the typical result of the compare function: -1 if the value +// is less than the other, 0 if both are equal, 1 if the value is +// greater than the other. +// +// Do not compare NaN values. +func (n *Number) CompareFloat64(f float64) int { + this := n.AsFloat64() + if this < f { + return -1 + } else if this > f { + return 1 + } + return 0 +} + +// - relations to zero + +// IsPositive returns true if the actual value is greater than zero. +func (n *Number) IsPositive(kind Kind) bool { + return n.compareWithZero(kind) > 0 +} + +// IsNegative returns true if the actual value is less than zero. +func (n *Number) IsNegative(kind Kind) bool { + return n.compareWithZero(kind) < 0 +} + +// IsZero returns true if the actual value is equal to zero. +func (n *Number) IsZero(kind Kind) bool { + return n.compareWithZero(kind) == 0 +} + +// - misc + +// Emit returns a string representation of the raw value of the +// Number. A %d is used for integral values, %f for floating point +// values. +func (n *Number) Emit(kind Kind) string { + switch kind { + case Int64Kind: + return fmt.Sprintf("%d", n.AsInt64()) + case Float64Kind: + return fmt.Sprintf("%f", n.AsFloat64()) + default: + return "" + } +} + +// AsInterface returns the number as an interface{}, typically used +// for Kind-correct JSON conversion. +func (n *Number) AsInterface(kind Kind) interface{} { + switch kind { + case Int64Kind: + return n.AsInt64() + case Float64Kind: + return n.AsFloat64() + default: + return math.NaN() + } +} + +// - private stuff + +func (n *Number) compareWithZero(kind Kind) int { + switch kind { + case Int64Kind: + return n.CompareInt64(0) + case Float64Kind: + return n.CompareFloat64(0.) + default: + // you get what you deserve + return 0 + } +} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go new file mode 100644 index 0000000000..14eb0532e4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/sdkapi/descriptor.go @@ -0,0 +1,70 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi"
+
+import (
+	"go.opentelemetry.io/otel/metric/number"
+	"go.opentelemetry.io/otel/metric/unit"
+)
+
+// Descriptor contains all the settings that describe an instrument,
+// including its name, metric kind, number kind, and the configurable
+// options.
+type Descriptor struct {
+	name           string
+	instrumentKind InstrumentKind
+	numberKind     number.Kind
+	description    string
+	unit           unit.Unit
+}
+
+// NewDescriptor returns a Descriptor with the given contents.
+func NewDescriptor(name string, ikind InstrumentKind, nkind number.Kind, description string, unit unit.Unit) Descriptor {
+	return Descriptor{
+		name:           name,
+		instrumentKind: ikind,
+		numberKind:     nkind,
+		description:    description,
+		unit:           unit,
+	}
+}
+
+// Name returns the metric instrument's name.
+func (d Descriptor) Name() string {
+	return d.name
+}
+
+// InstrumentKind returns the specific kind of instrument.
+func (d Descriptor) InstrumentKind() InstrumentKind {
+	return d.instrumentKind
+}
+
+// Description provides a human-readable description of the metric
+// instrument.
+func (d Descriptor) Description() string {
+	return d.description
+}
+
+// Unit describes the units of the metric instrument. Unitless
+// metrics return the empty string.
+func (d Descriptor) Unit() unit.Unit {
+	return d.unit
+}
+
+// NumberKind returns whether this instrument is declared over int64
+// or float64 values.
+func (d Descriptor) NumberKind() number.Kind {
+	return d.numberKind
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go
new file mode 100644
index 0000000000..64aa5ead12
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind.go
@@ -0,0 +1,80 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:generate stringer -type=InstrumentKind
+
+package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi"
+
+// InstrumentKind describes the kind of instrument.
+type InstrumentKind int8
+
+const (
+	// HistogramInstrumentKind indicates a Histogram instrument.
+	HistogramInstrumentKind InstrumentKind = iota
+	// GaugeObserverInstrumentKind indicates a GaugeObserver instrument.
+	GaugeObserverInstrumentKind
+
+	// CounterInstrumentKind indicates a Counter instrument.
+	CounterInstrumentKind
+	// UpDownCounterInstrumentKind indicates an UpDownCounter instrument.
+	UpDownCounterInstrumentKind
+
+	// CounterObserverInstrumentKind indicates a CounterObserver instrument.
+	CounterObserverInstrumentKind
+	// UpDownCounterObserverInstrumentKind indicates an UpDownCounterObserver
+	// instrument.
+	UpDownCounterObserverInstrumentKind
+)
+
+// Synchronous returns whether this is a synchronous kind of instrument.
+func (k InstrumentKind) Synchronous() bool {
+	switch k {
+	case CounterInstrumentKind, UpDownCounterInstrumentKind, HistogramInstrumentKind:
+		return true
+	}
+	return false
+}
+
+// Asynchronous returns whether this is an asynchronous kind of instrument.
+func (k InstrumentKind) Asynchronous() bool {
+	return !k.Synchronous()
+}
+
+// Adding returns whether this kind of instrument adds its inputs (as opposed to Grouping).
+func (k InstrumentKind) Adding() bool {
+	switch k {
+	case CounterInstrumentKind, UpDownCounterInstrumentKind, CounterObserverInstrumentKind, UpDownCounterObserverInstrumentKind:
+		return true
+	}
+	return false
+}
+
+// Grouping returns whether this kind of instrument groups its inputs (as opposed to Adding).
+func (k InstrumentKind) Grouping() bool {
+	return !k.Adding()
+}
+
+// Monotonic returns whether this kind of instrument exposes a non-decreasing sum.
+func (k InstrumentKind) Monotonic() bool {
+	switch k {
+	case CounterInstrumentKind, CounterObserverInstrumentKind:
+		return true
+	}
+	return false
+}
+
+// PrecomputedSum returns whether this kind of instrument receives precomputed sums.
+func (k InstrumentKind) PrecomputedSum() bool {
+	return k.Adding() && k.Asynchronous()
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go
new file mode 100644
index 0000000000..3a2e79d823
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/sdkapi/instrumentkind_string.go
@@ -0,0 +1,28 @@
+// Code generated by "stringer -type=InstrumentKind"; DO NOT EDIT.
+
+package sdkapi
+
+import "strconv"
+
+func _() {
+	// An "invalid array index" compiler error signifies that the constant values have changed.
+	// Re-run the stringer command to generate them again.
+	var x [1]struct{}
+	_ = x[HistogramInstrumentKind-0]
+	_ = x[GaugeObserverInstrumentKind-1]
+	_ = x[CounterInstrumentKind-2]
+	_ = x[UpDownCounterInstrumentKind-3]
+	_ = x[CounterObserverInstrumentKind-4]
+	_ = x[UpDownCounterObserverInstrumentKind-5]
+}
+
+const _InstrumentKind_name = "HistogramInstrumentKindGaugeObserverInstrumentKindCounterInstrumentKindUpDownCounterInstrumentKindCounterObserverInstrumentKindUpDownCounterObserverInstrumentKind"
+
+var _InstrumentKind_index = [...]uint8{0, 23, 50, 71, 98, 127, 162}
+
+func (i InstrumentKind) String() string {
+	if i < 0 || i >= InstrumentKind(len(_InstrumentKind_index)-1) {
+		return "InstrumentKind(" + strconv.FormatInt(int64(i), 10) + ")"
+	}
+	return _InstrumentKind_name[_InstrumentKind_index[i]:_InstrumentKind_index[i+1]]
+}
diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go
new file mode 100644
index 0000000000..f22895dae6
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/metric/sdkapi/noop.go
@@ -0,0 +1,66 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/number" +) + +type noopInstrument struct { + descriptor Descriptor +} +type noopSyncInstrument struct{ noopInstrument } +type noopAsyncInstrument struct{ noopInstrument } + +var _ SyncImpl = noopSyncInstrument{} +var _ AsyncImpl = noopAsyncInstrument{} + +// NewNoopSyncInstrument returns a No-op implementation of the +// synchronous instrument interface. +func NewNoopSyncInstrument() SyncImpl { + return noopSyncInstrument{ + noopInstrument{ + descriptor: Descriptor{ + instrumentKind: CounterInstrumentKind, + }, + }, + } +} + +// NewNoopAsyncInstrument returns a No-op implementation of the +// asynchronous instrument interface. +func NewNoopAsyncInstrument() AsyncImpl { + return noopAsyncInstrument{ + noopInstrument{ + descriptor: Descriptor{ + instrumentKind: CounterObserverInstrumentKind, + }, + }, + } +} + +func (noopInstrument) Implementation() interface{} { + return nil +} + +func (n noopInstrument) Descriptor() Descriptor { + return n.descriptor +} + +func (noopSyncInstrument) RecordOne(context.Context, number.Number, []attribute.KeyValue) { +} diff --git a/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go b/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go new file mode 100644 index 0000000000..36836364bd --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/sdkapi/sdkapi.go @@ -0,0 +1,159 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sdkapi // import "go.opentelemetry.io/otel/metric/sdkapi" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/metric/number" +) + +// MeterImpl is the interface an SDK must implement to supply a Meter +// implementation. +type MeterImpl interface { + // RecordBatch atomically records a batch of measurements. + RecordBatch(ctx context.Context, labels []attribute.KeyValue, measurement ...Measurement) + + // NewSyncInstrument returns a newly constructed + // synchronous instrument implementation or an error, should + // one occur. + NewSyncInstrument(descriptor Descriptor) (SyncImpl, error) + + // NewAsyncInstrument returns a newly constructed + // asynchronous instrument implementation or an error, should + // one occur. + NewAsyncInstrument( + descriptor Descriptor, + runner AsyncRunner, + ) (AsyncImpl, error) +} + +// InstrumentImpl is a common interface for synchronous and +// asynchronous instruments. 
+type InstrumentImpl interface {
+	// Implementation returns the underlying implementation of the
+	// instrument, which allows the implementation to gain access
+	// to its own representation especially from a `Measurement`.
+	Implementation() interface{}
+
+	// Descriptor returns a copy of the instrument's Descriptor.
+	Descriptor() Descriptor
+}
+
+// SyncImpl is the implementation-level interface to a generic
+// synchronous instrument (e.g., Histogram and Counter instruments).
+type SyncImpl interface {
+	InstrumentImpl
+
+	// RecordOne captures a single synchronous metric event.
+	RecordOne(ctx context.Context, number number.Number, labels []attribute.KeyValue)
+}
+
+// AsyncImpl is an implementation-level interface to an
+// asynchronous instrument (e.g., Observer instruments).
+type AsyncImpl interface {
+	InstrumentImpl
+}
+
+// AsyncRunner is expected to convert into an AsyncSingleRunner or an
+// AsyncBatchRunner. SDKs will encounter an error if the AsyncRunner
+// does not satisfy one of these interfaces.
+type AsyncRunner interface {
+	// AnyRunner is a non-exported method with no functional use
+	// other than to make this a non-empty interface.
+	AnyRunner()
+}
+
+// AsyncSingleRunner is an interface implemented by single-observer
+// callbacks.
+type AsyncSingleRunner interface {
+	// Run accepts a single instrument and function for capturing
+	// observations of that instrument. Each call to the function
+	// receives one captured observation. (The function accepts
+	// multiple observations so the same implementation can be
+	// used for batch runners.)
+	Run(ctx context.Context, single AsyncImpl, capture func([]attribute.KeyValue, ...Observation))
+
+	AsyncRunner
+}
+
+// AsyncBatchRunner is an interface implemented by batch-observer
+// callbacks.
+type AsyncBatchRunner interface {
+	// Run accepts a function for capturing observations of
+	// multiple instruments.
+	Run(ctx context.Context, capture func([]attribute.KeyValue, ...Observation))
+
+	AsyncRunner
+}
+
+// NewMeasurement constructs a single measurement, a binding between
+// a synchronous instrument and a number.
+func NewMeasurement(instrument SyncImpl, number number.Number) Measurement {
+	return Measurement{
+		instrument: instrument,
+		number:     number,
+	}
+}
+
+// Measurement is a low-level type used with synchronous instruments
+// as a direct interface to the SDK via `RecordBatch`.
+type Measurement struct {
+	// number needs to be aligned for 64-bit atomic operations.
+	number     number.Number
+	instrument SyncImpl
+}
+
+// SyncImpl returns the instrument that created this measurement.
+// This returns an implementation-level object for use by the SDK,
+// users should not refer to this.
+func (m Measurement) SyncImpl() SyncImpl {
+	return m.instrument
+}
+
+// Number returns a number recorded in this measurement.
+func (m Measurement) Number() number.Number {
+	return m.number
+}
+
+// NewObservation constructs a single observation, a binding between
+// an asynchronous instrument and a number.
+func NewObservation(instrument AsyncImpl, number number.Number) Observation {
+	return Observation{
+		instrument: instrument,
+		number:     number,
+	}
+}
+
+// Observation is a low-level type used with asynchronous instruments
+// as a direct interface to the SDK via `BatchObserver`.
+type Observation struct {
+	// number needs to be aligned for 64-bit atomic operations.
+	number     number.Number
+	instrument AsyncImpl
+}
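+// Illustrative sketch (editor's addition): a Measurement pairs a
+// SyncImpl with a Number for batch recording through
+// MeterImpl.RecordBatch. Assuming `impl` is a MeterImpl and `inst` one
+// of its SyncImpl instruments:
+//
+//	m := NewMeasurement(inst, number.NewInt64Number(5))
+//	impl.RecordBatch(ctx, []attribute.KeyValue{attribute.String("k", "v")}, m)
+
+// AsyncImpl returns the instrument that created this observation.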
+// This returns an implementation-level object for use by the SDK, +// users should not refer to this. +func (m Observation) AsyncImpl() AsyncImpl { + return m.instrument +} + +// Number returns a number recorded in this observation. +func (m Observation) Number() number.Number { + return m.number +} diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/doc.go b/vendor/go.opentelemetry.io/otel/metric/unit/doc.go new file mode 100644 index 0000000000..f8e723593e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/unit/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package unit provides units. +// +// This package is currently in a pre-GA phase. Backwards incompatible changes +// may be introduced in subsequent minor version releases as we work to track +// the evolving OpenTelemetry specification and user feedback. +package unit // import "go.opentelemetry.io/otel/metric/unit" diff --git a/vendor/go.opentelemetry.io/otel/metric/unit/unit.go b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go new file mode 100644 index 0000000000..4615eb16f6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/metric/unit/unit.go @@ -0,0 +1,24 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package unit // import "go.opentelemetry.io/otel/metric/unit" + +type Unit string + +// Units defined by OpenTelemetry. +const ( + Dimensionless Unit = "1" + Bytes Unit = "By" + Milliseconds Unit = "ms" +) diff --git a/vendor/go.opentelemetry.io/otel/propagation.go b/vendor/go.opentelemetry.io/otel/propagation.go new file mode 100644 index 0000000000..d29aaa32c0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/propagation.go @@ -0,0 +1,31 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+package otel // import "go.opentelemetry.io/otel"
+
+import (
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/propagation"
+)
+
+// GetTextMapPropagator returns the global TextMapPropagator. If none has been
+// set, a No-Op TextMapPropagator is returned.
+func GetTextMapPropagator() propagation.TextMapPropagator {
+	return global.TextMapPropagator()
+}
+
+// SetTextMapPropagator sets propagator as the global TextMapPropagator.
+func SetTextMapPropagator(propagator propagation.TextMapPropagator) {
+	global.SetTextMapPropagator(propagator)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/baggage.go b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
new file mode 100644
index 0000000000..303cdf1cbf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/baggage.go
@@ -0,0 +1,58 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+
+	"go.opentelemetry.io/otel/baggage"
+)
+
+const baggageHeader = "baggage"
+
+// Baggage is a propagator that supports the W3C Baggage format.
+//
+// This propagates user-defined baggage associated with a trace. The complete
+// specification is defined at https://www.w3.org/TR/baggage/.
+type Baggage struct{}
+
+var _ TextMapPropagator = Baggage{}
+
+// Inject sets baggage key-values from ctx into the carrier.
+func (b Baggage) Inject(ctx context.Context, carrier TextMapCarrier) {
+	bStr := baggage.FromContext(ctx).String()
+	if bStr != "" {
+		carrier.Set(baggageHeader, bStr)
+	}
+}
+
+// Extract returns a copy of parent with the baggage from the carrier added.
+func (b Baggage) Extract(parent context.Context, carrier TextMapCarrier) context.Context {
+	bStr := carrier.Get(baggageHeader)
+	if bStr == "" {
+		return parent
+	}
+
+	bag, err := baggage.Parse(bStr)
+	if err != nil {
+		return parent
+	}
+	return baggage.ContextWithBaggage(parent, bag)
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (b Baggage) Fields() []string {
+	return []string{baggageHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/doc.go b/vendor/go.opentelemetry.io/otel/propagation/doc.go
new file mode 100644
index 0000000000..c119eb2858
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/doc.go
@@ -0,0 +1,24 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+/*
+Package propagation contains OpenTelemetry context propagators.
+
+OpenTelemetry propagators are used to extract and inject context data from and
+into messages exchanged by applications. The propagators supported by this
+package are the W3C Trace Context encoding
+(https://www.w3.org/TR/trace-context/) and W3C Baggage
+(https://www.w3.org/TR/baggage/).
+*/
+package propagation // import "go.opentelemetry.io/otel/propagation"
diff --git a/vendor/go.opentelemetry.io/otel/propagation/propagation.go b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
new file mode 100644
index 0000000000..c94438f73a
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/propagation.go
@@ -0,0 +1,153 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"net/http"
+)
+
+// TextMapCarrier is the storage medium used by a TextMapPropagator.
+type TextMapCarrier interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Get returns the value associated with the passed key.
+	Get(key string) string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Set stores the key-value pair.
+	Set(key string, value string)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Keys lists the keys stored in this carrier.
+	Keys() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// MapCarrier is a TextMapCarrier that uses a map held in memory as a storage
+// medium for propagated key-value pairs.
+type MapCarrier map[string]string
+
+// Compile time check that MapCarrier implements the TextMapCarrier.
+var _ TextMapCarrier = MapCarrier{}
+
+// Get returns the value associated with the passed key.
+func (c MapCarrier) Get(key string) string {
+	return c[key]
+}
+
+// Set stores the key-value pair.
+func (c MapCarrier) Set(key, value string) {
+	c[key] = value
+}
+
+// Keys lists the keys stored in this carrier.
+func (c MapCarrier) Keys() []string {
+	keys := make([]string, 0, len(c))
+	for k := range c {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// HeaderCarrier adapts http.Header to satisfy the TextMapCarrier interface.
+type HeaderCarrier http.Header
+
+// Get returns the value associated with the passed key.
+func (hc HeaderCarrier) Get(key string) string {
+	return http.Header(hc).Get(key)
+}
+
+// Set stores the key-value pair.
+func (hc HeaderCarrier) Set(key string, value string) {
+	http.Header(hc).Set(key, value)
+}
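+// Illustrative sketch (editor's addition): HeaderCarrier lets a
+// propagator read and write standard http.Header values. Assuming a
+// configured TextMapPropagator `prop` (for example, the global one):
+//
+//	req, _ := http.NewRequest("GET", "http://example.com", nil)
+//	prop.Inject(ctx, HeaderCarrier(req.Header))
+//	ctx2 := prop.Extract(context.Background(), HeaderCarrier(req.Header))
+
+// Keys lists the keys stored in this carrier.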
+func (hc HeaderCarrier) Keys() []string {
+	keys := make([]string, 0, len(hc))
+	for k := range hc {
+		keys = append(keys, k)
+	}
+	return keys
+}
+
+// TextMapPropagator propagates cross-cutting concerns as key-value text
+// pairs within a carrier that travels in-band across process boundaries.
+type TextMapPropagator interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Inject sets cross-cutting concerns from the Context into the carrier.
+	Inject(ctx context.Context, carrier TextMapCarrier)
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Extract reads cross-cutting concerns from the carrier into a Context.
+	Extract(ctx context.Context, carrier TextMapCarrier) context.Context
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Fields returns the keys whose values are set with Inject.
+	Fields() []string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type compositeTextMapPropagator []TextMapPropagator
+
+func (p compositeTextMapPropagator) Inject(ctx context.Context, carrier TextMapCarrier) {
+	for _, i := range p {
+		i.Inject(ctx, carrier)
+	}
+}
+
+func (p compositeTextMapPropagator) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	for _, i := range p {
+		ctx = i.Extract(ctx, carrier)
+	}
+	return ctx
+}
+
+func (p compositeTextMapPropagator) Fields() []string {
+	unique := make(map[string]struct{})
+	for _, i := range p {
+		for _, k := range i.Fields() {
+			unique[k] = struct{}{}
+		}
+	}
+
+	fields := make([]string, 0, len(unique))
+	for k := range unique {
+		fields = append(fields, k)
+	}
+	return fields
+}
+
+// NewCompositeTextMapPropagator returns a unified TextMapPropagator from the
+// group of passed TextMapPropagators. This allows different cross-cutting
+// concerns to be propagated in a unified manner.
+//
+// The returned TextMapPropagator will inject and extract cross-cutting
+// concerns in the order the TextMapPropagators were provided. Additionally,
+// the Fields method will return a de-duplicated slice of the keys that are
+// set with the Inject method.
+func NewCompositeTextMapPropagator(p ...TextMapPropagator) TextMapPropagator {
+	return compositeTextMapPropagator(p)
+}
diff --git a/vendor/go.opentelemetry.io/otel/propagation/trace_context.go b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
new file mode 100644
index 0000000000..902692da08
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/propagation/trace_context.go
@@ -0,0 +1,159 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+
+package propagation // import "go.opentelemetry.io/otel/propagation"
+
+import (
+	"context"
+	"encoding/hex"
+	"fmt"
+	"regexp"
+
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	supportedVersion  = 0
+	maxVersion        = 254
+	traceparentHeader = "traceparent"
+	tracestateHeader  = "tracestate"
+)
+
+// TraceContext is a propagator that supports the W3C Trace Context format
+// (https://www.w3.org/TR/trace-context/)
+//
+// This propagator will propagate the traceparent and tracestate headers to
+// guarantee traces are not broken. It is up to the users of this propagator
+// to choose if they want to participate in a trace by modifying the
+// traceparent header and relevant parts of the tracestate header containing
+// their proprietary information.
+type TraceContext struct{}
+
+var _ TextMapPropagator = TraceContext{}
+var traceCtxRegExp = regexp.MustCompile("^(?P<version>[0-9a-f]{2})-(?P<traceId>[a-f0-9]{32})-(?P<spanId>[a-f0-9]{16})-(?P<traceFlags>[a-f0-9]{2})(?:-.*)?$")
+
+// Inject sets tracecontext from the Context into the carrier.
+func (tc TraceContext) Inject(ctx context.Context, carrier TextMapCarrier) {
+	sc := trace.SpanContextFromContext(ctx)
+	if !sc.IsValid() {
+		return
+	}
+
+	if ts := sc.TraceState().String(); ts != "" {
+		carrier.Set(tracestateHeader, ts)
+	}
+
+	// Clear all flags other than the trace-context supported sampling bit.
+	flags := sc.TraceFlags() & trace.FlagsSampled
+
+	h := fmt.Sprintf("%.2x-%s-%s-%s",
+		supportedVersion,
+		sc.TraceID(),
+		sc.SpanID(),
+		flags)
+	carrier.Set(traceparentHeader, h)
+}
+
+// Extract reads tracecontext from the carrier into a returned Context.
+//
+// The returned Context will be a copy of ctx and contain the extracted
+// tracecontext as the remote SpanContext. If the extracted tracecontext is
+// invalid, the passed ctx will be returned directly instead.
+func (tc TraceContext) Extract(ctx context.Context, carrier TextMapCarrier) context.Context {
+	sc := tc.extract(carrier)
+	if !sc.IsValid() {
+		return ctx
+	}
+	return trace.ContextWithRemoteSpanContext(ctx, sc)
+}
+
+func (tc TraceContext) extract(carrier TextMapCarrier) trace.SpanContext {
+	h := carrier.Get(traceparentHeader)
+	if h == "" {
+		return trace.SpanContext{}
+	}
+
+	matches := traceCtxRegExp.FindStringSubmatch(h)
+
+	if len(matches) == 0 {
+		return trace.SpanContext{}
+	}
+
+	if len(matches) < 5 { // four subgroups plus the overall match
+		return trace.SpanContext{}
+	}
+
+	if len(matches[1]) != 2 {
+		return trace.SpanContext{}
+	}
+	ver, err := hex.DecodeString(matches[1])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+	version := int(ver[0])
+	if version > maxVersion {
+		return trace.SpanContext{}
+	}
+
+	if version == 0 && len(matches) != 5 { // four subgroups plus the overall match
+		return trace.SpanContext{}
+	}
+
+	if len(matches[2]) != 32 {
+		return trace.SpanContext{}
+	}
+
+	var scc trace.SpanContextConfig
+
+	scc.TraceID, err = trace.TraceIDFromHex(matches[2][:32])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+
+	if len(matches[3]) != 16 {
+		return trace.SpanContext{}
+	}
+	scc.SpanID, err = trace.SpanIDFromHex(matches[3])
+	if err != nil {
+		return trace.SpanContext{}
+	}
+
+	if len(matches[4]) != 2 {
+		return trace.SpanContext{}
+	}
+	opts, err := hex.DecodeString(matches[4])
+	if err != nil || len(opts) < 1 || (version == 0 && opts[0] > 2) {
+		return trace.SpanContext{}
+	}
+	// Clear all flags other than the trace-context supported sampling bit.
+	scc.TraceFlags = trace.TraceFlags(opts[0]) & trace.FlagsSampled
+
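+	// For reference (editor's note), the canonical W3C example of a
+	// version-00 traceparent that reaches this point is:
+	//   00-4bf92f3577b34da6a3ce929d0e0e4736-00f067aa0ba902b7-01
+	// i.e. version, 16-byte trace-id, 8-byte span-id, and flags, all
+	// in lowercase hex.
+
+	// Ignore the error returned here. Failure to parse tracestate MUST NOT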
+	// affect the parsing of traceparent according to the W3C tracecontext
+	// specification.
+	scc.TraceState, _ = trace.ParseTraceState(carrier.Get(tracestateHeader))
+	scc.Remote = true
+
+	sc := trace.NewSpanContext(scc)
+	if !sc.IsValid() {
+		return trace.SpanContext{}
+	}
+
+	return sc
+}
+
+// Fields returns the keys whose values are set with Inject.
+func (tc TraceContext) Fields() []string {
+	return []string{traceparentHeader, tracestateHeader}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/LICENSE b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
new file mode 100644
index 0000000000..261eeb9e9f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/LICENSE
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner.
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go new file mode 100644 index 0000000000..6f0016169e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/instrumentation/library.go @@ -0,0 +1,33 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package instrumentation provides an instrumentation library structure to be +passed to both the OpenTelemetry Tracer and Meter components. + +For more information see +[this](https://github.com/open-telemetry/oteps/blob/main/text/0083-component.md). +*/ +package instrumentation // import "go.opentelemetry.io/otel/sdk/instrumentation" + +// Library represents the instrumentation library. +type Library struct { + // Name is the name of the instrumentation library. This should be the + // Go package name of that library. + Name string + // Version is the version of the instrumentation library. + Version string + // SchemaURL of the telemetry emitted by the library. + SchemaURL string +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go new file mode 100644 index 0000000000..397fd9593c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/env/env.go @@ -0,0 +1,88 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package env // import "go.opentelemetry.io/otel/sdk/internal/env" + +import ( + "os" + "strconv" + + "go.opentelemetry.io/otel/internal/global" +) + +// Environment variable names +const ( + // BatchSpanProcessorScheduleDelayKey + // Delay interval between two consecutive exports. + // i.e. 5000 + BatchSpanProcessorScheduleDelayKey = "OTEL_BSP_SCHEDULE_DELAY" + // BatchSpanProcessorExportTimeoutKey + // Maximum allowed time to export data. + // i.e. 3000 + BatchSpanProcessorExportTimeoutKey = "OTEL_BSP_EXPORT_TIMEOUT" + // BatchSpanProcessorMaxQueueSizeKey + // Maximum queue size + // i.e. 2048 + BatchSpanProcessorMaxQueueSizeKey = "OTEL_BSP_MAX_QUEUE_SIZE" + // BatchSpanProcessorMaxExportBatchSizeKey + // Maximum batch size + // Note: Must be less than or equal to EnvBatchSpanProcessorMaxQueueSize + // i.e. 512 + BatchSpanProcessorMaxExportBatchSizeKey = "OTEL_BSP_MAX_EXPORT_BATCH_SIZE" +) + +// IntEnvOr returns the int value of the environment variable with name key if +// it exists and the value is an int. Otherwise, defaultValue is returned. +func IntEnvOr(key string, defaultValue int) int { + value, ok := os.LookupEnv(key) + if !ok { + return defaultValue + } + + intValue, err := strconv.Atoi(value) + if err != nil { + global.Info("Got invalid value, number value expected.", key, value) + return defaultValue + } + + return intValue +} + +// BatchSpanProcessorScheduleDelay returns the environment variable value for +// the OTEL_BSP_SCHEDULE_DELAY key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorScheduleDelay(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorScheduleDelayKey, defaultValue) +} + +// BatchSpanProcessorExportTimeout returns the environment variable value for +// the OTEL_BSP_EXPORT_TIMEOUT key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorExportTimeout(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorExportTimeoutKey, defaultValue) +} + +// BatchSpanProcessorMaxQueueSize returns the environment variable value for +// the OTEL_BSP_MAX_QUEUE_SIZE key if it exists, otherwise defaultValue is +// returned. +func BatchSpanProcessorMaxQueueSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxQueueSizeKey, defaultValue) +} + +// BatchSpanProcessorMaxExportBatchSize returns the environment variable value for +// the OTEL_BSP_MAX_EXPORT_BATCH_SIZE key if it exists, otherwise defaultValue +// is returned. +func BatchSpanProcessorMaxExportBatchSize(defaultValue int) int { + return IntEnvOr(BatchSpanProcessorMaxExportBatchSizeKey, defaultValue) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go new file mode 100644 index 0000000000..84a02306e6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/internal/internal.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package internal // import "go.opentelemetry.io/otel/sdk/internal" + +import ( + "fmt" + "time" + + "go.opentelemetry.io/otel" +) + +// UserAgent is the user agent to be added to the outgoing +// requests from the exporters. +var UserAgent = fmt.Sprintf("opentelemetry-go/%s", otel.Version()) + +// MonotonicEndTime returns the end time at present +// but offset from start, monotonically. +// +// The monotonic clock is used in subtractions hence +// the duration since start added back to start gives +// end as a monotonic time. +// See https://golang.org/pkg/time/#hdr-Monotonic_Clocks +func MonotonicEndTime(start time.Time) time.Time { + return start.Add(time.Since(start)) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go new file mode 100644 index 0000000000..a5eaa7e5d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/auto.go @@ -0,0 +1,72 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" +) + +var ( + // ErrPartialResource is returned by a detector when complete source + // information for a Resource is unavailable or the source information + // contains invalid values that are omitted from the returned Resource. + ErrPartialResource = errors.New("partial resource") +) + +// Detector detects OpenTelemetry resource information +type Detector interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // Detect returns an initialized Resource based on gathered information. + // If the source information to construct a Resource contains invalid + // values, a Resource is returned with the valid parts of the source + // information used for initialization along with an appropriately + // wrapped ErrPartialResource error. + Detect(ctx context.Context) (*Resource, error) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. +} + +// Detect calls all input detectors sequentially and merges each result with the previous one. +// It returns the merged error too. 
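As a hedged sketch of how the Detect function below composes detectors: the envDetector type and its key are hypothetical, introduced only to illustrate the Detector interface defined above.

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

// envDetector is a hypothetical Detector that exposes one environment
// variable as a schemaless resource attribute.
type envDetector struct{ key string }

func (d envDetector) Detect(context.Context) (*resource.Resource, error) {
	return resource.NewSchemaless(attribute.String(d.key, os.Getenv(d.key))), nil
}

func main() {
	// Nil detectors are skipped; results merge left to right and errors
	// are aggregated instead of aborting detection.
	res, err := resource.Detect(context.Background(), envDetector{key: "HOME"}, nil)
	if err != nil {
		fmt.Println("detection error:", err)
	}
	fmt.Println(res)
}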
+func Detect(ctx context.Context, detectors ...Detector) (*Resource, error) { + var autoDetectedRes *Resource + var errInfo []string + for _, detector := range detectors { + if detector == nil { + continue + } + res, err := detector.Detect(ctx) + if err != nil { + errInfo = append(errInfo, err.Error()) + if !errors.Is(err, ErrPartialResource) { + continue + } + } + autoDetectedRes, err = Merge(autoDetectedRes, res) + if err != nil { + errInfo = append(errInfo, err.Error()) + } + } + + var aggregatedError error + if len(errInfo) > 0 { + aggregatedError = fmt.Errorf("detecting resources: %s", errInfo) + } + return autoDetectedRes, aggregatedError +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go new file mode 100644 index 0000000000..701eae40a3 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/builtin.go @@ -0,0 +1,108 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "path/filepath" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" +) + +type ( + // telemetrySDK is a Detector that provides information about + // the OpenTelemetry SDK used. This Detector is included as a + // builtin. If these resource attributes are not wanted, use + // the WithTelemetrySDK(nil) or WithoutBuiltin() options to + // explicitly disable them. + telemetrySDK struct{} + + // host is a Detector that provides information about the host + // being run on. This Detector is included as a builtin. If + // these resource attributes are not wanted, use the + // WithHost(nil) or WithoutBuiltin() options to explicitly + // disable them. + host struct{} + + stringDetector struct { + schemaURL string + K attribute.Key + F func() (string, error) + } + + defaultServiceNameDetector struct{} +) + +var ( + _ Detector = telemetrySDK{} + _ Detector = host{} + _ Detector = stringDetector{} + _ Detector = defaultServiceNameDetector{} +) + +// Detect returns a *Resource that describes the OpenTelemetry SDK used. +func (telemetrySDK) Detect(context.Context) (*Resource, error) { + return NewWithAttributes( + semconv.SchemaURL, + semconv.TelemetrySDKNameKey.String("opentelemetry"), + semconv.TelemetrySDKLanguageKey.String("go"), + semconv.TelemetrySDKVersionKey.String(otel.Version()), + ), nil +} + +// Detect returns a *Resource that describes the host being run on. +func (host) Detect(ctx context.Context) (*Resource, error) { + return StringDetector(semconv.SchemaURL, semconv.HostNameKey, os.Hostname).Detect(ctx) +} + +// StringDetector returns a Detector that will produce a *Resource +// containing the string as a value corresponding to k. The resulting Resource +// will have the specified schemaURL. 
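A short sketch of a custom detector built from the StringDetector helper declared just below; the cloud.region key and DEPLOY_REGION variable are assumptions for illustration.

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

func main() {
	region := resource.StringDetector(
		semconv.SchemaURL,
		attribute.Key("cloud.region"), // hypothetical key
		func() (string, error) { return os.Getenv("DEPLOY_REGION"), nil },
	)

	res, err := resource.Detect(context.Background(), region)
	if err != nil {
		fmt.Println("detection error:", err)
	}
	fmt.Println(res)
}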
+func StringDetector(schemaURL string, k attribute.Key, f func() (string, error)) Detector { + return stringDetector{schemaURL: schemaURL, K: k, F: f} +} + +// Detect returns a *Resource that describes the string as a value +// corresponding to attribute.Key as well as the specific schemaURL. +func (sd stringDetector) Detect(ctx context.Context) (*Resource, error) { + value, err := sd.F() + if err != nil { + return nil, fmt.Errorf("%s: %w", string(sd.K), err) + } + a := sd.K.String(value) + if !a.Valid() { + return nil, fmt.Errorf("invalid attribute: %q -> %q", a.Key, a.Value.Emit()) + } + return NewWithAttributes(sd.schemaURL, sd.K.String(value)), nil +} + +// Detect implements Detector +func (defaultServiceNameDetector) Detect(ctx context.Context) (*Resource, error) { + return StringDetector( + semconv.SchemaURL, + semconv.ServiceNameKey, + func() (string, error) { + executable, err := os.Executable() + if err != nil { + return "unknown_service:go", nil + } + return "unknown_service:" + filepath.Base(executable), nil + }, + ).Detect(ctx) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/config.go b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go new file mode 100644 index 0000000000..d80b5ae621 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/config.go @@ -0,0 +1,173 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" +) + +// config contains configuration for Resource creation. +type config struct { + // detectors that will be evaluated. + detectors []Detector + // SchemaURL to associate with the Resource. + schemaURL string +} + +// Option is the interface that applies a configuration option. +type Option interface { + // apply sets the Option value of a config. + apply(config) config +} + +// WithAttributes adds attributes to the configured Resource. +func WithAttributes(attributes ...attribute.KeyValue) Option { + return WithDetectors(detectAttributes{attributes}) +} + +type detectAttributes struct { + attributes []attribute.KeyValue +} + +func (d detectAttributes) Detect(context.Context) (*Resource, error) { + return NewSchemaless(d.attributes...), nil +} + +// WithDetectors adds detectors to be evaluated for the configured resource. +func WithDetectors(detectors ...Detector) Option { + return detectorsOption{detectors: detectors} +} + +type detectorsOption struct { + detectors []Detector +} + +func (o detectorsOption) apply(cfg config) config { + cfg.detectors = append(cfg.detectors, o.detectors...) + return cfg +} + +// WithFromEnv adds attributes from environment variables to the configured resource. +func WithFromEnv() Option { + return WithDetectors(fromEnv{}) +} + +// WithHost adds attributes from the host to the configured resource. 
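A minimal sketch of assembling a Resource from the options in this file via resource.New (defined later in resource.go); the attribute value is illustrative.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	res, err := resource.New(context.Background(),
		resource.WithFromEnv(),      // OTEL_SERVICE_NAME / OTEL_RESOURCE_ATTRIBUTES
		resource.WithTelemetrySDK(), // telemetry.sdk.* attributes
		resource.WithHost(),         // host.name
		resource.WithAttributes(attribute.String("service.version", "1.2.3")),
	)
	if err != nil {
		fmt.Println("resource error:", err)
	}
	fmt.Println(res)
}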
+func WithHost() Option {
+	return WithDetectors(host{})
+}
+
+// WithTelemetrySDK adds TelemetrySDK version info to the configured resource.
+func WithTelemetrySDK() Option {
+	return WithDetectors(telemetrySDK{})
+}
+
+// WithSchemaURL sets the schema URL for the configured resource.
+func WithSchemaURL(schemaURL string) Option {
+	return schemaURLOption(schemaURL)
+}
+
+type schemaURLOption string
+
+func (o schemaURLOption) apply(cfg config) config {
+	cfg.schemaURL = string(o)
+	return cfg
+}
+
+// WithOS adds all the OS attributes to the configured Resource.
+// See individual WithOS* functions to configure specific attributes.
+func WithOS() Option {
+	return WithDetectors(
+		osTypeDetector{},
+		osDescriptionDetector{},
+	)
+}
+
+// WithOSType adds an attribute with the operating system type to the configured Resource.
+func WithOSType() Option {
+	return WithDetectors(osTypeDetector{})
+}
+
+// WithOSDescription adds an attribute with the operating system description to the
+// configured Resource. The formatted string is equivalent to the output of the
+// `uname -snrvm` command.
+func WithOSDescription() Option {
+	return WithDetectors(osDescriptionDetector{})
+}
+
+// WithProcess adds all the Process attributes to the configured Resource.
+// See individual WithProcess* functions to configure specific attributes.
+func WithProcess() Option {
+	return WithDetectors(
+		processPIDDetector{},
+		processExecutableNameDetector{},
+		processExecutablePathDetector{},
+		processCommandArgsDetector{},
+		processOwnerDetector{},
+		processRuntimeNameDetector{},
+		processRuntimeVersionDetector{},
+		processRuntimeDescriptionDetector{},
+	)
+}
+
+// WithProcessPID adds an attribute with the process identifier (PID) to the
+// configured Resource.
+func WithProcessPID() Option {
+	return WithDetectors(processPIDDetector{})
+}
+
+// WithProcessExecutableName adds an attribute with the name of the process
+// executable to the configured Resource.
+func WithProcessExecutableName() Option {
+	return WithDetectors(processExecutableNameDetector{})
+}
+
+// WithProcessExecutablePath adds an attribute with the full path to the process
+// executable to the configured Resource.
+func WithProcessExecutablePath() Option {
+	return WithDetectors(processExecutablePathDetector{})
+}
+
+// WithProcessCommandArgs adds an attribute with all the command arguments (including
+// the command/executable itself) as received by the process to the configured Resource.
+func WithProcessCommandArgs() Option {
+	return WithDetectors(processCommandArgsDetector{})
+}
+
+// WithProcessOwner adds an attribute with the username of the user that owns the process
+// to the configured Resource.
+func WithProcessOwner() Option {
+	return WithDetectors(processOwnerDetector{})
+}
+
+// WithProcessRuntimeName adds an attribute with the name of the runtime of this
+// process to the configured Resource.
+func WithProcessRuntimeName() Option {
+	return WithDetectors(processRuntimeNameDetector{})
+}
+
+// WithProcessRuntimeVersion adds an attribute with the version of the runtime of
+// this process to the configured Resource.
+func WithProcessRuntimeVersion() Option {
+	return WithDetectors(processRuntimeVersionDetector{})
+}
+
+// WithProcessRuntimeDescription adds an attribute with an additional description
+// about the runtime of the process to the configured Resource.
+func WithProcessRuntimeDescription() Option {
+	return WithDetectors(processRuntimeDescriptionDetector{})
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
new file mode 100644
index 0000000000..9aab3d8393
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/doc.go
@@ -0,0 +1,28 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Package resource provides detecting and representing resources.
+//
+// The fundamental struct is a Resource which holds identifying information
+// about the entities for which telemetry is exported.
+//
+// To automatically construct Resources from an environment a Detector
+// interface is defined. Implementations of this interface can be passed to
+// the Detect function to generate a Resource from the merged information.
+//
+// To load a user-defined Resource from the environment variable
+// OTEL_RESOURCE_ATTRIBUTES the FromEnv Detector can be used. It will interpret
+// the value as a list of comma-delimited key/value pairs
+// (e.g. `<key1>=<value1>,<key2>=<value2>,...`).
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/env.go b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
new file mode 100644
index 0000000000..9392296cba
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/env.go
@@ -0,0 +1,99 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"context"
+	"fmt"
+	"os"
+	"strings"
+
+	"go.opentelemetry.io/otel/attribute"
+	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+)
+
+const (
+	// resourceAttrKey is the environment variable name OpenTelemetry Resource information will be read from.
+	resourceAttrKey = "OTEL_RESOURCE_ATTRIBUTES"
+
+	// svcNameKey is the environment variable name that Service Name information will be read from.
+	svcNameKey = "OTEL_SERVICE_NAME"
+)
+
+var (
+	// errMissingValue is returned when a resource value is missing.
+	errMissingValue = fmt.Errorf("%w: missing value", ErrPartialResource)
+)
+
+// fromEnv is a Detector that implements the Detector interface and collects
+// resources from the environment. This Detector is included as a builtin.
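A hedged sketch of the environment detector described above, wired through the WithFromEnv option; the variable values are illustrative.

package main

import (
	"context"
	"fmt"
	"os"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Comma-delimited key=value pairs; OTEL_SERVICE_NAME takes precedence
	// over a service.name entry in OTEL_RESOURCE_ATTRIBUTES.
	os.Setenv("OTEL_RESOURCE_ATTRIBUTES", "service.name=fallback,region=eu-west-1")
	os.Setenv("OTEL_SERVICE_NAME", "checkout")

	res, err := resource.New(context.Background(), resource.WithFromEnv())
	if err != nil {
		fmt.Println("env detection error:", err)
	}
	fmt.Println(res) // region=eu-west-1,service.name=checkout
}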
+type fromEnv struct{} + +// compile time assertion that FromEnv implements Detector interface +var _ Detector = fromEnv{} + +// Detect collects resources from environment +func (fromEnv) Detect(context.Context) (*Resource, error) { + attrs := strings.TrimSpace(os.Getenv(resourceAttrKey)) + svcName := strings.TrimSpace(os.Getenv(svcNameKey)) + + if attrs == "" && svcName == "" { + return Empty(), nil + } + + var res *Resource + + if svcName != "" { + res = NewSchemaless(semconv.ServiceNameKey.String(svcName)) + } + + r2, err := constructOTResources(attrs) + + // Ensure that the resource with the service name from OTEL_SERVICE_NAME + // takes precedence, if it was defined. + res, err2 := Merge(r2, res) + + if err == nil { + err = err2 + } else if err2 != nil { + err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()}) + } + + return res, err +} + +func constructOTResources(s string) (*Resource, error) { + if s == "" { + return Empty(), nil + } + pairs := strings.Split(s, ",") + attrs := []attribute.KeyValue{} + var invalid []string + for _, p := range pairs { + field := strings.SplitN(p, "=", 2) + if len(field) != 2 { + invalid = append(invalid, p) + continue + } + k, v := strings.TrimSpace(field[0]), strings.TrimSpace(field[1]) + attrs = append(attrs, attribute.String(k, v)) + } + var err error + if len(invalid) > 0 { + err = fmt.Errorf("%w: %v", errMissingValue, invalid) + } + return NewSchemaless(attrs...), err +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go new file mode 100644 index 0000000000..59329770cf --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os.go @@ -0,0 +1,97 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "strings" + + "go.opentelemetry.io/otel/attribute" + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" +) + +type osDescriptionProvider func() (string, error) + +var defaultOSDescriptionProvider osDescriptionProvider = platformOSDescription + +var osDescription = defaultOSDescriptionProvider + +func setDefaultOSDescriptionProvider() { + setOSDescriptionProvider(defaultOSDescriptionProvider) +} + +func setOSDescriptionProvider(osDescriptionProvider osDescriptionProvider) { + osDescription = osDescriptionProvider +} + +type osTypeDetector struct{} +type osDescriptionDetector struct{} + +// Detect returns a *Resource that describes the operating system type the +// service is running on. +func (osTypeDetector) Detect(ctx context.Context) (*Resource, error) { + osType := runtimeOS() + + osTypeAttribute := mapRuntimeOSToSemconvOSType(osType) + + return NewWithAttributes( + semconv.SchemaURL, + osTypeAttribute, + ), nil +} + +// Detect returns a *Resource that describes the operating system the +// service is running on. 
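A short sketch exercising the OS detectors above through resource.New and the WithOS option from config.go; which attributes appear depends on the platform.

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// os.type is always set; os.description is populated on platforms
	// with a platformOSDescription implementation (see the os_*.go files).
	res, err := resource.New(context.Background(), resource.WithOS())
	if err != nil {
		fmt.Println("os detection error:", err)
	}
	for _, kv := range res.Attributes() {
		fmt.Printf("%s = %s\n", kv.Key, kv.Value.Emit())
	}
}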
+func (osDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + description, err := osDescription() + + if err != nil { + return nil, err + } + + return NewWithAttributes( + semconv.SchemaURL, + semconv.OSDescriptionKey.String(description), + ), nil +} + +// mapRuntimeOSToSemconvOSType translates the OS name as provided by the Go runtime +// into an OS type attribute with the corresponding value defined by the semantic +// conventions. In case the provided OS name isn't mapped, it's transformed to lowercase +// and used as the value for the returned OS type attribute. +func mapRuntimeOSToSemconvOSType(osType string) attribute.KeyValue { + // the elements in this map are the intersection between + // available GOOS values and defined semconv OS types + osTypeAttributeMap := map[string]attribute.KeyValue{ + "darwin": semconv.OSTypeDarwin, + "dragonfly": semconv.OSTypeDragonflyBSD, + "freebsd": semconv.OSTypeFreeBSD, + "linux": semconv.OSTypeLinux, + "netbsd": semconv.OSTypeNetBSD, + "openbsd": semconv.OSTypeOpenBSD, + "solaris": semconv.OSTypeSolaris, + "windows": semconv.OSTypeWindows, + } + + var osTypeAttribute attribute.KeyValue + + if attr, ok := osTypeAttributeMap[osType]; ok { + osTypeAttribute = attr + } else { + osTypeAttribute = semconv.OSTypeKey.String(strings.ToLower(osType)) + } + + return osTypeAttribute +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go new file mode 100644 index 0000000000..24ec85793d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_darwin.go @@ -0,0 +1,102 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "encoding/xml" + "fmt" + "io" + "os" +) + +type plist struct { + XMLName xml.Name `xml:"plist"` + Dict dict `xml:"dict"` +} + +type dict struct { + Key []string `xml:"key"` + String []string `xml:"string"` +} + +// osRelease builds a string describing the operating system release based on the +// contents of the property list (.plist) system files. If no .plist files are found, +// or if the required properties to build the release description string are missing, +// an empty string is returned instead. The generated string resembles the output of +// the `sw_vers` commandline program, but in a single-line string. For more information +// about the `sw_vers` program, see: https://www.unix.com/man-page/osx/1/SW_VERS. +func osRelease() string { + file, err := getPlistFile() + if err != nil { + return "" + } + + defer file.Close() + + values, err := parsePlistFile(file) + if err != nil { + return "" + } + + return buildOSRelease(values) +} + +// getPlistFile returns a *os.File pointing to one of the well-known .plist files +// available on macOS. If no file can be opened, it returns an error. 
+func getPlistFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{
+		"/System/Library/CoreServices/SystemVersion.plist",
+		"/System/Library/CoreServices/ServerVersion.plist",
+	})
+}
+
+// parsePlistFile processes the file pointed to by `file` as a .plist file and
+// returns a map with the key-values for each pair of correlated <key> and
+// <string> elements contained in it.
+func parsePlistFile(file io.Reader) (map[string]string, error) {
+	var v plist
+
+	err := xml.NewDecoder(file).Decode(&v)
+	if err != nil {
+		return nil, err
+	}
+
+	if len(v.Dict.Key) != len(v.Dict.String) {
+		return nil, fmt.Errorf("the number of <key> and <string> elements doesn't match")
+	}
+
+	properties := make(map[string]string, len(v.Dict.Key))
+	for i, key := range v.Dict.Key {
+		properties[key] = v.Dict.String[i]
+	}
+
+	return properties, nil
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It tries to find the `ProductName`, `ProductVersion`
+// and `ProductBuildVersion` properties. If some of these properties are not found,
+// it returns an empty string.
+func buildOSRelease(properties map[string]string) string {
+	productName := properties["ProductName"]
+	productVersion := properties["ProductVersion"]
+	productBuildVersion := properties["ProductBuildVersion"]
+
+	if productName == "" || productVersion == "" || productBuildVersion == "" {
+		return ""
+	}
+
+	return fmt.Sprintf("%s %s (%s)", productName, productVersion, productBuildVersion)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
new file mode 100644
index 0000000000..fba6790e44
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_release_unix.go
@@ -0,0 +1,154 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bufio"
+	"fmt"
+	"io"
+	"os"
+	"strings"
+)
+
+// osRelease builds a string describing the operating system release based on the
+// properties of the os-release file. If no os-release file is found, or if the
+// required properties to build the release description string are missing, an empty
+// string is returned instead. For more information about os-release files, see:
+// https://www.freedesktop.org/software/systemd/man/os-release.html
+func osRelease() string {
+	file, err := getOSReleaseFile()
+	if err != nil {
+		return ""
+	}
+
+	defer file.Close()
+
+	values := parseOSReleaseFile(file)
+
+	return buildOSRelease(values)
+}
+
+// getOSReleaseFile returns a *os.File pointing to one of the well-known os-release
+// files, according to their order of preference. If no file can be opened, it
+// returns an error.
+func getOSReleaseFile() (*os.File, error) {
+	return getFirstAvailableFile([]string{"/etc/os-release", "/usr/lib/os-release"})
+}
+
+// parseOSReleaseFile processes the file pointed to by `file` as an os-release file
+// and returns a map with the key-values contained in it. Empty lines or lines
+// starting with a '#' character are ignored, as well as lines missing the
+// key=value separator. Values are unquoted and unescaped.
+func parseOSReleaseFile(file io.Reader) map[string]string {
+	values := make(map[string]string)
+	scanner := bufio.NewScanner(file)
+
+	for scanner.Scan() {
+		line := scanner.Text()
+
+		if skip(line) {
+			continue
+		}
+
+		key, value, ok := parse(line)
+		if ok {
+			values[key] = value
+		}
+	}
+
+	return values
+}
+
+// skip returns true if the line is blank or starts with a '#' character, and
+// therefore should be skipped from processing.
+func skip(line string) bool {
+	line = strings.TrimSpace(line)
+
+	return len(line) == 0 || strings.HasPrefix(line, "#")
+}
+
+// parse attempts to split the provided line on the first '=' character, and then
+// sanitize each side of the split before returning them as a key-value pair.
+func parse(line string) (string, string, bool) {
+	parts := strings.SplitN(line, "=", 2)
+
+	if len(parts) != 2 || len(parts[0]) == 0 {
+		return "", "", false
+	}
+
+	key := strings.TrimSpace(parts[0])
+	value := unescape(unquote(strings.TrimSpace(parts[1])))
+
+	return key, value, true
+}
+
+// unquote checks whether the string `s` is quoted with double or single quotes
+// and, if so, returns a version of the string without them. Otherwise it returns
+// the provided string unchanged.
+func unquote(s string) string {
+	if len(s) < 2 {
+		return s
+	}
+
+	if (s[0] == '"' || s[0] == '\'') && s[0] == s[len(s)-1] {
+		return s[1 : len(s)-1]
+	}
+
+	return s
+}
+
+// unescape removes the `\` prefix from some characters that are expected
+// to have it added in front of them for escaping purposes.
+func unescape(s string) string {
+	return strings.NewReplacer(
+		`\$`, `$`,
+		`\"`, `"`,
+		`\'`, `'`,
+		`\\`, `\`,
+		"\\`", "`",
+	).Replace(s)
+}
+
+// buildOSRelease builds a string describing the OS release based on the properties
+// available on the provided map. It favors a combination of the `NAME` and `VERSION`
+// properties as first option (falling back to `VERSION_ID` if `VERSION` isn't
+// found), and using `PRETTY_NAME` alone if some of the previous are not present. If
+// none of these properties are found, it returns an empty string.
+//
+// The rationale behind not using `PRETTY_NAME` as first choice was that, for some
+// Linux distributions, it doesn't include the same detail that can be found on the
+// individual `NAME` and `VERSION` properties, and combining `PRETTY_NAME` with
+// other properties can produce "pretty" redundant strings in some cases.
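As an illustration of the parsing and precedence rules described above (implemented by buildOSRelease just below), a standalone sketch using only the standard library; it is simplified to quote-trimming instead of full unescaping, and the sample file content is illustrative.

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	sample := `# sample /etc/os-release content
NAME="Ubuntu"
VERSION="22.04.3 LTS (Jammy Jellyfish)"
PRETTY_NAME="Ubuntu 22.04.3 LTS"`

	values := map[string]string{}
	sc := bufio.NewScanner(strings.NewReader(sample))
	for sc.Scan() {
		line := strings.TrimSpace(sc.Text())
		if line == "" || strings.HasPrefix(line, "#") {
			continue // blanks and comments are skipped, as in skip()
		}
		parts := strings.SplitN(line, "=", 2)
		if len(parts) != 2 || parts[0] == "" {
			continue // no key=value separator, as in parse()
		}
		// Simplified unquote; the vendored code also unescapes characters.
		values[parts[0]] = strings.Trim(parts[1], `"'`)
	}

	// NAME + VERSION is preferred over PRETTY_NAME, as in buildOSRelease.
	fmt.Printf("%s %s\n", values["NAME"], values["VERSION"])
}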
+func buildOSRelease(values map[string]string) string {
+	var osRelease string
+
+	name := values["NAME"]
+	version := values["VERSION"]
+
+	if version == "" {
+		version = values["VERSION_ID"]
+	}
+
+	if name != "" && version != "" {
+		osRelease = fmt.Sprintf("%s %s", name, version)
+	} else {
+		osRelease = values["PRETTY_NAME"]
+	}
+
+	return osRelease
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
new file mode 100644
index 0000000000..42894a15b5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unix.go
@@ -0,0 +1,100 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//go:build aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos
+// +build aix darwin dragonfly freebsd linux netbsd openbsd solaris zos
+
+package resource // import "go.opentelemetry.io/otel/sdk/resource"
+
+import (
+	"bytes"
+	"fmt"
+	"os"
+
+	"golang.org/x/sys/unix"
+)
+
+type unameProvider func(buf *unix.Utsname) (err error)
+
+var defaultUnameProvider unameProvider = unix.Uname
+
+var currentUnameProvider = defaultUnameProvider
+
+func setDefaultUnameProvider() {
+	setUnameProvider(defaultUnameProvider)
+}
+
+func setUnameProvider(unameProvider unameProvider) {
+	currentUnameProvider = unameProvider
+}
+
+// platformOSDescription returns a human-readable OS version information string.
+// The final string combines OS release information (where available) and the
+// result of the `uname` system call.
+func platformOSDescription() (string, error) {
+	uname, err := uname()
+	if err != nil {
+		return "", err
+	}
+
+	osRelease := osRelease()
+	if osRelease != "" {
+		return fmt.Sprintf("%s (%s)", osRelease, uname), nil
+	}
+
+	return uname, nil
+}
+
+// uname issues a uname(2) system call (or the equivalent on systems that don't
+// have one) and formats the output in a single string, similar to the output
+// of the `uname` commandline program. The final string resembles the one
+// obtained with a call to `uname -snrvm`.
+func uname() (string, error) {
+	var utsName unix.Utsname
+
+	err := currentUnameProvider(&utsName)
+	if err != nil {
+		return "", err
+	}
+
+	return fmt.Sprintf("%s %s %s %s %s",
+		charsToString(utsName.Sysname[:]),
+		charsToString(utsName.Nodename[:]),
+		charsToString(utsName.Release[:]),
+		charsToString(utsName.Version[:]),
+		charsToString(utsName.Machine[:]),
+	), nil
+}
+
+// charsToString converts a C-like null-terminated char array to a Go string.
+func charsToString(charArray []byte) string {
+	if i := bytes.IndexByte(charArray, 0); i >= 0 {
+		charArray = charArray[:i]
+	}
+
+	return string(charArray)
+}
+
+// getFirstAvailableFile returns an *os.File of the first available
+// file from a list of candidate file paths.
+func getFirstAvailableFile(candidates []string) (*os.File, error) { + for _, c := range candidates { + file, err := os.Open(c) + if err == nil { + return file, nil + } + } + + return nil, fmt.Errorf("no candidate file available: %v", candidates) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go new file mode 100644 index 0000000000..3ebcb534f2 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_unsupported.go @@ -0,0 +1,34 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// +build !aix +// +build !darwin +// +build !dragonfly +// +build !freebsd +// +build !linux +// +build !netbsd +// +build !openbsd +// +build !solaris +// +build !windows +// +build !zos + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +// platformOSDescription is a placeholder implementation for OSes +// for which this project currently doesn't support os.description +// attribute detection. See build tags declaration early on this file +// for a list of unsupported OSes. +func platformOSDescription() (string, error) { + return "", nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go new file mode 100644 index 0000000000..faad64d8da --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/os_windows.go @@ -0,0 +1,101 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "fmt" + "strconv" + + "golang.org/x/sys/windows/registry" +) + +// platformOSDescription returns a human readable OS version information string. +// It does so by querying registry values under the +// `SOFTWARE\Microsoft\Windows NT\CurrentVersion` key. The final string +// resembles the one displayed by the Version Reporter Applet (winver.exe). 
+func platformOSDescription() (string, error) { + k, err := registry.OpenKey( + registry.LOCAL_MACHINE, `SOFTWARE\Microsoft\Windows NT\CurrentVersion`, registry.QUERY_VALUE) + + if err != nil { + return "", err + } + + defer k.Close() + + var ( + productName = readProductName(k) + displayVersion = readDisplayVersion(k) + releaseID = readReleaseID(k) + currentMajorVersionNumber = readCurrentMajorVersionNumber(k) + currentMinorVersionNumber = readCurrentMinorVersionNumber(k) + currentBuildNumber = readCurrentBuildNumber(k) + ubr = readUBR(k) + ) + + if displayVersion != "" { + displayVersion += " " + } + + return fmt.Sprintf("%s %s(%s) [Version %s.%s.%s.%s]", + productName, + displayVersion, + releaseID, + currentMajorVersionNumber, + currentMinorVersionNumber, + currentBuildNumber, + ubr, + ), nil +} + +func getStringValue(name string, k registry.Key) string { + value, _, _ := k.GetStringValue(name) + + return value +} + +func getIntegerValue(name string, k registry.Key) uint64 { + value, _, _ := k.GetIntegerValue(name) + + return value +} + +func readProductName(k registry.Key) string { + return getStringValue("ProductName", k) +} + +func readDisplayVersion(k registry.Key) string { + return getStringValue("DisplayVersion", k) +} + +func readReleaseID(k registry.Key) string { + return getStringValue("ReleaseID", k) +} + +func readCurrentMajorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMajorVersionNumber", k), 10) +} + +func readCurrentMinorVersionNumber(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("CurrentMinorVersionNumber", k), 10) +} + +func readCurrentBuildNumber(k registry.Key) string { + return getStringValue("CurrentBuildNumber", k) +} + +func readUBR(k registry.Key) string { + return strconv.FormatUint(getIntegerValue("UBR", k), 10) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/process.go b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go new file mode 100644 index 0000000000..80d5e69932 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/process.go @@ -0,0 +1,175 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "fmt" + "os" + "os/user" + "path/filepath" + "runtime" + + semconv "go.opentelemetry.io/otel/semconv/v1.7.0" +) + +type pidProvider func() int +type executablePathProvider func() (string, error) +type commandArgsProvider func() []string +type ownerProvider func() (*user.User, error) +type runtimeNameProvider func() string +type runtimeVersionProvider func() string +type runtimeOSProvider func() string +type runtimeArchProvider func() string + +var ( + defaultPidProvider pidProvider = os.Getpid + defaultExecutablePathProvider executablePathProvider = os.Executable + defaultCommandArgsProvider commandArgsProvider = func() []string { return os.Args } + defaultOwnerProvider ownerProvider = user.Current + defaultRuntimeNameProvider runtimeNameProvider = func() string { return runtime.Compiler } + defaultRuntimeVersionProvider runtimeVersionProvider = runtime.Version + defaultRuntimeOSProvider runtimeOSProvider = func() string { return runtime.GOOS } + defaultRuntimeArchProvider runtimeArchProvider = func() string { return runtime.GOARCH } +) + +var ( + pid = defaultPidProvider + executablePath = defaultExecutablePathProvider + commandArgs = defaultCommandArgsProvider + owner = defaultOwnerProvider + runtimeName = defaultRuntimeNameProvider + runtimeVersion = defaultRuntimeVersionProvider + runtimeOS = defaultRuntimeOSProvider + runtimeArch = defaultRuntimeArchProvider +) + +func setDefaultOSProviders() { + setOSProviders( + defaultPidProvider, + defaultExecutablePathProvider, + defaultCommandArgsProvider, + ) +} + +func setOSProviders( + pidProvider pidProvider, + executablePathProvider executablePathProvider, + commandArgsProvider commandArgsProvider, +) { + pid = pidProvider + executablePath = executablePathProvider + commandArgs = commandArgsProvider +} + +func setDefaultRuntimeProviders() { + setRuntimeProviders( + defaultRuntimeNameProvider, + defaultRuntimeVersionProvider, + defaultRuntimeOSProvider, + defaultRuntimeArchProvider, + ) +} + +func setRuntimeProviders( + runtimeNameProvider runtimeNameProvider, + runtimeVersionProvider runtimeVersionProvider, + runtimeOSProvider runtimeOSProvider, + runtimeArchProvider runtimeArchProvider, +) { + runtimeName = runtimeNameProvider + runtimeVersion = runtimeVersionProvider + runtimeOS = runtimeOSProvider + runtimeArch = runtimeArchProvider +} + +func setDefaultUserProviders() { + setUserProviders(defaultOwnerProvider) +} + +func setUserProviders(ownerProvider ownerProvider) { + owner = ownerProvider +} + +type processPIDDetector struct{} +type processExecutableNameDetector struct{} +type processExecutablePathDetector struct{} +type processCommandArgsDetector struct{} +type processOwnerDetector struct{} +type processRuntimeNameDetector struct{} +type processRuntimeVersionDetector struct{} +type processRuntimeDescriptionDetector struct{} + +// Detect returns a *Resource that describes the process identifier (PID) of the +// executing process. +func (processPIDDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessPIDKey.Int(pid())), nil +} + +// Detect returns a *Resource that describes the name of the process executable. 
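The remaining detectors of this file follow; as a brief usage sketch (assuming the WithProcess option from config.go above), gathering all process attributes at once:

package main

import (
	"context"
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
)

func main() {
	// Collects process.pid, process.executable.*, process.command_args,
	// process.owner and process.runtime.* through the detectors below.
	res, err := resource.New(context.Background(), resource.WithProcess())
	if err != nil {
		fmt.Println("process detection error:", err)
	}
	fmt.Println(res)
}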
+func (processExecutableNameDetector) Detect(ctx context.Context) (*Resource, error) { + executableName := filepath.Base(commandArgs()[0]) + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutableNameKey.String(executableName)), nil +} + +// Detect returns a *Resource that describes the full path of the process executable. +func (processExecutablePathDetector) Detect(ctx context.Context) (*Resource, error) { + executablePath, err := executablePath() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessExecutablePathKey.String(executablePath)), nil +} + +// Detect returns a *Resource that describes all the command arguments as received +// by the process. +func (processCommandArgsDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessCommandArgsKey.StringSlice(commandArgs())), nil +} + +// Detect returns a *Resource that describes the username of the user that owns the +// process. +func (processOwnerDetector) Detect(ctx context.Context) (*Resource, error) { + owner, err := owner() + if err != nil { + return nil, err + } + + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessOwnerKey.String(owner.Username)), nil +} + +// Detect returns a *Resource that describes the name of the compiler used to compile +// this process image. +func (processRuntimeNameDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeNameKey.String(runtimeName())), nil +} + +// Detect returns a *Resource that describes the version of the runtime of this process. +func (processRuntimeVersionDetector) Detect(ctx context.Context) (*Resource, error) { + return NewWithAttributes(semconv.SchemaURL, semconv.ProcessRuntimeVersionKey.String(runtimeVersion())), nil +} + +// Detect returns a *Resource that describes the runtime of this process. +func (processRuntimeDescriptionDetector) Detect(ctx context.Context) (*Resource, error) { + runtimeDescription := fmt.Sprintf( + "go version %s %s/%s", runtimeVersion(), runtimeOS(), runtimeArch()) + + return NewWithAttributes( + semconv.SchemaURL, + semconv.ProcessRuntimeDescriptionKey.String(runtimeDescription), + ), nil +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go new file mode 100644 index 0000000000..e842744ae9 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/resource/resource.go @@ -0,0 +1,280 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package resource // import "go.opentelemetry.io/otel/sdk/resource" + +import ( + "context" + "errors" + "fmt" + "sync" + + "go.opentelemetry.io/otel" + "go.opentelemetry.io/otel/attribute" +) + +// Resource describes an entity about which identifying information +// and metadata is exposed. Resource is an immutable object, +// equivalent to a map from key to unique value. 
+//
+// Resources should be passed and stored as pointers
+// (`*resource.Resource`). The `nil` value is equivalent to an empty
+// Resource.
+type Resource struct {
+	attrs     attribute.Set
+	schemaURL string
+}
+
+var (
+	emptyResource       Resource
+	defaultResource     *Resource
+	defaultResourceOnce sync.Once
+)
+
+var errMergeConflictSchemaURL = errors.New("cannot merge resource due to conflicting Schema URL")
+
+// New returns a Resource combined from the user-provided detectors.
+func New(ctx context.Context, opts ...Option) (*Resource, error) {
+	cfg := config{}
+	for _, opt := range opts {
+		cfg = opt.apply(cfg)
+	}
+
+	resource, err := Detect(ctx, cfg.detectors...)
+
+	var err2 error
+	resource, err2 = Merge(resource, &Resource{schemaURL: cfg.schemaURL})
+	if err == nil {
+		err = err2
+	} else if err2 != nil {
+		err = fmt.Errorf("detecting resources: %s", []string{err.Error(), err2.Error()})
+	}
+
+	return resource, err
+}
+
+// NewWithAttributes creates a resource from attrs and associates the resource with a
+// schema URL. If attrs contains duplicate keys, the last value will be used. If attrs
+// contains any invalid items those items will be dropped. The attrs are assumed to be
+// in a schema identified by schemaURL.
+func NewWithAttributes(schemaURL string, attrs ...attribute.KeyValue) *Resource {
+	resource := NewSchemaless(attrs...)
+	resource.schemaURL = schemaURL
+	return resource
+}
+
+// NewSchemaless creates a resource from attrs. If attrs contains duplicate keys,
+// the last value will be used. If attrs contains any invalid items those items will
+// be dropped. The resource will not be associated with a schema URL. If the schema
+// of the attrs is known, use NewWithAttributes instead.
+func NewSchemaless(attrs ...attribute.KeyValue) *Resource {
+	if len(attrs) == 0 {
+		return &emptyResource
+	}
+
+	// Ensure attributes comply with the specification:
+	// https://github.com/open-telemetry/opentelemetry-specification/blob/v1.0.1/specification/common/common.md#attributes
+	s, _ := attribute.NewSetWithFiltered(attrs, func(kv attribute.KeyValue) bool {
+		return kv.Valid()
+	})
+
+	// If attrs only contains invalid entries, do not allocate a new resource.
+	if s.Len() == 0 {
+		return &emptyResource
+	}
+
+	return &Resource{attrs: s} //nolint
+}
+
+// String implements the Stringer interface and provides a
+// human-readable form of the resource.
+//
+// Avoid using this representation as the key in a map of resources,
+// use Equivalent() as the key instead.
+func (r *Resource) String() string {
+	if r == nil {
+		return ""
+	}
+	return r.attrs.Encoded(attribute.DefaultEncoder())
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Resource.
+func (r *Resource) MarshalLog() interface{} {
+	return struct {
+		Attributes attribute.Set
+		SchemaURL  string
+	}{
+		Attributes: r.attrs,
+		SchemaURL:  r.schemaURL,
+	}
+}
+
+// Attributes returns a copy of attributes from the resource in a sorted order.
+// To avoid allocating a new slice, use an iterator.
+func (r *Resource) Attributes() []attribute.KeyValue {
+	if r == nil {
+		r = Empty()
+	}
+	return r.attrs.ToSlice()
+}
+
+// SchemaURL returns the schema URL associated with Resource r.
+func (r *Resource) SchemaURL() string {
+	if r == nil {
+		return ""
+	}
+	return r.schemaURL
+}
+
+// Iter returns an iterator of the Resource attributes.
+// This is ideal to use if you do not want a copy of the attributes.
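+//
+// A minimal usage sketch, assuming res is some *Resource:
+//
+//	it := res.Iter()
+//	for it.Next() {
+//		kv := it.Attribute()
+//		fmt.Println(kv.Key, kv.Value.Emit())
+//	}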
+func (r *Resource) Iter() attribute.Iterator { + if r == nil { + r = Empty() + } + return r.attrs.Iter() +} + +// Equal returns true when a Resource is equivalent to this Resource. +func (r *Resource) Equal(eq *Resource) bool { + if r == nil { + r = Empty() + } + if eq == nil { + eq = Empty() + } + return r.Equivalent() == eq.Equivalent() +} + +// Merge creates a new resource by combining resource a and b. +// +// If there are common keys between resource a and b, then the value +// from resource b will overwrite the value from resource a, even +// if resource b's value is empty. +// +// The SchemaURL of the resources will be merged according to the spec rules: +// https://github.com/open-telemetry/opentelemetry-specification/blob/bad49c714a62da5493f2d1d9bafd7ebe8c8ce7eb/specification/resource/sdk.md#merge +// If the resources have different non-empty schemaURL an empty resource and an error +// will be returned. +func Merge(a, b *Resource) (*Resource, error) { + if a == nil && b == nil { + return Empty(), nil + } + if a == nil { + return b, nil + } + if b == nil { + return a, nil + } + + // Merge the schema URL. + var schemaURL string + if a.schemaURL == "" { + schemaURL = b.schemaURL + } else if b.schemaURL == "" { + schemaURL = a.schemaURL + } else if a.schemaURL == b.schemaURL { + schemaURL = a.schemaURL + } else { + return Empty(), errMergeConflictSchemaURL + } + + // Note: 'b' attributes will overwrite 'a' with last-value-wins in attribute.Key() + // Meaning this is equivalent to: append(a.Attributes(), b.Attributes()...) + mi := attribute.NewMergeIterator(b.Set(), a.Set()) + combine := make([]attribute.KeyValue, 0, a.Len()+b.Len()) + for mi.Next() { + combine = append(combine, mi.Label()) + } + merged := NewWithAttributes(schemaURL, combine...) + return merged, nil +} + +// Empty returns an instance of Resource with no attributes. It is +// equivalent to a `nil` Resource. +func Empty() *Resource { + return &emptyResource +} + +// Default returns an instance of Resource with a default +// "service.name" and OpenTelemetrySDK attributes. +func Default() *Resource { + defaultResourceOnce.Do(func() { + var err error + defaultResource, err = Detect( + context.Background(), + defaultServiceNameDetector{}, + fromEnv{}, + telemetrySDK{}, + ) + if err != nil { + otel.Handle(err) + } + // If Detect did not return a valid resource, fall back to emptyResource. + if defaultResource == nil { + defaultResource = &emptyResource + } + }) + return defaultResource +} + +// Environment returns an instance of Resource with attributes +// extracted from the OTEL_RESOURCE_ATTRIBUTES environment variable. +func Environment() *Resource { + detector := &fromEnv{} + resource, err := detector.Detect(context.Background()) + if err != nil { + otel.Handle(err) + } + return resource +} + +// Equivalent returns an object that can be compared for equality +// between two resources. This value is suitable for use as a key in +// a map. +func (r *Resource) Equivalent() attribute.Distinct { + return r.Set().Equivalent() +} + +// Set returns the equivalent *attribute.Set of this resource's attributes. +func (r *Resource) Set() *attribute.Set { + if r == nil { + r = Empty() + } + return &r.attrs +} + +// MarshalJSON encodes the resource attributes as a JSON list of { "Key": +// "...", "Value": ... } pairs in order sorted by key. +func (r *Resource) MarshalJSON() ([]byte, error) { + if r == nil { + r = Empty() + } + return r.attrs.MarshalJSON() +} + +// Len returns the number of unique key-values in this Resource. 
+func (r *Resource) Len() int {
+	if r == nil {
+		return 0
+	}
+	return r.attrs.Len()
+}
+
+// Encoded returns an encoded representation of the resource.
+func (r *Resource) Encoded(enc attribute.Encoder) string {
+	if r == nil {
+		return ""
+	}
+	return r.attrs.Encoded(enc)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
new file mode 100644
index 0000000000..67e2732c3c
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/batch_span_processor.go
@@ -0,0 +1,396 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"runtime"
+	"sync"
+	"sync/atomic"
+	"time"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/internal/env"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Defaults for BatchSpanProcessorOptions.
+const (
+	DefaultMaxQueueSize       = 2048
+	DefaultScheduleDelay      = 5000
+	DefaultExportTimeout      = 30000
+	DefaultMaxExportBatchSize = 512
+)
+
+// BatchSpanProcessorOption configures a BatchSpanProcessor.
+type BatchSpanProcessorOption func(o *BatchSpanProcessorOptions)
+
+// BatchSpanProcessorOptions is configuration settings for a BatchSpanProcessor.
+type BatchSpanProcessorOptions struct {
+	// MaxQueueSize is the maximum queue size to buffer spans for delayed processing. If the
+	// queue gets full it drops the spans. Use BlockOnQueueFull to change this behavior.
+	// The default value of MaxQueueSize is 2048.
+	MaxQueueSize int
+
+	// BatchTimeout is the maximum duration for constructing a batch. The processor
+	// forcefully sends available spans when the timeout is reached.
+	// The default value of BatchTimeout is 5000 msec.
+	BatchTimeout time.Duration
+
+	// ExportTimeout specifies the maximum duration for exporting spans. If the timeout
+	// is reached, the export will be cancelled.
+	// The default value of ExportTimeout is 30000 msec.
+	ExportTimeout time.Duration
+
+	// MaxExportBatchSize is the maximum number of spans to process in a single batch.
+	// If there are more than one batch worth of spans then it processes multiple batches
+	// of spans one batch after the other without any delay.
+	// The default value of MaxExportBatchSize is 512.
+	MaxExportBatchSize int
+
+	// BlockOnQueueFull blocks the OnEnd() and OnStart() methods when the queue is full
+	// and BlockOnQueueFull is set to true.
+	// The blocking option should be used carefully as it can severely affect the performance of an
+	// application.
+	BlockOnQueueFull bool
+}
+
+// batchSpanProcessor is a SpanProcessor that batches asynchronously-received
+// spans and sends them to a trace.Exporter when complete.
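+//
+// A construction sketch, assuming exp is some configured SpanExporter:
+//
+//	bsp := NewBatchSpanProcessor(exp,
+//		WithMaxQueueSize(4096),
+//		WithBatchTimeout(2*time.Second),
+//	)
+//	tp := NewTracerProvider(WithSpanProcessor(bsp))
+//	defer func() { _ = tp.Shutdown(context.Background()) }()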
+type batchSpanProcessor struct {
+	e SpanExporter
+	o BatchSpanProcessorOptions
+
+	queue   chan ReadOnlySpan
+	dropped uint32
+
+	batch      []ReadOnlySpan
+	batchMutex sync.Mutex
+	timer      *time.Timer
+	stopWait   sync.WaitGroup
+	stopOnce   sync.Once
+	stopCh     chan struct{}
+}
+
+var _ SpanProcessor = (*batchSpanProcessor)(nil)
+
+// NewBatchSpanProcessor creates a new SpanProcessor that will send completed
+// span batches to the exporter with the supplied options.
+//
+// If the exporter is nil, the span processor will perform no action.
+func NewBatchSpanProcessor(exporter SpanExporter, options ...BatchSpanProcessorOption) SpanProcessor {
+	maxQueueSize := env.BatchSpanProcessorMaxQueueSize(DefaultMaxQueueSize)
+	maxExportBatchSize := env.BatchSpanProcessorMaxExportBatchSize(DefaultMaxExportBatchSize)
+
+	if maxExportBatchSize > maxQueueSize {
+		if DefaultMaxExportBatchSize > maxQueueSize {
+			maxExportBatchSize = maxQueueSize
+		} else {
+			maxExportBatchSize = DefaultMaxExportBatchSize
+		}
+	}
+
+	o := BatchSpanProcessorOptions{
+		BatchTimeout:       time.Duration(env.BatchSpanProcessorScheduleDelay(DefaultScheduleDelay)) * time.Millisecond,
+		ExportTimeout:      time.Duration(env.BatchSpanProcessorExportTimeout(DefaultExportTimeout)) * time.Millisecond,
+		MaxQueueSize:       maxQueueSize,
+		MaxExportBatchSize: maxExportBatchSize,
+	}
+	for _, opt := range options {
+		opt(&o)
+	}
+	bsp := &batchSpanProcessor{
+		e:      exporter,
+		o:      o,
+		batch:  make([]ReadOnlySpan, 0, o.MaxExportBatchSize),
+		timer:  time.NewTimer(o.BatchTimeout),
+		queue:  make(chan ReadOnlySpan, o.MaxQueueSize),
+		stopCh: make(chan struct{}),
+	}
+
+	bsp.stopWait.Add(1)
+	go func() {
+		defer bsp.stopWait.Done()
+		bsp.processQueue()
+		bsp.drainQueue()
+	}()
+
+	return bsp
+}
+
+// OnStart method does nothing.
+func (bsp *batchSpanProcessor) OnStart(parent context.Context, s ReadWriteSpan) {}
+
+// OnEnd method enqueues a ReadOnlySpan for later processing.
+func (bsp *batchSpanProcessor) OnEnd(s ReadOnlySpan) {
+	// Do not enqueue spans if we are just going to drop them.
+	if bsp.e == nil {
+		return
+	}
+	bsp.enqueue(s)
+}
+
+// Shutdown flushes the queue and waits until all spans are processed.
+// It only executes once. Subsequent calls do nothing.
+func (bsp *batchSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	bsp.stopOnce.Do(func() {
+		wait := make(chan struct{})
+		go func() {
+			close(bsp.stopCh)
+			bsp.stopWait.Wait()
+			if bsp.e != nil {
+				if err := bsp.e.Shutdown(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+			close(wait)
+		}()
+		// Wait until the wait group is done or the context is cancelled
+		select {
+		case <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	})
+	return err
+}
+
+type forceFlushSpan struct {
+	ReadOnlySpan
+	flushed chan struct{}
+}
+
+func (f forceFlushSpan) SpanContext() trace.SpanContext {
+	return trace.NewSpanContext(trace.SpanContextConfig{TraceFlags: trace.FlagsSampled})
+}
+
+// ForceFlush exports all ended spans that have not yet been exported.
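+//
+// A sketch of flushing with a deadline (bsp is a hypothetical processor
+// value):
+//
+//	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
+//	defer cancel()
+//	if err := bsp.ForceFlush(ctx); err != nil {
+//		otel.Handle(err)
+//	}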
+func (bsp *batchSpanProcessor) ForceFlush(ctx context.Context) error {
+	var err error
+	if bsp.e != nil {
+		flushCh := make(chan struct{})
+		if bsp.enqueueBlockOnQueueFull(ctx, forceFlushSpan{flushed: flushCh}, true) {
+			select {
+			case <-flushCh:
+				// Processed any items in queue prior to ForceFlush being called
+			case <-ctx.Done():
+				return ctx.Err()
+			}
+		}
+
+		wait := make(chan error)
+		go func() {
+			wait <- bsp.exportSpans(ctx)
+			close(wait)
+		}()
+		// Wait until the export is finished or the context is cancelled/timed out
+		select {
+		case err = <-wait:
+		case <-ctx.Done():
+			err = ctx.Err()
+		}
+	}
+	return err
+}
+
+// WithMaxQueueSize returns a BatchSpanProcessorOption that configures the
+// maximum queue size allowed for a BatchSpanProcessor.
+func WithMaxQueueSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxQueueSize = size
+	}
+}
+
+// WithMaxExportBatchSize returns a BatchSpanProcessorOption that configures
+// the maximum export batch size allowed for a BatchSpanProcessor.
+func WithMaxExportBatchSize(size int) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.MaxExportBatchSize = size
+	}
+}
+
+// WithBatchTimeout returns a BatchSpanProcessorOption that configures the
+// maximum delay allowed for a BatchSpanProcessor before it will export any
+// held spans.
+func WithBatchTimeout(delay time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BatchTimeout = delay
+	}
+}
+
+// WithExportTimeout returns a BatchSpanProcessorOption that configures the
+// amount of time a BatchSpanProcessor waits for an exporter to export before
+// abandoning the export.
+func WithExportTimeout(timeout time.Duration) BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.ExportTimeout = timeout
+	}
+}
+
+// WithBlocking returns a BatchSpanProcessorOption that configures a
+// BatchSpanProcessor to wait for enqueue operations to succeed instead of
+// dropping data when the queue is full.
+func WithBlocking() BatchSpanProcessorOption {
+	return func(o *BatchSpanProcessorOptions) {
+		o.BlockOnQueueFull = true
+	}
+}
+
+// exportSpans is a subroutine of processing and draining the queue.
+func (bsp *batchSpanProcessor) exportSpans(ctx context.Context) error {
+	bsp.timer.Reset(bsp.o.BatchTimeout)
+
+	bsp.batchMutex.Lock()
+	defer bsp.batchMutex.Unlock()
+
+	if bsp.o.ExportTimeout > 0 {
+		var cancel context.CancelFunc
+		ctx, cancel = context.WithTimeout(ctx, bsp.o.ExportTimeout)
+		defer cancel()
+	}
+
+	if l := len(bsp.batch); l > 0 {
+		global.Debug("exporting spans", "count", len(bsp.batch), "dropped", atomic.LoadUint32(&bsp.dropped))
+		err := bsp.e.ExportSpans(ctx, bsp.batch)
+
+		// A new batch is always created after exporting, even if the batch failed to be exported.
+		//
+		// It is up to the exporter to implement any type of retry logic if a batch is failing
+		// to be exported, since it is specific to the protocol and backend being sent to.
+		bsp.batch = bsp.batch[:0]
+
+		if err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+// processQueue removes spans from the `queue` channel until processor
+// is shut down. It calls the exporter in batches of up to MaxExportBatchSize
+// waiting up to BatchTimeout to form a batch.
+func (bsp *batchSpanProcessor) processQueue() {
+	defer bsp.timer.Stop()
+
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case <-bsp.stopCh:
+			return
+		case <-bsp.timer.C:
+			if err := bsp.exportSpans(ctx); err != nil {
+				otel.Handle(err)
+			}
+		case sd := <-bsp.queue:
+			if ffs, ok := sd.(forceFlushSpan); ok {
+				close(ffs.flushed)
+				continue
+			}
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) >= bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+			if shouldExport {
+				if !bsp.timer.Stop() {
+					<-bsp.timer.C
+				}
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		}
+	}
+}
+
+// drainQueue waits for any callers that have added to bsp.stopWait
+// to finish enqueueing, then exports the final batch.
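+//
+// Closing bsp.queue in the drain loop's default case makes subsequent
+// receives yield a nil ReadOnlySpan, which is used as the signal that the
+// queue has been fully drained.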
+func (bsp *batchSpanProcessor) drainQueue() {
+	ctx, cancel := context.WithCancel(context.Background())
+	defer cancel()
+	for {
+		select {
+		case sd := <-bsp.queue:
+			if sd == nil {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+				return
+			}
+
+			bsp.batchMutex.Lock()
+			bsp.batch = append(bsp.batch, sd)
+			shouldExport := len(bsp.batch) == bsp.o.MaxExportBatchSize
+			bsp.batchMutex.Unlock()
+
+			if shouldExport {
+				if err := bsp.exportSpans(ctx); err != nil {
+					otel.Handle(err)
+				}
+			}
+		default:
+			close(bsp.queue)
+		}
+	}
+}
+
+func (bsp *batchSpanProcessor) enqueue(sd ReadOnlySpan) {
+	bsp.enqueueBlockOnQueueFull(context.TODO(), sd, bsp.o.BlockOnQueueFull)
+}
+
+func (bsp *batchSpanProcessor) enqueueBlockOnQueueFull(ctx context.Context, sd ReadOnlySpan, block bool) bool {
+	if !sd.SpanContext().IsSampled() {
+		return false
+	}
+
+	// This ensures the bsp.queue<- below does not panic as the
+	// processor shuts down.
+	defer func() {
+		x := recover()
+		switch err := x.(type) {
+		case nil:
+			return
+		case runtime.Error:
+			if err.Error() == "send on closed channel" {
+				return
+			}
+		}
+		panic(x)
+	}()
+
+	select {
+	case <-bsp.stopCh:
+		return false
+	default:
+	}
+
+	if block {
+		select {
+		case bsp.queue <- sd:
+			return true
+		case <-ctx.Done():
+			return false
+		}
+	}
+
+	select {
+	case bsp.queue <- sd:
+		return true
+	default:
+		atomic.AddUint32(&bsp.dropped, 1)
+	}
+	return false
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (bsp *batchSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type         string
+		SpanExporter SpanExporter
+		Config       BatchSpanProcessorOptions
+	}{
+		Type:         "BatchSpanProcessor",
+		SpanExporter: bsp.e,
+		Config:       bsp.o,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/config.go b/vendor/go.opentelemetry.io/otel/sdk/trace/config.go
new file mode 100644
index 0000000000..61a3043925
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/config.go
@@ -0,0 +1,68 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+// SpanLimits represents the limits of a span.
+type SpanLimits struct {
+	// AttributeCountLimit is the maximum allowed span attribute count.
+	AttributeCountLimit int
+
+	// EventCountLimit is the maximum allowed span event count.
+	EventCountLimit int
+
+	// LinkCountLimit is the maximum allowed span link count.
+	LinkCountLimit int
+
+	// AttributePerEventCountLimit is the maximum allowed attribute per span event count.
+	AttributePerEventCountLimit int
+
+	// AttributePerLinkCountLimit is the maximum allowed attribute per span link count.
+ AttributePerLinkCountLimit int +} + +func (sl *SpanLimits) ensureDefault() { + if sl.EventCountLimit <= 0 { + sl.EventCountLimit = DefaultEventCountLimit + } + if sl.AttributeCountLimit <= 0 { + sl.AttributeCountLimit = DefaultAttributeCountLimit + } + if sl.LinkCountLimit <= 0 { + sl.LinkCountLimit = DefaultLinkCountLimit + } + if sl.AttributePerEventCountLimit <= 0 { + sl.AttributePerEventCountLimit = DefaultAttributePerEventCountLimit + } + if sl.AttributePerLinkCountLimit <= 0 { + sl.AttributePerLinkCountLimit = DefaultAttributePerLinkCountLimit + } +} + +const ( + // DefaultAttributeCountLimit is the default maximum allowed span attribute count. + DefaultAttributeCountLimit = 128 + + // DefaultEventCountLimit is the default maximum allowed span event count. + DefaultEventCountLimit = 128 + + // DefaultLinkCountLimit is the default maximum allowed span link count. + DefaultLinkCountLimit = 128 + + // DefaultAttributePerEventCountLimit is the default maximum allowed attribute per span event count. + DefaultAttributePerEventCountLimit = 128 + + // DefaultAttributePerLinkCountLimit is the default maximum allowed attribute per span link count. + DefaultAttributePerLinkCountLimit = 128 +) diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go new file mode 100644 index 0000000000..0285e99be0 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/doc.go @@ -0,0 +1,21 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace contains support for OpenTelemetry distributed tracing. + +The following assumes a basic familiarity with OpenTelemetry concepts. +See https://opentelemetry.io. +*/ +package trace // import "go.opentelemetry.io/otel/sdk/trace" diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/event.go b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go new file mode 100644 index 0000000000..1e3b426757 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/event.go @@ -0,0 +1,37 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// Event is a thing that happened during a Span's lifetime. +type Event struct { + // Name is the name of this event + Name string + + // Attributes describe the aspects of the event. 
+ Attributes []attribute.KeyValue + + // DroppedAttributeCount is the number of attributes that were not + // recorded due to configured limits being reached. + DroppedAttributeCount int + + // Time at which this event was recorded. + Time time.Time +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go new file mode 100644 index 0000000000..8e89e19d4b --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/evictedqueue.go @@ -0,0 +1,39 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +// evictedQueue is a FIFO queue with a configurable capacity. +type evictedQueue struct { + queue []interface{} + capacity int + droppedCount int +} + +func newEvictedQueue(capacity int) evictedQueue { + // Do not pre-allocate queue, do this lazily. + return evictedQueue{capacity: capacity} +} + +// add adds value to the evictedQueue eq. If eq is at capacity, the oldest +// queued value will be discarded and the drop count incremented. +func (eq *evictedQueue) add(value interface{}) { + if len(eq.queue) == eq.capacity { + // Drop first-in while avoiding allocating more capacity to eq.queue. + copy(eq.queue[:eq.capacity-1], eq.queue[1:]) + eq.queue = eq.queue[:eq.capacity-1] + eq.droppedCount++ + } + eq.queue = append(eq.queue, value) +} diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go new file mode 100644 index 0000000000..c9e2802ac5 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/sdk/trace/id_generator.go @@ -0,0 +1,77 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/sdk/trace" + +import ( + "context" + crand "crypto/rand" + "encoding/binary" + "math/rand" + "sync" + + "go.opentelemetry.io/otel/trace" +) + +// IDGenerator allows custom generators for TraceID and SpanID. +type IDGenerator interface { + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. + + // NewIDs returns a new trace and span ID. + NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) + // DO NOT CHANGE: any modification will not be backwards compatible and + // must never be done outside of a new major release. 
+
+	// NewSpanID returns an ID for a new span in the trace with traceID.
+	NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+type randomIDGenerator struct {
+	sync.Mutex
+	randSource *rand.Rand
+}
+
+var _ IDGenerator = &randomIDGenerator{}
+
+// NewSpanID returns a non-zero span ID from a randomly-chosen sequence.
+func (gen *randomIDGenerator) NewSpanID(ctx context.Context, traceID trace.TraceID) trace.SpanID {
+	gen.Lock()
+	defer gen.Unlock()
+	sid := trace.SpanID{}
+	gen.randSource.Read(sid[:])
+	return sid
+}
+
+// NewIDs returns a non-zero trace ID and a non-zero span ID from a
+// randomly-chosen sequence.
+func (gen *randomIDGenerator) NewIDs(ctx context.Context) (trace.TraceID, trace.SpanID) {
+	gen.Lock()
+	defer gen.Unlock()
+	tid := trace.TraceID{}
+	gen.randSource.Read(tid[:])
+	sid := trace.SpanID{}
+	gen.randSource.Read(sid[:])
+	return tid, sid
+}
+
+// defaultIDGenerator returns a randomIDGenerator seeded from crypto/rand.
+func defaultIDGenerator() IDGenerator {
+	gen := &randomIDGenerator{}
+	var rngSeed int64
+	_ = binary.Read(crand.Reader, binary.LittleEndian, &rngSeed)
+	gen.randSource = rand.New(rand.NewSource(rngSeed))
+	return gen
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/link.go b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
new file mode 100644
index 0000000000..19cfea4ba4
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/link.go
@@ -0,0 +1,34 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext trace.SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+
+	// DroppedAttributeCount is the number of attributes that were not
+	// recorded due to configured limits being reached.
+	DroppedAttributeCount int
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
new file mode 100644
index 0000000000..c6b311f9cd
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/provider.go
@@ -0,0 +1,374 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"fmt"
+	"sync"
+	"sync/atomic"
+
+	"go.opentelemetry.io/otel"
+	"go.opentelemetry.io/otel/internal/global"
+	"go.opentelemetry.io/otel/sdk/instrumentation"
+	"go.opentelemetry.io/otel/sdk/resource"
+	"go.opentelemetry.io/otel/trace"
+)
+
+const (
+	defaultTracerName = "go.opentelemetry.io/otel/sdk/tracer"
+)
+
+// tracerProviderConfig is the configuration for a TracerProvider.
+type tracerProviderConfig struct {
+	// processors contains the collection of SpanProcessors that form the
+	// processing pipeline for spans in the trace signal.
+	// SpanProcessors registered with a TracerProvider are called at the start
+	// and end of a Span's lifecycle, in the order they are registered.
+	processors []SpanProcessor
+
+	// sampler is the default sampler used when creating new spans.
+	sampler Sampler
+
+	// idGenerator is used to generate all Span and Trace IDs when needed.
+	idGenerator IDGenerator
+
+	// spanLimits defines the attribute, event, and link limits for spans.
+	spanLimits SpanLimits
+
+	// resource contains attributes representing an entity that produces telemetry.
+	resource *resource.Resource
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this configuration.
+func (cfg tracerProviderConfig) MarshalLog() interface{} {
+	return struct {
+		SpanProcessors  []SpanProcessor
+		SamplerType     string
+		IDGeneratorType string
+		SpanLimits      SpanLimits
+		Resource        *resource.Resource
+	}{
+		SpanProcessors:  cfg.processors,
+		SamplerType:     fmt.Sprintf("%T", cfg.sampler),
+		IDGeneratorType: fmt.Sprintf("%T", cfg.idGenerator),
+		SpanLimits:      cfg.spanLimits,
+		Resource:        cfg.resource,
+	}
+}
+
+// TracerProvider is an OpenTelemetry TracerProvider. It provides Tracers to
+// instrumentation so it can trace operational flow through a system.
+type TracerProvider struct {
+	mu             sync.Mutex
+	namedTracer    map[instrumentation.Library]*tracer
+	spanProcessors atomic.Value
+
+	// These fields are not protected by the lock mu. They are assumed to be
+	// immutable after creation of the TracerProvider.
+	sampler     Sampler
+	idGenerator IDGenerator
+	spanLimits  SpanLimits
+	resource    *resource.Resource
+}
+
+var _ trace.TracerProvider = &TracerProvider{}
+
+// NewTracerProvider returns a new and configured TracerProvider.
+//
+// By default the returned TracerProvider is configured with:
+// - a ParentBased(AlwaysSample) Sampler
+// - a random number IDGenerator
+// - the resource.Default() Resource
+// - the default SpanLimits.
+//
+// The passed opts are used to override these default values and configure the
+// returned TracerProvider appropriately.
+func NewTracerProvider(opts ...TracerProviderOption) *TracerProvider {
+	o := tracerProviderConfig{}
+
+	for _, opt := range opts {
+		o = opt.apply(o)
+	}
+
+	o = ensureValidTracerProviderConfig(o)
+
+	tp := &TracerProvider{
+		namedTracer: make(map[instrumentation.Library]*tracer),
+		sampler:     o.sampler,
+		idGenerator: o.idGenerator,
+		spanLimits:  o.spanLimits,
+		resource:    o.resource,
+	}
+
+	global.Info("TracerProvider created", "config", o)
+
+	for _, sp := range o.processors {
+		tp.RegisterSpanProcessor(sp)
+	}
+
+	return tp
+}
+
+// Tracer returns a Tracer with the given name and options. If a Tracer for
+// the given name and options does not exist it is created, otherwise the
+// existing Tracer is returned.
+//
+// If name is empty, defaultTracerName is used instead.
+//
+// This method is safe to be called concurrently.
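+//
+// A usage sketch (the instrumentation name and version are hypothetical
+// examples):
+//
+//	tr := tp.Tracer("github.com/moby/buildkit", trace.WithInstrumentationVersion("v0.9.0"))
+//	ctx, span := tr.Start(ctx, "solve")
+//	defer span.End()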
+func (p *TracerProvider) Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + c := trace.NewTracerConfig(opts...) + + p.mu.Lock() + defer p.mu.Unlock() + if name == "" { + name = defaultTracerName + } + il := instrumentation.Library{ + Name: name, + Version: c.InstrumentationVersion(), + SchemaURL: c.SchemaURL(), + } + t, ok := p.namedTracer[il] + if !ok { + t = &tracer{ + provider: p, + instrumentationLibrary: il, + } + p.namedTracer[il] = t + global.Info("Tracer created", "name", name, "version", c.InstrumentationVersion(), "schemaURL", c.SchemaURL()) + } + return t +} + +// RegisterSpanProcessor adds the given SpanProcessor to the list of SpanProcessors +func (p *TracerProvider) RegisterSpanProcessor(s SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + new := spanProcessorStates{} + if old, ok := p.spanProcessors.Load().(spanProcessorStates); ok { + new = append(new, old...) + } + newSpanSync := &spanProcessorState{ + sp: s, + state: &sync.Once{}, + } + new = append(new, newSpanSync) + p.spanProcessors.Store(new) +} + +// UnregisterSpanProcessor removes the given SpanProcessor from the list of SpanProcessors +func (p *TracerProvider) UnregisterSpanProcessor(s SpanProcessor) { + p.mu.Lock() + defer p.mu.Unlock() + spss := spanProcessorStates{} + old, ok := p.spanProcessors.Load().(spanProcessorStates) + if !ok || len(old) == 0 { + return + } + spss = append(spss, old...) + + // stop the span processor if it is started and remove it from the list + var stopOnce *spanProcessorState + var idx int + for i, sps := range spss { + if sps.sp == s { + stopOnce = sps + idx = i + } + } + if stopOnce != nil { + stopOnce.state.Do(func() { + if err := s.Shutdown(context.Background()); err != nil { + otel.Handle(err) + } + }) + } + if len(spss) > 1 { + copy(spss[idx:], spss[idx+1:]) + } + spss[len(spss)-1] = nil + spss = spss[:len(spss)-1] + + p.spanProcessors.Store(spss) +} + +// ForceFlush immediately exports all spans that have not yet been exported for +// all the registered span processors. +func (p *TracerProvider) ForceFlush(ctx context.Context) error { + spss, ok := p.spanProcessors.Load().(spanProcessorStates) + if !ok { + return fmt.Errorf("failed to load span processors") + } + if len(spss) == 0 { + return nil + } + + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + if err := sps.sp.ForceFlush(ctx); err != nil { + return err + } + } + return nil +} + +// Shutdown shuts down the span processors in the order they were registered. +func (p *TracerProvider) Shutdown(ctx context.Context) error { + spss, ok := p.spanProcessors.Load().(spanProcessorStates) + if !ok { + return fmt.Errorf("failed to load span processors") + } + if len(spss) == 0 { + return nil + } + + for _, sps := range spss { + select { + case <-ctx.Done(): + return ctx.Err() + default: + } + + var err error + sps.state.Do(func() { + err = sps.sp.Shutdown(ctx) + }) + if err != nil { + return err + } + } + return nil +} + +type TracerProviderOption interface { + apply(tracerProviderConfig) tracerProviderConfig +} + +type traceProviderOptionFunc func(tracerProviderConfig) tracerProviderConfig + +func (fn traceProviderOptionFunc) apply(cfg tracerProviderConfig) tracerProviderConfig { + return fn(cfg) +} + +// WithSyncer registers the exporter with the TracerProvider using a +// SimpleSpanProcessor. +// +// This is not recommended for production use. 
The synchronous nature of the
+// SimpleSpanProcessor that will wrap the exporter makes it good for testing,
+// debugging, or showing examples of other features, but it will be slow and
+// have a high computation resource usage overhead. The WithBatcher option is
+// recommended for production use instead.
+func WithSyncer(e SpanExporter) TracerProviderOption {
+	return WithSpanProcessor(NewSimpleSpanProcessor(e))
+}
+
+// WithBatcher registers the exporter with the TracerProvider using a
+// BatchSpanProcessor configured with the passed opts.
+func WithBatcher(e SpanExporter, opts ...BatchSpanProcessorOption) TracerProviderOption {
+	return WithSpanProcessor(NewBatchSpanProcessor(e, opts...))
+}
+
+// WithSpanProcessor registers the SpanProcessor with a TracerProvider.
+func WithSpanProcessor(sp SpanProcessor) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.processors = append(cfg.processors, sp)
+		return cfg
+	})
+}
+
+// WithResource returns a TracerProviderOption that will configure the
+// Resource r as a TracerProvider's Resource. The configured Resource is
+// referenced by all the Tracers the TracerProvider creates. It represents the
+// entity producing telemetry.
+//
+// If this option is not used, the TracerProvider will use the
+// resource.Default() Resource by default.
+func WithResource(r *resource.Resource) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		var err error
+		cfg.resource, err = resource.Merge(resource.Environment(), r)
+		if err != nil {
+			otel.Handle(err)
+		}
+		return cfg
+	})
+}
+
+// WithIDGenerator returns a TracerProviderOption that will configure the
+// IDGenerator g as a TracerProvider's IDGenerator. The configured IDGenerator
+// is used by the Tracers the TracerProvider creates to generate new Span and
+// Trace IDs.
+//
+// If this option is not used, the TracerProvider will use a random number
+// IDGenerator by default.
+func WithIDGenerator(g IDGenerator) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if g != nil {
+			cfg.idGenerator = g
+		}
+		return cfg
+	})
+}
+
+// WithSampler returns a TracerProviderOption that will configure the Sampler
+// s as a TracerProvider's Sampler. The configured Sampler is used by the
+// Tracers the TracerProvider creates to make their sampling decisions for the
+// Spans they create.
+//
+// If this option is not used, the TracerProvider will use a
+// ParentBased(AlwaysSample) Sampler by default.
+func WithSampler(s Sampler) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		if s != nil {
+			cfg.sampler = s
+		}
+		return cfg
+	})
+}
+
+// WithSpanLimits returns a TracerProviderOption that will configure the
+// SpanLimits sl as a TracerProvider's SpanLimits. The configured SpanLimits
+// are used by the Tracers the TracerProvider creates and the Spans they
+// create to limit tracing resources used.
+//
+// If this option is not used, the TracerProvider will use the default
+// SpanLimits.
+func WithSpanLimits(sl SpanLimits) TracerProviderOption {
+	return traceProviderOptionFunc(func(cfg tracerProviderConfig) tracerProviderConfig {
+		cfg.spanLimits = sl
+		return cfg
+	})
+}
+
+// ensureValidTracerProviderConfig ensures that the given tracerProviderConfig is valid.
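+//
+// A rough sketch of the defaults it fills in, as an explicit construction
+// (not byte-for-byte identical, since WithResource additionally merges
+// environment attributes):
+//
+//	tp := NewTracerProvider(
+//		WithSampler(ParentBased(AlwaysSample())),
+//		WithIDGenerator(defaultIDGenerator()),
+//		WithResource(resource.Default()),
+//	)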
+func ensureValidTracerProviderConfig(cfg tracerProviderConfig) tracerProviderConfig {
+	if cfg.sampler == nil {
+		cfg.sampler = ParentBased(AlwaysSample())
+	}
+	if cfg.idGenerator == nil {
+		cfg.idGenerator = defaultIDGenerator()
+	}
+	cfg.spanLimits.ensureDefault()
+	if cfg.resource == nil {
+		cfg.resource = resource.Default()
+	}
+	return cfg
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
new file mode 100644
index 0000000000..a4ac588f66
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/sampling.go
@@ -0,0 +1,292 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"encoding/binary"
+	"fmt"
+
+	"go.opentelemetry.io/otel/attribute"
+	"go.opentelemetry.io/otel/trace"
+)
+
+// Sampler decides whether a trace should be sampled and exported.
+type Sampler interface {
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// ShouldSample returns a SamplingResult based on a decision made from the
+	// passed parameters.
+	ShouldSample(parameters SamplingParameters) SamplingResult
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+
+	// Description returns information describing the Sampler.
+	Description() string
+	// DO NOT CHANGE: any modification will not be backwards compatible and
+	// must never be done outside of a new major release.
+}
+
+// SamplingParameters contains the values passed to a Sampler.
+type SamplingParameters struct {
+	ParentContext context.Context
+	TraceID       trace.TraceID
+	Name          string
+	Kind          trace.SpanKind
+	Attributes    []attribute.KeyValue
+	Links         []trace.Link
+}
+
+// SamplingDecision indicates whether a span is dropped, recorded and/or sampled.
+type SamplingDecision uint8
+
+// Valid sampling decisions.
+const (
+	// Drop will not record the span and all attributes/events will be dropped.
+	Drop SamplingDecision = iota
+
+	// RecordOnly indicates the span's `IsRecording() == true`, but the `Sampled` flag
+	// *must not* be set.
+	RecordOnly
+
+	// RecordAndSample indicates the span's `IsRecording() == true` and the `Sampled` flag
+	// *must* be set.
+	RecordAndSample
+)
+
+// SamplingResult conveys a SamplingDecision, set of Attributes and a Tracestate.
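+//
+// A custom Sampler would typically propagate the parent's TraceState when
+// building its result, as a sketch (p is the SamplingParameters argument):
+//
+//	psc := trace.SpanContextFromContext(p.ParentContext)
+//	return SamplingResult{Decision: RecordAndSample, Tracestate: psc.TraceState()}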
+type SamplingResult struct {
+	Decision   SamplingDecision
+	Attributes []attribute.KeyValue
+	Tracestate trace.TraceState
+}
+
+type traceIDRatioSampler struct {
+	traceIDUpperBound uint64
+	description       string
+}
+
+func (ts traceIDRatioSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	x := binary.BigEndian.Uint64(p.TraceID[0:8]) >> 1
+	if x < ts.traceIDUpperBound {
+		return SamplingResult{
+			Decision:   RecordAndSample,
+			Tracestate: psc.TraceState(),
+		}
+	}
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: psc.TraceState(),
+	}
+}
+
+func (ts traceIDRatioSampler) Description() string {
+	return ts.description
+}
+
+// TraceIDRatioBased samples a given fraction of traces. Fractions >= 1 will
+// always sample. Fractions < 0 are treated as zero. To respect the
+// parent trace's `SampledFlag`, the `TraceIDRatioBased` sampler should be used
+// as a delegate of a `Parent` sampler.
+//nolint:revive // revive complains about stutter of `trace.TraceIDRatioBased`
+func TraceIDRatioBased(fraction float64) Sampler {
+	if fraction >= 1 {
+		return AlwaysSample()
+	}
+
+	if fraction <= 0 {
+		fraction = 0
+	}
+
+	return &traceIDRatioSampler{
+		traceIDUpperBound: uint64(fraction * (1 << 63)),
+		description:       fmt.Sprintf("TraceIDRatioBased{%g}", fraction),
+	}
+}
+
+type alwaysOnSampler struct{}
+
+func (as alwaysOnSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   RecordAndSample,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOnSampler) Description() string {
+	return "AlwaysOnSampler"
+}
+
+// AlwaysSample returns a Sampler that samples every trace.
+// Be careful about using this sampler in a production application with
+// significant traffic: a new trace will be started and exported for every
+// request.
+func AlwaysSample() Sampler {
+	return alwaysOnSampler{}
+}
+
+type alwaysOffSampler struct{}
+
+func (as alwaysOffSampler) ShouldSample(p SamplingParameters) SamplingResult {
+	return SamplingResult{
+		Decision:   Drop,
+		Tracestate: trace.SpanContextFromContext(p.ParentContext).TraceState(),
+	}
+}
+
+func (as alwaysOffSampler) Description() string {
+	return "AlwaysOffSampler"
+}
+
+// NeverSample returns a Sampler that samples no traces.
+func NeverSample() Sampler {
+	return alwaysOffSampler{}
+}
+
+// ParentBased returns a composite sampler which behaves differently,
+// based on the parent of the span. If the span has no parent,
+// the root(Sampler) is used to make the sampling decision. 
If the span has +// a parent, depending on whether the parent is remote and whether it +// is sampled, one of the following samplers will apply: +// - remoteParentSampled(Sampler) (default: AlwaysOn) +// - remoteParentNotSampled(Sampler) (default: AlwaysOff) +// - localParentSampled(Sampler) (default: AlwaysOn) +// - localParentNotSampled(Sampler) (default: AlwaysOff) +func ParentBased(root Sampler, samplers ...ParentBasedSamplerOption) Sampler { + return parentBased{ + root: root, + config: configureSamplersForParentBased(samplers), + } +} + +type parentBased struct { + root Sampler + config samplerConfig +} + +func configureSamplersForParentBased(samplers []ParentBasedSamplerOption) samplerConfig { + c := samplerConfig{ + remoteParentSampled: AlwaysSample(), + remoteParentNotSampled: NeverSample(), + localParentSampled: AlwaysSample(), + localParentNotSampled: NeverSample(), + } + + for _, so := range samplers { + c = so.apply(c) + } + + return c +} + +// samplerConfig is a group of options for parentBased sampler. +type samplerConfig struct { + remoteParentSampled, remoteParentNotSampled Sampler + localParentSampled, localParentNotSampled Sampler +} + +// ParentBasedSamplerOption configures the sampler for a particular sampling case. +type ParentBasedSamplerOption interface { + apply(samplerConfig) samplerConfig +} + +// WithRemoteParentSampled sets the sampler for the case of sampled remote parent. +func WithRemoteParentSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentSampledOption{s} +} + +type remoteParentSampledOption struct { + s Sampler +} + +func (o remoteParentSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentSampled = o.s + return config +} + +// WithRemoteParentNotSampled sets the sampler for the case of remote parent +// which is not sampled. +func WithRemoteParentNotSampled(s Sampler) ParentBasedSamplerOption { + return remoteParentNotSampledOption{s} +} + +type remoteParentNotSampledOption struct { + s Sampler +} + +func (o remoteParentNotSampledOption) apply(config samplerConfig) samplerConfig { + config.remoteParentNotSampled = o.s + return config +} + +// WithLocalParentSampled sets the sampler for the case of sampled local parent. +func WithLocalParentSampled(s Sampler) ParentBasedSamplerOption { + return localParentSampledOption{s} +} + +type localParentSampledOption struct { + s Sampler +} + +func (o localParentSampledOption) apply(config samplerConfig) samplerConfig { + config.localParentSampled = o.s + return config +} + +// WithLocalParentNotSampled sets the sampler for the case of local parent +// which is not sampled. 
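+//
+// A configuration sketch: a ParentBased sampler with a ratio-based root,
+// stating the (already default) local-parent-not-sampled behavior explicitly:
+//
+//	s := ParentBased(
+//		TraceIDRatioBased(0.25),
+//		WithLocalParentNotSampled(NeverSample()),
+//	)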
+func WithLocalParentNotSampled(s Sampler) ParentBasedSamplerOption {
+	return localParentNotSampledOption{s}
+}
+
+type localParentNotSampledOption struct {
+	s Sampler
+}
+
+func (o localParentNotSampledOption) apply(config samplerConfig) samplerConfig {
+	config.localParentNotSampled = o.s
+	return config
+}
+
+func (pb parentBased) ShouldSample(p SamplingParameters) SamplingResult {
+	psc := trace.SpanContextFromContext(p.ParentContext)
+	if psc.IsValid() {
+		if psc.IsRemote() {
+			if psc.IsSampled() {
+				return pb.config.remoteParentSampled.ShouldSample(p)
+			}
+			return pb.config.remoteParentNotSampled.ShouldSample(p)
+		}
+
+		if psc.IsSampled() {
+			return pb.config.localParentSampled.ShouldSample(p)
+		}
+		return pb.config.localParentNotSampled.ShouldSample(p)
+	}
+	return pb.root.ShouldSample(p)
+}
+
+func (pb parentBased) Description() string {
+	return fmt.Sprintf("ParentBased{root:%s,remoteParentSampled:%s,"+
+		"remoteParentNotSampled:%s,localParentSampled:%s,localParentNotSampled:%s}",
+		pb.root.Description(),
+		pb.config.remoteParentSampled.Description(),
+		pb.config.remoteParentNotSampled.Description(),
+		pb.config.localParentSampled.Description(),
+		pb.config.localParentNotSampled.Description(),
+	)
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
new file mode 100644
index 0000000000..d31d3c1caf
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/simple_span_processor.go
@@ -0,0 +1,128 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+	"context"
+	"sync"
+
+	"go.opentelemetry.io/otel"
+)
+
+// simpleSpanProcessor is a SpanProcessor that synchronously sends all
+// completed Spans to a trace.Exporter immediately.
+type simpleSpanProcessor struct {
+	exporterMu sync.RWMutex
+	exporter   SpanExporter
+	stopOnce   sync.Once
+}
+
+var _ SpanProcessor = (*simpleSpanProcessor)(nil)
+
+// NewSimpleSpanProcessor returns a new SpanProcessor that will synchronously
+// send completed spans to the exporter immediately.
+//
+// This SpanProcessor is not recommended for production use. The synchronous
+// nature of this SpanProcessor makes it good for testing, debugging, or
+// showing examples of other features, but it will be slow and have a high
+// computation resource usage overhead. The BatchSpanProcessor is recommended
+// for production use instead.
+func NewSimpleSpanProcessor(exporter SpanExporter) SpanProcessor {
+	ssp := &simpleSpanProcessor{
+		exporter: exporter,
+	}
+	return ssp
+}
+
+// OnStart does nothing.
+func (ssp *simpleSpanProcessor) OnStart(context.Context, ReadWriteSpan) {}
+
+// OnEnd immediately exports a ReadOnlySpan.
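+// Only spans whose TraceFlags are sampled are forwarded to the exporter;
+// unsampled spans are silently dropped here.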
+func (ssp *simpleSpanProcessor) OnEnd(s ReadOnlySpan) {
+	ssp.exporterMu.RLock()
+	defer ssp.exporterMu.RUnlock()
+
+	if ssp.exporter != nil && s.SpanContext().TraceFlags().IsSampled() {
+		if err := ssp.exporter.ExportSpans(context.Background(), []ReadOnlySpan{s}); err != nil {
+			otel.Handle(err)
+		}
+	}
+}
+
+// Shutdown shuts down the exporter this SimpleSpanProcessor exports to.
+func (ssp *simpleSpanProcessor) Shutdown(ctx context.Context) error {
+	var err error
+	ssp.stopOnce.Do(func() {
+		stopFunc := func(exp SpanExporter) (<-chan error, func()) {
+			done := make(chan error)
+			return done, func() { done <- exp.Shutdown(ctx) }
+		}
+
+		// The exporter field of the simpleSpanProcessor needs to be zeroed to
+		// signal it is shut down, meaning all subsequent calls to OnEnd will
+		// be gracefully ignored. This needs to be done synchronously to avoid
+		// any race condition.
+		//
+		// A closure is used to keep reference to the exporter and then the
+		// field is zeroed. This ensures the simpleSpanProcessor is shut down
+		// before the exporter. This order is important as it avoids a
+		// potential deadlock. If the exporter shut down operation generates a
+		// span, that span would need to be exported. Meaning, OnEnd would be
+		// called and try acquiring the lock that is held here.
+		ssp.exporterMu.Lock()
+		done, shutdown := stopFunc(ssp.exporter)
+		ssp.exporter = nil
+		ssp.exporterMu.Unlock()
+
+		go shutdown()
+
+		// Wait for the exporter to shut down or the deadline to expire.
+		select {
+		case err = <-done:
+		case <-ctx.Done():
+			// It is possible for the exporter to have immediately shut down
+			// and the context to be done simultaneously. In that case this
+			// outer select statement will randomly choose a case. This will
+			// result in a different returned error for similar scenarios.
+			// Instead, double check if the exporter shut down at the same
+			// time and return that error if so. This will ensure consistency
+			// as well as ensure the caller knows the exporter shut down
+			// successfully (they can already determine if the deadline is
+			// expired given they passed the context).
+			select {
+			case err = <-done:
+			default:
+				err = ctx.Err()
+			}
+		}
+	})
+	return err
+}
+
+// ForceFlush does nothing as there is no data to flush.
+func (ssp *simpleSpanProcessor) ForceFlush(context.Context) error {
+	return nil
+}
+
+// MarshalLog is the marshaling function used by the logging system to represent this Span Processor.
+func (ssp *simpleSpanProcessor) MarshalLog() interface{} {
+	return struct {
+		Type     string
+		Exporter SpanExporter
+	}{
+		Type:     "SimpleSpanProcessor",
+		Exporter: ssp.exporter,
+	}
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000000..53aac61f5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,138 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
new file mode 100644
index 0000000000..53aac61f5f
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/snapshot.go
@@ -0,0 +1,138 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/resource"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// snapshot is a record of a span's state at a particular checkpointed time.
+// It is used as a read-only representation of that state.
+type snapshot struct {
+ name string
+ spanContext trace.SpanContext
+ parent trace.SpanContext
+ spanKind trace.SpanKind
+ startTime time.Time
+ endTime time.Time
+ attributes []attribute.KeyValue
+ events []Event
+ links []Link
+ status Status
+ childSpanCount int
+ droppedAttributeCount int
+ droppedEventCount int
+ droppedLinkCount int
+ resource *resource.Resource
+ instrumentationLibrary instrumentation.Library
+}
+
+var _ ReadOnlySpan = snapshot{}
+
+func (s snapshot) private() {}
+
+// Name returns the name of the span.
+func (s snapshot) Name() string {
+ return s.name
+}
+
+// SpanContext returns the unique SpanContext that identifies the span.
+func (s snapshot) SpanContext() trace.SpanContext {
+ return s.spanContext
+}
+
+// Parent returns the unique SpanContext that identifies the parent of the
+// span if one exists. If the span has no parent the returned SpanContext
+// will be invalid.
+func (s snapshot) Parent() trace.SpanContext {
+ return s.parent
+}
+
+// SpanKind returns the role the span plays in a Trace.
+func (s snapshot) SpanKind() trace.SpanKind {
+ return s.spanKind
+}
+
+// StartTime returns the time the span started recording.
+func (s snapshot) StartTime() time.Time {
+ return s.startTime
+}
+
+// EndTime returns the time the span stopped recording. It will be zero if
+// the span has not ended.
+func (s snapshot) EndTime() time.Time {
+ return s.endTime
+}
+
+// Attributes returns the defining attributes of the span.
+func (s snapshot) Attributes() []attribute.KeyValue {
+ return s.attributes
+}
+
+// Links returns all the links the span has to other spans.
+func (s snapshot) Links() []Link {
+ return s.links
+}
+
+// Events returns all the events that occurred within the span's
+// lifetime.
+func (s snapshot) Events() []Event {
+ return s.events
+}
+
+// Status returns the span's status.
+func (s snapshot) Status() Status {
+ return s.status
+}
+
+// InstrumentationLibrary returns information about the instrumentation
+// library that created the span.
+func (s snapshot) InstrumentationLibrary() instrumentation.Library {
+ return s.instrumentationLibrary
+}
+
+// Resource returns information about the entity that produced the span.
+func (s snapshot) Resource() *resource.Resource {
+ return s.resource
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s snapshot) DroppedAttributes() int {
+ return s.droppedAttributeCount
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s snapshot) DroppedLinks() int {
+ return s.droppedLinkCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s snapshot) DroppedEvents() int {
+ return s.droppedEventCount
+}
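The snapshot type above is what exporters actually receive: an immutable copy of a span's state taken when it ends. A toy exporter (hypothetical, purely illustrative) that only reads from the snapshots it is handed:

```go
package main

import (
	"context"

	"go.opentelemetry.io/otel/codes"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// errorCounter counts ended spans whose status is Error. ReadOnlySpan
// offers no setters, so the snapshots cannot be mutated here by design.
type errorCounter struct{ errors int }

var _ sdktrace.SpanExporter = (*errorCounter)(nil)

func (e *errorCounter) ExportSpans(_ context.Context, spans []sdktrace.ReadOnlySpan) error {
	for _, s := range spans {
		if s.Status().Code == codes.Error {
			e.errors++
		}
		_ = s.EndTime().Sub(s.StartTime()) // the span's duration, if needed
	}
	return nil
}

func (e *errorCounter) Shutdown(context.Context) error { return nil }
```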
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s snapshot) ChildSpanCount() int {
+ return s.childSpanCount
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
new file mode 100644
index 0000000000..779cde691d
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span.go
@@ -0,0 +1,739 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "fmt"
+ "reflect"
+ "runtime"
+ rt "runtime/trace"
+ "sync"
+ "time"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/sdk/internal"
+ "go.opentelemetry.io/otel/sdk/resource"
+ semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
+ "go.opentelemetry.io/otel/trace"
+)
+
+// ReadOnlySpan allows reading information from the data structure underlying a
+// trace.Span. It is used in places where reading information from a span is
+// necessary but changing the span isn't necessary or allowed.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadOnlySpan interface {
+ // Name returns the name of the span.
+ Name() string
+ // SpanContext returns the unique SpanContext that identifies the span.
+ SpanContext() trace.SpanContext
+ // Parent returns the unique SpanContext that identifies the parent of the
+ // span if one exists. If the span has no parent the returned SpanContext
+ // will be invalid.
+ Parent() trace.SpanContext
+ // SpanKind returns the role the span plays in a Trace.
+ SpanKind() trace.SpanKind
+ // StartTime returns the time the span started recording.
+ StartTime() time.Time
+ // EndTime returns the time the span stopped recording. It will be zero if
+ // the span has not ended.
+ EndTime() time.Time
+ // Attributes returns the defining attributes of the span.
+ // The order of the returned attributes is not guaranteed to be stable across invocations.
+ Attributes() []attribute.KeyValue
+ // Links returns all the links the span has to other spans.
+ Links() []Link
+ // Events returns all the events that occurred within the span's
+ // lifetime.
+ Events() []Event
+ // Status returns the span's status.
+ Status() Status
+ // InstrumentationLibrary returns information about the instrumentation
+ // library that created the span.
+ InstrumentationLibrary() instrumentation.Library
+ // Resource returns information about the entity that produced the span.
+ Resource() *resource.Resource
+ // DroppedAttributes returns the number of attributes dropped by the span
+ // due to limits being reached.
+ DroppedAttributes() int
+ // DroppedLinks returns the number of links dropped by the span due to
+ // limits being reached.
+ DroppedLinks() int
+ // DroppedEvents returns the number of events dropped by the span due to
+ // limits being reached.
+ DroppedEvents() int
+ // ChildSpanCount returns the count of spans that consider the span a
+ // direct parent.
+ ChildSpanCount() int
+
+ // A private method to prevent users implementing the
+ // interface and so future additions to it will not
+ // violate compatibility.
+ private()
+}
+
+// ReadWriteSpan exposes the same methods as trace.Span and in addition allows
+// reading information from the underlying data structure.
+// This interface exposes the union of the methods of trace.Span (which is a
+// "write-only" span) and ReadOnlySpan. New methods for writing or reading span
+// information should be added under trace.Span or ReadOnlySpan, respectively.
+//
+// Warning: methods may be added to this interface in minor releases.
+type ReadWriteSpan interface {
+ trace.Span
+ ReadOnlySpan
+}
+
+// recordingSpan is an implementation of the OpenTelemetry Span API
+// representing the individual component of a trace that is sampled.
+type recordingSpan struct {
+ // mu protects the contents of this span.
+ mu sync.Mutex
+
+ // parent holds the parent span of this span as a trace.SpanContext.
+ parent trace.SpanContext
+
+ // spanKind represents the kind of this span as a trace.SpanKind.
+ spanKind trace.SpanKind
+
+ // name is the name of this span.
+ name string
+
+ // startTime is the time at which this span was started.
+ startTime time.Time
+
+ // endTime is the time at which this span was ended. It contains the zero
+ // value of time.Time until the span is ended.
+ endTime time.Time
+
+ // status is the status of this span.
+ status Status
+
+ // childSpanCount holds the number of child spans created for this span.
+ childSpanCount int
+
+ // spanContext holds the SpanContext of this span.
+ spanContext trace.SpanContext
+
+ // attributes is a collection of user provided key/values. The collection
+ // is constrained by a configurable maximum held by the parent
+ // TracerProvider. When additional attributes are added after this maximum
+ // is reached, they are dropped. The number of dropped attributes is
+ // tracked and reported in the ReadOnlySpan exported when the span ends.
+ attributes []attribute.KeyValue
+ droppedAttributes int
+
+ // events are stored in FIFO queue capped by configured limit.
+ events evictedQueue
+
+ // links are stored in FIFO queue capped by configured limit.
+ links evictedQueue
+
+ // executionTracerTaskEnd ends the execution tracer span.
+ executionTracerTaskEnd func()
+
+ // tracer is the SDK tracer that created this span.
+ tracer *tracer
+}
+
+var _ ReadWriteSpan = (*recordingSpan)(nil)
+var _ runtimeTracer = (*recordingSpan)(nil)
+
+// SpanContext returns the SpanContext of this span.
+func (s *recordingSpan) SpanContext() trace.SpanContext {
+ if s == nil {
+ return trace.SpanContext{}
+ }
+ return s.spanContext
+}
+
+// IsRecording returns if this span is being recorded. If this span has ended
+// this will return false.
+func (s *recordingSpan) IsRecording() bool {
+ if s == nil {
+ return false
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ return s.endTime.IsZero()
+}
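SetStatus, defined next, keeps the description only when the code is an error, which is easy to trip over. A small sketch of the resulting behavior:

```go
import (
	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// markOutcome records the result of an operation on span. The description
// passed alongside codes.Ok is dropped by the SDK; only the Error path
// keeps its text.
func markOutcome(span trace.Span, err error) {
	if err != nil {
		span.SetStatus(codes.Error, err.Error())
		return
	}
	span.SetStatus(codes.Ok, "this description is discarded")
}
```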
+// SetStatus sets the status of the Span in the form of a code and a
+// description, overriding previous values set. The description is only
+// included in the set status when the code is for an error. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) SetStatus(code codes.Code, description string) {
+ if !s.IsRecording() {
+ return
+ }
+
+ status := Status{Code: code}
+ if code == codes.Error {
+ status.Description = description
+ }
+
+ s.mu.Lock()
+ s.status = status
+ s.mu.Unlock()
+}
+
+// SetAttributes sets attributes of this span.
+//
+// If a key from attributes already exists the value associated with that key
+// will be overwritten with the value contained in attributes.
+//
+// If this span is not being recorded then this method does nothing.
+//
+// If adding attributes to the span would exceed the maximum amount of
+// attributes the span is configured to have, the last added attributes will
+// be dropped.
+func (s *recordingSpan) SetAttributes(attributes ...attribute.KeyValue) {
+ if !s.IsRecording() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ // If adding these attributes could exceed the capacity of s perform a
+ // de-duplication and truncation while adding to avoid over allocation.
+ if len(s.attributes)+len(attributes) > s.tracer.provider.spanLimits.AttributeCountLimit {
+ s.addOverCapAttrs(attributes)
+ return
+ }
+
+ // Otherwise, add without deduplication. When attributes are read they
+ // will be deduplicated, optimizing the operation.
+ for _, a := range attributes {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.droppedAttributes++
+ continue
+ }
+ s.attributes = append(s.attributes, a)
+ }
+}
+
+// addOverCapAttrs adds the attributes attrs to the span s while
+// de-duplicating the attributes of s and attrs and dropping attributes that
+// exceed the capacity of s.
+//
+// This method assumes s.mu.Lock is held by the caller.
+//
+// This method should only be called when there is a possibility that adding
+// attrs to s will exceed the capacity of s. Otherwise, attrs should be added
+// to s without checking for duplicates and all retrieval methods of the
+// attributes for s will de-duplicate as needed.
+func (s *recordingSpan) addOverCapAttrs(attrs []attribute.KeyValue) {
+ // In order to not allocate more capacity to s.attributes than needed,
+ // prune and truncate this addition of attributes while adding.
+
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int)
+ s.dedupeAttrsFromRecord(&exists)
+
+ // Now that s.attributes is deduplicated, adding unique attributes up to
+ // the capacity of s will not over allocate s.attributes.
+ for _, a := range attrs {
+ if !a.Valid() {
+ // Drop all invalid attributes.
+ s.droppedAttributes++
+ continue
+ }
+
+ if idx, ok := exists[a.Key]; ok {
+ // Perform all updates before dropping, even when at capacity.
+ s.attributes[idx] = a
+ continue
+ }
+
+ if len(s.attributes) >= s.tracer.provider.spanLimits.AttributeCountLimit {
+ // Do not just drop all of the remaining attributes, make sure
+ // updates are checked and performed.
+ s.droppedAttributes++
+ } else {
+ s.attributes = append(s.attributes, a)
+ exists[a.Key] = len(s.attributes) - 1
+ }
+ }
+}
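The capacity consulted by addOverCapAttrs comes from the provider's span limits. A sketch of how the update-then-drop semantics surface to users, assuming this SDK vintage's SpanLimits struct and WithSpanLimits option; the tiny limit exists only to make the dropping visible:

```go
import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

func demoAttributeLimits() {
	limits := sdktrace.SpanLimits{
		AttributeCountLimit:         2, // deliberately tiny
		EventCountLimit:             128,
		LinkCountLimit:              128,
		AttributePerEventCountLimit: 128,
		AttributePerLinkCountLimit:  128,
	}
	tp := sdktrace.NewTracerProvider(sdktrace.WithSpanLimits(limits))

	_, span := tp.Tracer("demo").Start(context.Background(), "op")
	span.SetAttributes(
		attribute.String("a", "1"),
		attribute.String("b", "2"),
		attribute.String("a", "updated"), // duplicate key: updated in place, not dropped
		attribute.String("c", "3"),       // over the limit: dropped and counted
	)
	span.End()
}
```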
+// End ends the span. This method does nothing if the span is already ended or
+// is not being recorded.
+//
+// The only SpanOption currently supported is WithTimestamp which will set the
+// end time for a Span's life-cycle.
+//
+// If this method is called while panicking an error event is added to the
+// Span before ending it and the panic is continued.
+func (s *recordingSpan) End(options ...trace.SpanEndOption) {
+ // Do not start by checking if the span is being recorded which requires
+ // acquiring a lock. Make a minimal check that the span is not nil.
+ if s == nil {
+ return
+ }
+
+ // Store the end time as soon as possible to avoid artificially increasing
+ // the span's duration in case some operation below takes a while.
+ et := internal.MonotonicEndTime(s.startTime)
+
+ // Do a relatively expensive check now that we have an end time to see if
+ // we need to do any more processing.
+ if !s.IsRecording() {
+ return
+ }
+
+ config := trace.NewSpanEndConfig(options...)
+ if recovered := recover(); recovered != nil {
+ // Record but don't stop the panic.
+ defer panic(recovered)
+ opts := []trace.EventOption{
+ trace.WithAttributes(
+ semconv.ExceptionTypeKey.String(typeStr(recovered)),
+ semconv.ExceptionMessageKey.String(fmt.Sprint(recovered)),
+ ),
+ }
+
+ if config.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktraceKey.String(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+ }
+
+ if s.executionTracerTaskEnd != nil {
+ s.executionTracerTaskEnd()
+ }
+
+ s.mu.Lock()
+ // Setting endTime to non-zero marks the span as ended and not recording.
+ if config.Timestamp().IsZero() {
+ s.endTime = et
+ } else {
+ s.endTime = config.Timestamp()
+ }
+ s.mu.Unlock()
+
+ if sps, ok := s.tracer.provider.spanProcessors.Load().(spanProcessorStates); ok {
+ if len(sps) == 0 {
+ return
+ }
+ snap := s.snapshot()
+ for _, sp := range sps {
+ sp.sp.OnEnd(snap)
+ }
+ }
+}
+
+// RecordError will record err as a span event for this span. An additional call to
+// SetStatus is required if the Status of the Span should be set to Error, this method
+// does not change the Span status. If this span is not being recorded or err is nil
+// then this method does nothing.
+func (s *recordingSpan) RecordError(err error, opts ...trace.EventOption) {
+ if s == nil || err == nil || !s.IsRecording() {
+ return
+ }
+
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionTypeKey.String(typeStr(err)),
+ semconv.ExceptionMessageKey.String(err.Error()),
+ ))
+
+ c := trace.NewEventConfig(opts...)
+ if c.StackTrace() {
+ opts = append(opts, trace.WithAttributes(
+ semconv.ExceptionStacktraceKey.String(recordStackTrace()),
+ ))
+ }
+
+ s.addEvent(semconv.ExceptionEventName, opts...)
+}
+
+func typeStr(i interface{}) string {
+ t := reflect.TypeOf(i)
+ if t.PkgPath() == "" && t.Name() == "" {
+ // Likely a builtin type.
+ return t.String()
+ }
+ return fmt.Sprintf("%s.%s", t.PkgPath(), t.Name())
+}
+
+func recordStackTrace() string {
+ stackTrace := make([]byte, 2048)
+ n := runtime.Stack(stackTrace, false)
+
+ return string(stackTrace[0:n])
+}
+
+// AddEvent adds an event with the provided name and options. If this span is
+// not being recorded then this method does nothing.
+func (s *recordingSpan) AddEvent(name string, o ...trace.EventOption) {
+ if !s.IsRecording() {
+ return
+ }
+ s.addEvent(name, o...)
+}
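Because RecordError only attaches an exception event, callers who want the span marked as failed must pair it with SetStatus. A typical pattern (doWork is a placeholder):

```go
import (
	"context"

	"go.opentelemetry.io/otel/codes"
	"go.opentelemetry.io/otel/trace"
)

// doWork stands in for any fallible operation.
func doWork(ctx context.Context) error { return nil }

func traced(ctx context.Context, span trace.Span) error {
	if err := doWork(ctx); err != nil {
		// Attach the exception event, including a stack trace...
		span.RecordError(err, trace.WithStackTrace(true))
		// ...and separately mark the span itself as failed.
		span.SetStatus(codes.Error, err.Error())
		return err
	}
	return nil
}
```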
+func (s *recordingSpan) addEvent(name string, o ...trace.EventOption) {
+ c := trace.NewEventConfig(o...)
+
+ // Discard attributes over the per-event limit.
+ attributes := c.Attributes()
+ var discarded int
+ if len(attributes) > s.tracer.provider.spanLimits.AttributePerEventCountLimit {
+ discarded = len(attributes) - s.tracer.provider.spanLimits.AttributePerEventCountLimit
+ attributes = attributes[:s.tracer.provider.spanLimits.AttributePerEventCountLimit]
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.events.add(Event{
+ Name: name,
+ Attributes: attributes,
+ DroppedAttributeCount: discarded,
+ Time: c.Timestamp(),
+ })
+}
+
+// SetName sets the name of this span. If this span is not being recorded then
+// this method does nothing.
+func (s *recordingSpan) SetName(name string) {
+ if !s.IsRecording() {
+ return
+ }
+
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.name = name
+}
+
+// Name returns the name of this span.
+func (s *recordingSpan) Name() string {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.name
+}
+
+// Parent returns the SpanContext of this span's parent span.
+func (s *recordingSpan) Parent() trace.SpanContext {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.parent
+}
+
+// SpanKind returns the SpanKind of this span.
+func (s *recordingSpan) SpanKind() trace.SpanKind {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.spanKind
+}
+
+// StartTime returns the time this span started.
+func (s *recordingSpan) StartTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.startTime
+}
+
+// EndTime returns the time this span ended. For spans that have not yet
+// ended, the returned value will be the zero value of time.Time.
+func (s *recordingSpan) EndTime() time.Time {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.endTime
+}
+
+// Attributes returns the attributes of this span.
+//
+// The order of the returned attributes is not guaranteed to be stable.
+func (s *recordingSpan) Attributes() []attribute.KeyValue {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ s.dedupeAttrs()
+ return s.attributes
+}
+
+// dedupeAttrs deduplicates the attributes of s to fit capacity.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrs() {
+ // Do not set a capacity when creating this map. Benchmark testing has
+ // shown this to only add unused memory allocations in general use.
+ exists := make(map[attribute.Key]int)
+ s.dedupeAttrsFromRecord(&exists)
+}
+
+// dedupeAttrsFromRecord deduplicates the attributes of s to fit capacity
+// using record as the record of unique attribute keys to their index.
+//
+// This method assumes s.mu.Lock is held by the caller.
+func (s *recordingSpan) dedupeAttrsFromRecord(record *map[attribute.Key]int) {
+ // Use the fact that slices share the same backing array.
+ unique := s.attributes[:0]
+ for _, a := range s.attributes {
+ if idx, ok := (*record)[a.Key]; ok {
+ unique[idx] = a
+ } else {
+ unique = append(unique, a)
+ (*record)[a.Key] = len(unique) - 1
+ }
+ }
+ // s.attributes have element types of attribute.KeyValue. These types are
+ // not pointers and they themselves do not contain pointer fields,
+ // therefore the duplicate values do not need to be zeroed for them to be
+ // garbage collected.
+ s.attributes = unique
+}
+
+// Links returns the links of this span.
+func (s *recordingSpan) Links() []Link {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.links.queue) == 0 {
+ return []Link{}
+ }
+ return s.interfaceArrayToLinksArray()
+}
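A short sketch of recording an event through the path above; attributes beyond AttributePerEventCountLimit would be truncated and counted as dropped, and the timestamp defaults to the current time when omitted:

```go
import (
	"time"

	"go.opentelemetry.io/otel/attribute"
	"go.opentelemetry.io/otel/trace"
)

func recordCacheMiss(span trace.Span) {
	span.AddEvent("cache.miss",
		trace.WithAttributes(attribute.String("cache.key", "user:42")), // illustrative attribute
		trace.WithTimestamp(time.Now()),                                // optional
	)
}
```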
+// Events returns the events of this span.
+func (s *recordingSpan) Events() []Event {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ if len(s.events.queue) == 0 {
+ return []Event{}
+ }
+ return s.interfaceArrayToEventArray()
+}
+
+// Status returns the status of this span.
+func (s *recordingSpan) Status() Status {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.status
+}
+
+// InstrumentationLibrary returns the instrumentation.Library associated with
+// the Tracer that created this span.
+func (s *recordingSpan) InstrumentationLibrary() instrumentation.Library {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.instrumentationLibrary
+}
+
+// Resource returns the Resource associated with the Tracer that created this
+// span.
+func (s *recordingSpan) Resource() *resource.Resource {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.tracer.provider.resource
+}
+
+func (s *recordingSpan) addLink(link trace.Link) {
+ if !s.IsRecording() || !link.SpanContext.IsValid() {
+ return
+ }
+ s.mu.Lock()
+ defer s.mu.Unlock()
+
+ var droppedAttributeCount int
+
+ // Discard attributes over the per-link limit.
+ if len(link.Attributes) > s.tracer.provider.spanLimits.AttributePerLinkCountLimit {
+ droppedAttributeCount = len(link.Attributes) - s.tracer.provider.spanLimits.AttributePerLinkCountLimit
+ link.Attributes = link.Attributes[:s.tracer.provider.spanLimits.AttributePerLinkCountLimit]
+ }
+
+ s.links.add(Link{link.SpanContext, link.Attributes, droppedAttributeCount})
+}
+
+// DroppedAttributes returns the number of attributes dropped by the span
+// due to limits being reached.
+func (s *recordingSpan) DroppedAttributes() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.droppedAttributes
+}
+
+// DroppedLinks returns the number of links dropped by the span due to limits
+// being reached.
+func (s *recordingSpan) DroppedLinks() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.links.droppedCount
+}
+
+// DroppedEvents returns the number of events dropped by the span due to
+// limits being reached.
+func (s *recordingSpan) DroppedEvents() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.events.droppedCount
+}
+
+// ChildSpanCount returns the count of spans that consider the span a
+// direct parent.
+func (s *recordingSpan) ChildSpanCount() int {
+ s.mu.Lock()
+ defer s.mu.Unlock()
+ return s.childSpanCount
+}
+
+// TracerProvider returns a trace.TracerProvider that can be used to generate
+// additional Spans on the same telemetry pipeline as the current Span.
+func (s *recordingSpan) TracerProvider() trace.TracerProvider {
+ return s.tracer.provider
+}
+
+// snapshot creates a read-only copy of the current state of the span.
+func (s *recordingSpan) snapshot() ReadOnlySpan { + var sd snapshot + s.mu.Lock() + defer s.mu.Unlock() + + sd.endTime = s.endTime + sd.instrumentationLibrary = s.tracer.instrumentationLibrary + sd.name = s.name + sd.parent = s.parent + sd.resource = s.tracer.provider.resource + sd.spanContext = s.spanContext + sd.spanKind = s.spanKind + sd.startTime = s.startTime + sd.status = s.status + sd.childSpanCount = s.childSpanCount + + if len(s.attributes) > 0 { + s.dedupeAttrs() + sd.attributes = s.attributes + } + sd.droppedAttributeCount = s.droppedAttributes + if len(s.events.queue) > 0 { + sd.events = s.interfaceArrayToEventArray() + sd.droppedEventCount = s.events.droppedCount + } + if len(s.links.queue) > 0 { + sd.links = s.interfaceArrayToLinksArray() + sd.droppedLinkCount = s.links.droppedCount + } + return &sd +} + +func (s *recordingSpan) interfaceArrayToLinksArray() []Link { + linkArr := make([]Link, 0) + for _, value := range s.links.queue { + linkArr = append(linkArr, value.(Link)) + } + return linkArr +} + +func (s *recordingSpan) interfaceArrayToEventArray() []Event { + eventArr := make([]Event, 0) + for _, value := range s.events.queue { + eventArr = append(eventArr, value.(Event)) + } + return eventArr +} + +func (s *recordingSpan) addChild() { + if !s.IsRecording() { + return + } + s.mu.Lock() + s.childSpanCount++ + s.mu.Unlock() +} + +func (*recordingSpan) private() {} + +// runtimeTrace starts a "runtime/trace".Task for the span and returns a +// context containing the task. +func (s *recordingSpan) runtimeTrace(ctx context.Context) context.Context { + if !rt.IsEnabled() { + // Avoid additional overhead if runtime/trace is not enabled. + return ctx + } + nctx, task := rt.NewTask(ctx, s.name) + + s.mu.Lock() + s.executionTracerTaskEnd = task.End + s.mu.Unlock() + + return nctx +} + +// nonRecordingSpan is a minimal implementation of the OpenTelemetry Span API +// that wraps a SpanContext. It performs no operations other than to return +// the wrapped SpanContext or TracerProvider that created it. +type nonRecordingSpan struct { + // tracer is the SDK tracer that created this span. + tracer *tracer + sc trace.SpanContext +} + +var _ trace.Span = nonRecordingSpan{} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() trace.SpanContext { return s.sc } + +// IsRecording always returns false. +func (nonRecordingSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (nonRecordingSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (nonRecordingSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (nonRecordingSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (nonRecordingSpan) End(...trace.SpanEndOption) {} + +// RecordError does nothing. +func (nonRecordingSpan) RecordError(error, ...trace.EventOption) {} + +// AddEvent does nothing. +func (nonRecordingSpan) AddEvent(string, ...trace.EventOption) {} + +// SetName does nothing. +func (nonRecordingSpan) SetName(string) {} + +// TracerProvider returns the trace.TracerProvider that provided the Tracer +// that created this span. +func (s nonRecordingSpan) TracerProvider() trace.TracerProvider { return s.tracer.provider } + +func isRecording(s SamplingResult) bool { + return s.Decision == RecordOnly || s.Decision == RecordAndSample +} + +func isSampled(s SamplingResult) bool { + return s.Decision == RecordAndSample +} + +// Status is the classified state of a Span. 
+type Status struct {
+ // Code is an identifier of a Span's state classification.
+ Code codes.Code
+ // Description is a user hint about why that status was set. It is only
+ // applicable when Code is Error.
+ Description string
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
new file mode 100644
index 0000000000..9fb3d6eac3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_exporter.go
@@ -0,0 +1,47 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import "context"
+
+// SpanExporter handles the delivery of spans to external receivers. This is
+// the final component in the trace export pipeline.
+type SpanExporter interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ExportSpans exports a batch of spans.
+ //
+ // This function is called synchronously, so there is no concurrency
+ // safety requirement. However, due to the synchronous calling pattern,
+ // it is critical that all timeouts and cancellations contained in the
+ // passed context must be honored.
+ //
+ // Any retry logic must be contained in this function. The SDK that
+ // calls this function will not implement any retry logic. All errors
+ // returned by this function are considered unrecoverable and will be
+ // reported to a configured error Handler.
+ ExportSpans(ctx context.Context, spans []ReadOnlySpan) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown notifies the exporter of a pending halt to operations. The
+ // exporter is expected to perform any cleanup or synchronization it
+ // requires while honoring all timeouts and cancellations contained in
+ // the passed context.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
new file mode 100644
index 0000000000..b649a2ff04
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/span_processor.go
@@ -0,0 +1,67 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "sync"
+)
+
+// SpanProcessor is a processing pipeline for spans in the trace signal.
+// SpanProcessors are registered with a TracerProvider and are called at the
+// start and end of a Span's lifecycle, in the order they are registered.
+type SpanProcessor interface {
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnStart is called when a span is started. It is called synchronously
+ // and should not block.
+ OnStart(parent context.Context, s ReadWriteSpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // OnEnd is called when a span is finished. It is called synchronously and
+ // hence should not block.
+ OnEnd(s ReadOnlySpan)
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // Shutdown is called when the SDK shuts down. Any cleanup or release of
+ // resources held by the processor should be done in this call.
+ //
+ // Calls to OnStart, OnEnd, or ForceFlush after this has been called
+ // should be ignored.
+ //
+ // All timeouts and cancellations contained in ctx must be honored, this
+ // should not block indefinitely.
+ Shutdown(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+
+ // ForceFlush exports all ended spans to the configured Exporter that have not yet
+ // been exported. It should only be called when absolutely necessary, such as when
+ // using a FaaS provider that may suspend the process after an invocation, but before
+ // the Processor can export the completed spans.
+ ForceFlush(ctx context.Context) error
+ // DO NOT CHANGE: any modification will not be backwards compatible and
+ // must never be done outside of a new major release.
+}
+
+type spanProcessorState struct {
+ sp SpanProcessor
+ state *sync.Once
+}
+type spanProcessorStates []*spanProcessorState
diff --git a/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
new file mode 100644
index 0000000000..5b8ab43be3
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/sdk/trace/tracer.go
@@ -0,0 +1,156 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/sdk/trace"
+
+import (
+ "context"
+ "time"
+
+ "go.opentelemetry.io/otel/sdk/instrumentation"
+ "go.opentelemetry.io/otel/trace"
+)
+
+type tracer struct {
+ provider *TracerProvider
+ instrumentationLibrary instrumentation.Library
+}
+
+var _ trace.Tracer = &tracer{}
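A sketch of a custom processor satisfying the interface above; it stamps every span at start, which works because OnStart receives a writable ReadWriteSpan (the attribute name and value are made up for the example):

```go
import (
	"context"

	"go.opentelemetry.io/otel/attribute"
	sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// annotator adds a fixed attribute to every span as it starts.
type annotator struct{}

var _ sdktrace.SpanProcessor = annotator{}

func (annotator) OnStart(_ context.Context, s sdktrace.ReadWriteSpan) {
	s.SetAttributes(attribute.String("deploy.region", "eu-west-1")) // hypothetical value
}
func (annotator) OnEnd(sdktrace.ReadOnlySpan)      {}
func (annotator) Shutdown(context.Context) error   { return nil }
func (annotator) ForceFlush(context.Context) error { return nil }
```

It registers like any other processor: sdktrace.NewTracerProvider(sdktrace.WithSpanProcessor(annotator{})).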
+// Start starts a Span and returns it along with a context containing it.
+//
+// The Span is created with the provided name and as a child of any existing
+// span context found in the passed context. The created Span will be
+// configured appropriately by any SpanOption passed.
+func (tr *tracer) Start(ctx context.Context, name string, options ...trace.SpanStartOption) (context.Context, trace.Span) {
+ config := trace.NewSpanStartConfig(options...)
+
+ // For local spans created by this SDK, track child span count.
+ if p := trace.SpanFromContext(ctx); p != nil {
+ if sdkSpan, ok := p.(*recordingSpan); ok {
+ sdkSpan.addChild()
+ }
+ }
+
+ s := tr.newSpan(ctx, name, &config)
+ if rw, ok := s.(ReadWriteSpan); ok && s.IsRecording() {
+ sps, _ := tr.provider.spanProcessors.Load().(spanProcessorStates)
+ for _, sp := range sps {
+ sp.sp.OnStart(ctx, rw)
+ }
+ }
+ if rtt, ok := s.(runtimeTracer); ok {
+ ctx = rtt.runtimeTrace(ctx)
+ }
+
+ return trace.ContextWithSpan(ctx, s), s
+}
+
+type runtimeTracer interface {
+ // runtimeTrace starts a "runtime/trace".Task for the span and
+ // returns a context containing the task.
+ runtimeTrace(ctx context.Context) context.Context
+}
+
+// newSpan returns a new configured span.
+func (tr *tracer) newSpan(ctx context.Context, name string, config *trace.SpanConfig) trace.Span {
+ // If told explicitly to make this a new root, use a zero value SpanContext
+ // as a parent, which contains an invalid trace ID and is not remote.
+ var psc trace.SpanContext
+ if config.NewRoot() {
+ ctx = trace.ContextWithSpanContext(ctx, psc)
+ } else {
+ psc = trace.SpanContextFromContext(ctx)
+ }
+
+ // If there is a valid parent trace ID, use it to ensure the continuity of
+ // the trace. Always generate a new span ID so other components can rely
+ // on a unique span ID, even if the Span is non-recording.
+ var tid trace.TraceID
+ var sid trace.SpanID
+ if !psc.TraceID().IsValid() {
+ tid, sid = tr.provider.idGenerator.NewIDs(ctx)
+ } else {
+ tid = psc.TraceID()
+ sid = tr.provider.idGenerator.NewSpanID(ctx, tid)
+ }
+
+ samplingResult := tr.provider.sampler.ShouldSample(SamplingParameters{
+ ParentContext: ctx,
+ TraceID: tid,
+ Name: name,
+ Kind: config.SpanKind(),
+ Attributes: config.Attributes(),
+ Links: config.Links(),
+ })
+
+ scc := trace.SpanContextConfig{
+ TraceID: tid,
+ SpanID: sid,
+ TraceState: samplingResult.Tracestate,
+ }
+ if isSampled(samplingResult) {
+ scc.TraceFlags = psc.TraceFlags() | trace.FlagsSampled
+ } else {
+ scc.TraceFlags = psc.TraceFlags() &^ trace.FlagsSampled
+ }
+ sc := trace.NewSpanContext(scc)
+
+ if !isRecording(samplingResult) {
+ return tr.newNonRecordingSpan(sc)
+ }
+ return tr.newRecordingSpan(psc, sc, name, samplingResult, config)
+}
+
+// newRecordingSpan returns a new configured recordingSpan.
+func (tr *tracer) newRecordingSpan(psc, sc trace.SpanContext, name string, sr SamplingResult, config *trace.SpanConfig) *recordingSpan {
+ startTime := config.Timestamp()
+ if startTime.IsZero() {
+ startTime = time.Now()
+ }
+
+ s := &recordingSpan{
+ // Do not pre-allocate the attributes slice here! Doing so will
+ // allocate memory that is likely never going to be used, or if used,
+ // will be over-sized. The default Go compiler has been tested to
+ // dynamically allocate needed space very well. Benchmarking has shown
+ // it to be more performant than what we can predetermine here,
+ // especially for the common use case of few to no added
+ // attributes.
+ + parent: psc, + spanContext: sc, + spanKind: trace.ValidateSpanKind(config.SpanKind()), + name: name, + startTime: startTime, + events: newEvictedQueue(tr.provider.spanLimits.EventCountLimit), + links: newEvictedQueue(tr.provider.spanLimits.LinkCountLimit), + tracer: tr, + } + + for _, l := range config.Links() { + s.addLink(l) + } + + s.SetAttributes(sr.Attributes...) + s.SetAttributes(config.Attributes()...) + + return s +} + +// newNonRecordingSpan returns a new configured nonRecordingSpan. +func (tr *tracer) newNonRecordingSpan(sc trace.SpanContext) nonRecordingSpan { + return nonRecordingSpan{tracer: tr, sc: sc} +} diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go new file mode 100644 index 0000000000..ba878d1cf6 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/doc.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package semconv implements OpenTelemetry semantic conventions. +// +// OpenTelemetry semantic conventions are agreed standardized naming +// patterns for OpenTelemetry things. This package represents the conventions +// as of the v1.7.0 version of the OpenTelemetry specification. +package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go new file mode 100644 index 0000000000..ea37068627 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/exception.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" + +const ( + // ExceptionEventName is the name of the Span event representing an exception. + ExceptionEventName = "exception" +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go new file mode 100644 index 0000000000..9b430fac0c --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/http.go @@ -0,0 +1,312 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
+
+import (
+ "fmt"
+ "net"
+ "net/http"
+ "strconv"
+ "strings"
+
+ "go.opentelemetry.io/otel/trace"
+
+ "go.opentelemetry.io/otel/attribute"
+ "go.opentelemetry.io/otel/codes"
+)
+
+// HTTP scheme attributes.
+var (
+ HTTPSchemeHTTP = HTTPSchemeKey.String("http")
+ HTTPSchemeHTTPS = HTTPSchemeKey.String("https")
+)
+
+// NetAttributesFromHTTPRequest generates attributes of the net
+// namespace as specified by the OpenTelemetry specification for a
+// span. The network parameter is a string that the net.Dial function
+// from the standard library can understand.
+func NetAttributesFromHTTPRequest(network string, request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{}
+
+ switch network {
+ case "tcp", "tcp4", "tcp6":
+ attrs = append(attrs, NetTransportTCP)
+ case "udp", "udp4", "udp6":
+ attrs = append(attrs, NetTransportUDP)
+ case "ip", "ip4", "ip6":
+ attrs = append(attrs, NetTransportIP)
+ case "unix", "unixgram", "unixpacket":
+ attrs = append(attrs, NetTransportUnix)
+ default:
+ attrs = append(attrs, NetTransportOther)
+ }
+
+ peerIP, peerName, peerPort := hostIPNamePort(request.RemoteAddr)
+ if peerIP != "" {
+ attrs = append(attrs, NetPeerIPKey.String(peerIP))
+ }
+ if peerName != "" {
+ attrs = append(attrs, NetPeerNameKey.String(peerName))
+ }
+ if peerPort != 0 {
+ attrs = append(attrs, NetPeerPortKey.Int(peerPort))
+ }
+
+ hostIP, hostName, hostPort := "", "", 0
+ for _, someHost := range []string{request.Host, request.Header.Get("Host"), request.URL.Host} {
+ hostIP, hostName, hostPort = hostIPNamePort(someHost)
+ if hostIP != "" || hostName != "" || hostPort != 0 {
+ break
+ }
+ }
+ if hostIP != "" {
+ attrs = append(attrs, NetHostIPKey.String(hostIP))
+ }
+ if hostName != "" {
+ attrs = append(attrs, NetHostNameKey.String(hostName))
+ }
+ if hostPort != 0 {
+ attrs = append(attrs, NetHostPortKey.Int(hostPort))
+ }
+
+ return attrs
+}
+
+// hostIPNamePort extracts the IP address, name and (optional) port from hostWithPort.
+// It handles both IPv4 and IPv6 addresses. If the host portion is not recognized
+// as a valid IPv4 or IPv6 address, the `ip` result will be empty and the
+// host portion will instead be returned in `name`.
+func hostIPNamePort(hostWithPort string) (ip string, name string, port int) {
+ var (
+ hostPart, portPart string
+ parsedPort uint64
+ err error
+ )
+ if hostPart, portPart, err = net.SplitHostPort(hostWithPort); err != nil {
+ hostPart, portPart = hostWithPort, ""
+ }
+ if parsedIP := net.ParseIP(hostPart); parsedIP != nil {
+ ip = parsedIP.String()
+ } else {
+ name = hostPart
+ }
+ if parsedPort, err = strconv.ParseUint(portPart, 10, 16); err == nil {
+ port = int(parsedPort)
+ }
+ return
+}
+
+// EndUserAttributesFromHTTPRequest generates attributes of the
+// enduser namespace as specified by the OpenTelemetry specification
+// for a span.
+func EndUserAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + if username, _, ok := request.BasicAuth(); ok { + return []attribute.KeyValue{EnduserIDKey.String(username)} + } + return nil +} + +// HTTPClientAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the client side. +func HTTPClientAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + + if request.Method != "" { + attrs = append(attrs, HTTPMethodKey.String(request.Method)) + } else { + attrs = append(attrs, HTTPMethodKey.String(http.MethodGet)) + } + + // remove any username/password info that may be in the URL + // before adding it to the attributes + userinfo := request.URL.User + request.URL.User = nil + + attrs = append(attrs, HTTPURLKey.String(request.URL.String())) + + // restore any username/password info that was removed + request.URL.User = userinfo + + return append(attrs, httpCommonAttributesFromHTTPRequest(request)...) +} + +func httpCommonAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if ua := request.UserAgent(); ua != "" { + attrs = append(attrs, HTTPUserAgentKey.String(ua)) + } + if request.ContentLength > 0 { + attrs = append(attrs, HTTPRequestContentLengthKey.Int64(request.ContentLength)) + } + + return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) +} + +func httpBasicAttributesFromHTTPRequest(request *http.Request) []attribute.KeyValue { + // as these attributes are used by HTTPServerMetricAttributesFromHTTPRequest, they should be low-cardinality + attrs := []attribute.KeyValue{} + + if request.TLS != nil { + attrs = append(attrs, HTTPSchemeHTTPS) + } else { + attrs = append(attrs, HTTPSchemeHTTP) + } + + if request.Host != "" { + attrs = append(attrs, HTTPHostKey.String(request.Host)) + } + + flavor := "" + if request.ProtoMajor == 1 { + flavor = fmt.Sprintf("1.%d", request.ProtoMinor) + } else if request.ProtoMajor == 2 { + flavor = "2" + } + if flavor != "" { + attrs = append(attrs, HTTPFlavorKey.String(flavor)) + } + + return attrs +} + +// HTTPServerMetricAttributesFromHTTPRequest generates low-cardinality attributes +// to be used with server-side HTTP metrics. +func HTTPServerMetricAttributesFromHTTPRequest(serverName string, request *http.Request) []attribute.KeyValue { + attrs := []attribute.KeyValue{} + if serverName != "" { + attrs = append(attrs, HTTPServerNameKey.String(serverName)) + } + return append(attrs, httpBasicAttributesFromHTTPRequest(request)...) +} + +// HTTPServerAttributesFromHTTPRequest generates attributes of the +// http namespace as specified by the OpenTelemetry specification for +// a span on the server side. Currently, only basic authentication is +// supported. 
+func HTTPServerAttributesFromHTTPRequest(serverName, route string, request *http.Request) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{
+ HTTPMethodKey.String(request.Method),
+ HTTPTargetKey.String(request.RequestURI),
+ }
+
+ if serverName != "" {
+ attrs = append(attrs, HTTPServerNameKey.String(serverName))
+ }
+ if route != "" {
+ attrs = append(attrs, HTTPRouteKey.String(route))
+ }
+ if values, ok := request.Header["X-Forwarded-For"]; ok && len(values) > 0 {
+ if addresses := strings.SplitN(values[0], ",", 2); len(addresses) > 0 {
+ attrs = append(attrs, HTTPClientIPKey.String(addresses[0]))
+ }
+ }
+
+ return append(attrs, httpCommonAttributesFromHTTPRequest(request)...)
+}
+
+// HTTPAttributesFromHTTPStatusCode generates attributes of the http
+// namespace as specified by the OpenTelemetry specification for a
+// span.
+func HTTPAttributesFromHTTPStatusCode(code int) []attribute.KeyValue {
+ attrs := []attribute.KeyValue{
+ HTTPStatusCodeKey.Int(code),
+ }
+ return attrs
+}
+
+type codeRange struct {
+ fromInclusive int
+ toInclusive int
+}
+
+func (r codeRange) contains(code int) bool {
+ return r.fromInclusive <= code && code <= r.toInclusive
+}
+
+var validRangesPerCategory = map[int][]codeRange{
+ 1: {
+ {http.StatusContinue, http.StatusEarlyHints},
+ },
+ 2: {
+ {http.StatusOK, http.StatusAlreadyReported},
+ {http.StatusIMUsed, http.StatusIMUsed},
+ },
+ 3: {
+ {http.StatusMultipleChoices, http.StatusUseProxy},
+ {http.StatusTemporaryRedirect, http.StatusPermanentRedirect},
+ },
+ 4: {
+ {http.StatusBadRequest, http.StatusTeapot}, // yes, teapot is so useful…
+ {http.StatusMisdirectedRequest, http.StatusUpgradeRequired},
+ {http.StatusPreconditionRequired, http.StatusTooManyRequests},
+ {http.StatusRequestHeaderFieldsTooLarge, http.StatusRequestHeaderFieldsTooLarge},
+ {http.StatusUnavailableForLegalReasons, http.StatusUnavailableForLegalReasons},
+ },
+ 5: {
+ {http.StatusInternalServerError, http.StatusLoopDetected},
+ {http.StatusNotExtended, http.StatusNetworkAuthenticationRequired},
+ },
+}
+
+// SpanStatusFromHTTPStatusCode generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+func SpanStatusFromHTTPStatusCode(code int) (codes.Code, string) {
+ spanCode, valid := validateHTTPStatusCode(code)
+ if !valid {
+ return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ return spanCode, ""
+}
+
+// SpanStatusFromHTTPStatusCodeAndSpanKind generates a status code and a message
+// as specified by the OpenTelemetry specification for a span.
+// Exclude 4xx for SERVER to set the appropriate status.
+func SpanStatusFromHTTPStatusCodeAndSpanKind(code int, spanKind trace.SpanKind) (codes.Code, string) {
+ spanCode, valid := validateHTTPStatusCode(code)
+ if !valid {
+ return spanCode, fmt.Sprintf("Invalid HTTP status code %d", code)
+ }
+ category := code / 100
+ if spanKind == trace.SpanKindServer && category == 4 {
+ return codes.Unset, ""
+ }
+ return spanCode, ""
+}
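Taken together, these helpers cover the span lifecycle of a typical HTTP server handler. A sketch; the tracer name, service name, and route are illustrative:

```go
import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func handleItems(w http.ResponseWriter, r *http.Request) {
	tracer := otel.Tracer("items-svc")
	_, span := tracer.Start(r.Context(), "GET /items",
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(semconv.HTTPServerAttributesFromHTTPRequest("items-svc", "/items", r)...),
	)
	defer span.End()

	status := http.StatusOK
	span.SetAttributes(semconv.HTTPAttributesFromHTTPStatusCode(status)...)
	// Note: on the server side 4xx maps to Unset, not Error, per the kind-aware helper.
	span.SetStatus(semconv.SpanStatusFromHTTPStatusCodeAndSpanKind(status, trace.SpanKindServer))
	w.WriteHeader(status)
}
```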
+// Validates the HTTP status code and returns the corresponding span status
+// code. If the `code` is not a valid HTTP status code, returns span status
+// Error and false.
+func validateHTTPStatusCode(code int) (codes.Code, bool) {
+ category := code / 100
+ ranges, ok := validRangesPerCategory[category]
+ if !ok {
+ return codes.Error, false
+ }
+ ok = false
+ for _, crange := range ranges {
+ ok = crange.contains(code)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ return codes.Error, false
+ }
+ if category > 0 && category < 4 {
+ return codes.Unset, true
+ }
+ return codes.Error, true
+}
diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go
new file mode 100644
index 0000000000..aab6daf3c5
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/resource.go
@@ -0,0 +1,946 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// Code generated from semantic convention specification. DO NOT EDIT.
+
+package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0"
+
+import "go.opentelemetry.io/otel/attribute"
+
+// A cloud environment (e.g. GCP, Azure, AWS)
+const (
+ // Name of the cloud provider.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ CloudProviderKey = attribute.Key("cloud.provider")
+ // The cloud account ID the resource is assigned to.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: '111111111111', 'opentelemetry'
+ CloudAccountIDKey = attribute.Key("cloud.account.id")
+ // The geographical region the resource is running in. Refer to your provider's
+ // docs to see the available regions, for example [Alibaba Cloud
+ // regions](https://www.alibabacloud.com/help/doc-detail/40654.htm), [AWS
+ // regions](https://aws.amazon.com/about-aws/global-infrastructure/regions_az/),
+ // [Azure regions](https://azure.microsoft.com/en-us/global-
+ // infrastructure/geographies/), or [Google Cloud
+ // regions](https://cloud.google.com/about/locations).
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-central1', 'us-east-1'
+ CloudRegionKey = attribute.Key("cloud.region")
+ // Cloud regions often have multiple, isolated locations known as zones to
+ // increase availability. Availability zone represents the zone where the resource
+ // is running.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-east-1c'
+ // Note: Availability zones are called "zones" on Alibaba Cloud and Google Cloud.
+ CloudAvailabilityZoneKey = attribute.Key("cloud.availability_zone")
+ // The cloud platform in use.
+ //
+ // Type: Enum
+ // Required: No
+ // Stability: stable
+ // Note: The prefix of the service SHOULD match the one specified in
+ // `cloud.provider`.
+ CloudPlatformKey = attribute.Key("cloud.platform") +) + +var ( + // Alibaba Cloud + CloudProviderAlibabaCloud = CloudProviderKey.String("alibaba_cloud") + // Amazon Web Services + CloudProviderAWS = CloudProviderKey.String("aws") + // Microsoft Azure + CloudProviderAzure = CloudProviderKey.String("azure") + // Google Cloud Platform + CloudProviderGCP = CloudProviderKey.String("gcp") +) + +var ( + // Alibaba Cloud Elastic Compute Service + CloudPlatformAlibabaCloudECS = CloudPlatformKey.String("alibaba_cloud_ecs") + // Alibaba Cloud Function Compute + CloudPlatformAlibabaCloudFc = CloudPlatformKey.String("alibaba_cloud_fc") + // AWS Elastic Compute Cloud + CloudPlatformAWSEC2 = CloudPlatformKey.String("aws_ec2") + // AWS Elastic Container Service + CloudPlatformAWSECS = CloudPlatformKey.String("aws_ecs") + // AWS Elastic Kubernetes Service + CloudPlatformAWSEKS = CloudPlatformKey.String("aws_eks") + // AWS Lambda + CloudPlatformAWSLambda = CloudPlatformKey.String("aws_lambda") + // AWS Elastic Beanstalk + CloudPlatformAWSElasticBeanstalk = CloudPlatformKey.String("aws_elastic_beanstalk") + // Azure Virtual Machines + CloudPlatformAzureVM = CloudPlatformKey.String("azure_vm") + // Azure Container Instances + CloudPlatformAzureContainerInstances = CloudPlatformKey.String("azure_container_instances") + // Azure Kubernetes Service + CloudPlatformAzureAKS = CloudPlatformKey.String("azure_aks") + // Azure Functions + CloudPlatformAzureFunctions = CloudPlatformKey.String("azure_functions") + // Azure App Service + CloudPlatformAzureAppService = CloudPlatformKey.String("azure_app_service") + // Google Cloud Compute Engine (GCE) + CloudPlatformGCPComputeEngine = CloudPlatformKey.String("gcp_compute_engine") + // Google Cloud Run + CloudPlatformGCPCloudRun = CloudPlatformKey.String("gcp_cloud_run") + // Google Cloud Kubernetes Engine (GKE) + CloudPlatformGCPKubernetesEngine = CloudPlatformKey.String("gcp_kubernetes_engine") + // Google Cloud Functions (GCF) + CloudPlatformGCPCloudFunctions = CloudPlatformKey.String("gcp_cloud_functions") + // Google Cloud App Engine (GAE) + CloudPlatformGCPAppEngine = CloudPlatformKey.String("gcp_app_engine") +) + +// Resources used by AWS Elastic Container Service (ECS). +const ( + // The Amazon Resource Name (ARN) of an [ECS container instance](https://docs.aws. + // amazon.com/AmazonECS/latest/developerguide/ECS_instances.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:container/32624152-9086-4f0e-acae-1a75b14fe4d9' + AWSECSContainerARNKey = attribute.Key("aws.ecs.container.arn") + // The ARN of an [ECS cluster](https://docs.aws.amazon.com/AmazonECS/latest/develo + // perguide/clusters.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSECSClusterARNKey = attribute.Key("aws.ecs.cluster.arn") + // The [launch type](https://docs.aws.amazon.com/AmazonECS/latest/developerguide/l + // aunch_types.html) for an ECS task. + // + // Type: Enum + // Required: No + // Stability: stable + AWSECSLaunchtypeKey = attribute.Key("aws.ecs.launchtype") + // The ARN of an [ECS task definition](https://docs.aws.amazon.com/AmazonECS/lates + // t/developerguide/task_definitions.html). 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us- + // west-1:123456789123:task/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSECSTaskARNKey = attribute.Key("aws.ecs.task.arn") + // The task definition family this task definition is a member of. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-family' + AWSECSTaskFamilyKey = attribute.Key("aws.ecs.task.family") + // The revision for this task definition. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '8', '26' + AWSECSTaskRevisionKey = attribute.Key("aws.ecs.task.revision") +) + +var ( + // ec2 + AWSECSLaunchtypeEC2 = AWSECSLaunchtypeKey.String("ec2") + // fargate + AWSECSLaunchtypeFargate = AWSECSLaunchtypeKey.String("fargate") +) + +// Resources used by AWS Elastic Kubernetes Service (EKS). +const ( + // The ARN of an EKS cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:ecs:us-west-2:123456789123:cluster/my-cluster' + AWSEKSClusterARNKey = attribute.Key("aws.eks.cluster.arn") +) + +// Resources specific to Amazon Web Services. +const ( + // The name(s) of the AWS log group(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '/aws/lambda/my-function', 'opentelemetry-service' + // Note: Multiple log groups must be supported for cases like multi-container + // applications, where a single application has sidecar containers, and each write + // to their own log group. + AWSLogGroupNamesKey = attribute.Key("aws.log.group.names") + // The Amazon Resource Name(s) (ARN) of the AWS log group(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:*' + // Note: See the [log group ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). + AWSLogGroupARNsKey = attribute.Key("aws.log.group.arns") + // The name(s) of the AWS log stream(s) an application is writing to. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + AWSLogStreamNamesKey = attribute.Key("aws.log.stream.names") + // The ARN(s) of the AWS log stream(s). + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'arn:aws:logs:us-west-1:123456789012:log-group:/aws/my/group:log- + // stream:logs/main/10838bed-421f-43ef-870a-f43feacbbb5b' + // Note: See the [log stream ARN format + // documentation](https://docs.aws.amazon.com/AmazonCloudWatch/latest/logs/iam- + // access-control-overview-cwl.html#CWL_ARN_Format). One log group can contain + // several log streams, so these ARNs necessarily identify both a log group and a + // log stream. + AWSLogStreamARNsKey = attribute.Key("aws.log.stream.arns") +) + +// A container instance. +const ( + // Container name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-autoconf' + ContainerNameKey = attribute.Key("container.name") + // Container ID. Usually a UUID, as for example used to [identify Docker + // containers](https://docs.docker.com/engine/reference/run/#container- + // identification). The UUID might be abbreviated. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'a3bf90e006b2' + ContainerIDKey = attribute.Key("container.id") + // The container runtime managing this container. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'docker', 'containerd', 'rkt' + ContainerRuntimeKey = attribute.Key("container.runtime") + // Name of the image the container was built on. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'gcr.io/opentelemetry/operator' + ContainerImageNameKey = attribute.Key("container.image.name") + // Container image tag. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + ContainerImageTagKey = attribute.Key("container.image.tag") +) + +// The software deployment. +const ( + // Name of the [deployment + // environment](https://en.wikipedia.org/wiki/Deployment_environment) (aka + // deployment tier). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'staging', 'production' + DeploymentEnvironmentKey = attribute.Key("deployment.environment") +) + +// The device on which the process represented by this resource is running. +const ( + // A unique identifier representing the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2ab2916d-a51f-4ac8-80ee-45ac31a28092' + // Note: The device identifier MUST only be defined using the values outlined + // below. This value is not an advertising identifier and MUST NOT be used as + // such. On iOS (Swift or Objective-C), this value MUST be equal to the [vendor id + // entifier](https://developer.apple.com/documentation/uikit/uidevice/1620059-iden + // tifierforvendor). On Android (Java or Kotlin), this value MUST be equal to the + // Firebase Installation ID or a globally unique UUID which is persisted across + // sessions in your application. More information can be found + // [here](https://developer.android.com/training/articles/user-data-ids) on best + // practices and exact implementation details. Caution should be taken when + // storing personal data or anything which can identify a user. GDPR and data + // protection laws may apply, ensure you do your own due diligence. + DeviceIDKey = attribute.Key("device.id") + // The model identifier for the device + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone3,4', 'SM-G920F' + // Note: It's recommended this value represents a machine readable version of the + // model identifier rather than the market or consumer-friendly name of the + // device. + DeviceModelIdentifierKey = attribute.Key("device.model.identifier") + // The marketing name for the device model + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iPhone 6s Plus', 'Samsung Galaxy S6' + // Note: It's recommended this value represents a human readable version of the + // device model rather than a machine readable alternative. + DeviceModelNameKey = attribute.Key("device.model.name") +) + +// A serverless instance. +const ( + // The name of the single function that this runtime instance executes. 
+ // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: This is the name of the function as configured/deployed on the FaaS + // platform and is usually different from the name of the callback function (which + // may be stored in the + // [`code.namespace`/`code.function`](../../trace/semantic_conventions/span- + // general.md#source-code-attributes) span attributes). + FaaSNameKey = attribute.Key("faas.name") + // The unique ID of the single function that this runtime instance executes. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-west-2:123456789012:function:my-function' + // Note: Depending on the cloud provider, use: + + // * **AWS Lambda:** The function + // [ARN](https://docs.aws.amazon.com/general/latest/gr/aws-arns-and- + // namespaces.html). + // Take care not to use the "invoked ARN" directly but replace any + // [alias suffix](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // aliases.html) with the resolved function version, as the same runtime instance + // may be invokable with multiple + // different aliases. + // * **GCP:** The [URI of the resource](https://cloud.google.com/iam/docs/full- + // resource-names) + // * **Azure:** The [Fully Qualified Resource ID](https://docs.microsoft.com/en- + // us/rest/api/resources/resources/get-by-id). + + // On some providers, it may not be possible to determine the full ID at startup, + // which is why this field cannot be made required. For example, on AWS the + // account ID + // part of the ARN is not available without calling another AWS API + // which may be deemed too slow for a short-running lambda function. + // As an alternative, consider setting `faas.id` as a span attribute instead. + FaaSIDKey = attribute.Key("faas.id") + // The immutable version of the function being executed. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '26', 'pinkfroid-00002' + // Note: Depending on the cloud provider and platform, use: + + // * **AWS Lambda:** The [function + // version](https://docs.aws.amazon.com/lambda/latest/dg/configuration- + // versions.html) + // (an integer represented as a decimal string). + // * **Google Cloud Run:** The + // [revision](https://cloud.google.com/run/docs/managing/revisions) + // (i.e., the function name plus the revision suffix). + // * **Google Cloud Functions:** The value of the + // [`K_REVISION` environment + // variable](https://cloud.google.com/functions/docs/env- + // var#runtime_environment_variables_set_automatically). + // * **Azure Functions:** Not applicable. Do not set this attribute. + FaaSVersionKey = attribute.Key("faas.version") + // The execution environment ID as a string, that will be potentially reused for + // other invocations to the same function/function version. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2021/06/28/[$LATEST]2f399eb14537447da05ab2a2e39309de' + // Note: * **AWS Lambda:** Use the (full) log stream name. + FaaSInstanceKey = attribute.Key("faas.instance") + // The amount of memory available to the serverless function in MiB. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 128 + // Note: It's recommended to set this attribute since e.g. too little memory can + // easily stop a Java AWS Lambda function from working correctly. On AWS Lambda, + // the environment variable `AWS_LAMBDA_FUNCTION_MEMORY_SIZE` provides this + // information. 
+ FaaSMaxMemoryKey = attribute.Key("faas.max_memory") +) + +// A host is defined as a general computing instance. +const ( + // Unique host ID. For Cloud, this must be the instance_id assigned by the cloud + // provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostIDKey = attribute.Key("host.id") + // Name of the host. On Unix systems, it may contain what the hostname command + // returns, or the fully qualified hostname, or another name specified by the + // user. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-test' + HostNameKey = attribute.Key("host.name") + // Type of host. For Cloud, this must be the machine type. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'n1-standard-1' + HostTypeKey = attribute.Key("host.type") + // The CPU architecture the host system is running on. + // + // Type: Enum + // Required: No + // Stability: stable + HostArchKey = attribute.Key("host.arch") + // Name of the VM image or OS install the host was instantiated from. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'infra-ami-eks-worker-node-7d4ec78312', 'CentOS-8-x86_64-1905' + HostImageNameKey = attribute.Key("host.image.name") + // VM image ID. For Cloud, this value is from the provider. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ami-07b06b442921831e5' + HostImageIDKey = attribute.Key("host.image.id") + // The version string of the VM image as defined in [Version + // Attributes](README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.1' + HostImageVersionKey = attribute.Key("host.image.version") +) + +var ( + // AMD64 + HostArchAMD64 = HostArchKey.String("amd64") + // ARM32 + HostArchARM32 = HostArchKey.String("arm32") + // ARM64 + HostArchARM64 = HostArchKey.String("arm64") + // Itanium + HostArchIA64 = HostArchKey.String("ia64") + // 32-bit PowerPC + HostArchPPC32 = HostArchKey.String("ppc32") + // 64-bit PowerPC + HostArchPPC64 = HostArchKey.String("ppc64") + // 32-bit x86 + HostArchX86 = HostArchKey.String("x86") +) + +// A Kubernetes Cluster. +const ( + // The name of the cluster. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-cluster' + K8SClusterNameKey = attribute.Key("k8s.cluster.name") +) + +// A Kubernetes Node object. +const ( + // The name of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'node-1' + K8SNodeNameKey = attribute.Key("k8s.node.name") + // The UID of the Node. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1eb3a0c6-0477-4080-a9cb-0cb7db65c6a2' + K8SNodeUIDKey = attribute.Key("k8s.node.uid") +) + +// A Kubernetes Namespace. +const ( + // The name of the namespace that the pod is running in. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'default' + K8SNamespaceNameKey = attribute.Key("k8s.namespace.name") +) + +// A Kubernetes Pod object. +const ( + // The UID of the Pod. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SPodUIDKey = attribute.Key("k8s.pod.uid") + // The name of the Pod. 
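To illustrate how the host.* and k8s.* keys above are typically filled in, here is a hypothetical detector-style helper. The POD_NAMESPACE/POD_NAME environment variables are an assumption (commonly injected via the Kubernetes downward API), not something this package prescribes:

package main

import (
	"fmt"
	"os"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

// hostAndPodAttrs sketches how a resource detector might populate the
// host.* and k8s.* keys defined in this file.
func hostAndPodAttrs() []attribute.KeyValue {
	hostname, _ := os.Hostname() // error handling elided for brevity
	return []attribute.KeyValue{
		semconv.HostNameKey.String(hostname),
		// Hard-coded for brevity; a real detector would map runtime.GOARCH
		// onto the HostArch* enum members above.
		semconv.HostArchAMD64,
		// Hypothetical env var names, e.g. set via the downward API.
		semconv.K8SNamespaceNameKey.String(os.Getenv("POD_NAMESPACE")),
		semconv.K8SPodNameKey.String(os.Getenv("POD_NAME")),
	}
}

func main() {
	fmt.Println(hostAndPodAttrs())
}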
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry-pod-autoconf' + K8SPodNameKey = attribute.Key("k8s.pod.name") +) + +// A container in a [PodTemplate](https://kubernetes.io/docs/concepts/workloads/pods/#pod-templates). +const ( + // The name of the Container in a Pod template. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'redis' + K8SContainerNameKey = attribute.Key("k8s.container.name") +) + +// A Kubernetes ReplicaSet object. +const ( + // The UID of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SReplicaSetUIDKey = attribute.Key("k8s.replicaset.uid") + // The name of the ReplicaSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SReplicaSetNameKey = attribute.Key("k8s.replicaset.name") +) + +// A Kubernetes Deployment object. +const ( + // The UID of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDeploymentUIDKey = attribute.Key("k8s.deployment.uid") + // The name of the Deployment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDeploymentNameKey = attribute.Key("k8s.deployment.name") +) + +// A Kubernetes StatefulSet object. +const ( + // The UID of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SStatefulSetUIDKey = attribute.Key("k8s.statefulset.uid") + // The name of the StatefulSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SStatefulSetNameKey = attribute.Key("k8s.statefulset.name") +) + +// A Kubernetes DaemonSet object. +const ( + // The UID of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SDaemonSetUIDKey = attribute.Key("k8s.daemonset.uid") + // The name of the DaemonSet. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SDaemonSetNameKey = attribute.Key("k8s.daemonset.name") +) + +// A Kubernetes Job object. +const ( + // The UID of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SJobUIDKey = attribute.Key("k8s.job.uid") + // The name of the Job. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SJobNameKey = attribute.Key("k8s.job.name") +) + +// A Kubernetes CronJob object. +const ( + // The UID of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '275ecb36-5aa8-4c2a-9c47-d8bb681b9aff' + K8SCronJobUIDKey = attribute.Key("k8s.cronjob.uid") + // The name of the CronJob. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + K8SCronJobNameKey = attribute.Key("k8s.cronjob.name") +) + +// The operating system (OS) on which the process represented by this resource is running. +const ( + // The operating system type. + // + // Type: Enum + // Required: Always + // Stability: stable + OSTypeKey = attribute.Key("os.type") + // Human readable (not intended to be parsed) OS version information, like e.g. + // reported by `ver` or `lsb_release -a` commands. 
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Microsoft Windows [Version 10.0.18363.778]', 'Ubuntu 18.04.1 LTS' + OSDescriptionKey = attribute.Key("os.description") + // Human readable operating system name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'iOS', 'Android', 'Ubuntu' + OSNameKey = attribute.Key("os.name") + // The version string of the operating system as defined in [Version + // Attributes](../../resource/semantic_conventions/README.md#version-attributes). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.2.1', '18.04.1' + OSVersionKey = attribute.Key("os.version") +) + +var ( + // Microsoft Windows + OSTypeWindows = OSTypeKey.String("windows") + // Linux + OSTypeLinux = OSTypeKey.String("linux") + // Apple Darwin + OSTypeDarwin = OSTypeKey.String("darwin") + // FreeBSD + OSTypeFreeBSD = OSTypeKey.String("freebsd") + // NetBSD + OSTypeNetBSD = OSTypeKey.String("netbsd") + // OpenBSD + OSTypeOpenBSD = OSTypeKey.String("openbsd") + // DragonFly BSD + OSTypeDragonflyBSD = OSTypeKey.String("dragonflybsd") + // HP-UX (Hewlett Packard Unix) + OSTypeHPUX = OSTypeKey.String("hpux") + // AIX (Advanced Interactive eXecutive) + OSTypeAIX = OSTypeKey.String("aix") + // Oracle Solaris + OSTypeSolaris = OSTypeKey.String("solaris") + // IBM z/OS + OSTypeZOS = OSTypeKey.String("z_os") +) + +// An operating system process. +const ( + // Process identifier (PID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 1234 + ProcessPIDKey = attribute.Key("process.pid") + // The name of the process executable. On Linux based systems, can be set to the + // `Name` in `proc/[pid]/status`. On Windows, can be set to the base name of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'otelcol' + ProcessExecutableNameKey = attribute.Key("process.executable.name") + // The full path to the process executable. On Linux based systems, can be set to + // the target of `proc/[pid]/exe`. On Windows, can be set to the result of + // `GetProcessImageFileNameW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: '/usr/bin/cmd/otelcol' + ProcessExecutablePathKey = attribute.Key("process.executable.path") + // The command used to launch the process (i.e. the command name). On Linux based + // systems, can be set to the zeroth string in `proc/[pid]/cmdline`. On Windows, + // can be set to the first parameter extracted from `GetCommandLineW`. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'cmd/otelcol' + ProcessCommandKey = attribute.Key("process.command") + // The full command used to launch the process as a single string representing the + // full command. On Windows, can be set to the result of `GetCommandLineW`. Do not + // set this if you have to assemble it just for monitoring; use + // `process.command_args` instead. + // + // Type: string + // Required: See below + // Stability: stable + // Examples: 'C:\\cmd\\otecol --config="my directory\\config.yaml"' + ProcessCommandLineKey = attribute.Key("process.command_line") + // All the command arguments (including the command/executable itself) as received + // by the process. On Linux-based systems (and some other Unixoid systems + // supporting procfs), can be set according to the list of null-delimited strings + // extracted from `proc/[pid]/cmdline`. 
For libc-based executables, this would be + // the full argv vector passed to `main`. + // + // Type: string[] + // Required: See below + // Stability: stable + // Examples: 'cmd/otecol', '--config=config.yaml' + ProcessCommandArgsKey = attribute.Key("process.command_args") + // The username of the user that owns the process. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'root' + ProcessOwnerKey = attribute.Key("process.owner") +) + +// The single (language) runtime instance which is monitored. +const ( + // The name of the runtime of this process. For compiled native binaries, this + // SHOULD be the name of the compiler. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'OpenJDK Runtime Environment' + ProcessRuntimeNameKey = attribute.Key("process.runtime.name") + // The version of the runtime of this process, as returned by the runtime without + // modification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '14.0.2' + ProcessRuntimeVersionKey = attribute.Key("process.runtime.version") + // An additional description about the runtime of the process, for example a + // specific vendor customization of the runtime environment. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Eclipse OpenJ9 Eclipse OpenJ9 VM openj9-0.21.0' + ProcessRuntimeDescriptionKey = attribute.Key("process.runtime.description") +) + +// A service instance. +const ( + // Logical name of the service. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'shoppingcart' + // Note: MUST be the same for all instances of horizontally scaled services. If + // the value was not specified, SDKs MUST fallback to `unknown_service:` + // concatenated with [`process.executable.name`](process.md#process), e.g. + // `unknown_service:bash`. If `process.executable.name` is not available, the + // value MUST be set to `unknown_service`. + ServiceNameKey = attribute.Key("service.name") + // A namespace for `service.name`. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Shop' + // Note: A string value having a meaning that helps to distinguish a group of + // services, for example the team name that owns a group of services. + // `service.name` is expected to be unique within the same namespace. If + // `service.namespace` is not specified in the Resource then `service.name` is + // expected to be unique for all services that have no explicit namespace defined + // (so the empty/unspecified namespace is simply one more valid namespace). Zero- + // length namespace string is assumed equal to unspecified namespace. + ServiceNamespaceKey = attribute.Key("service.namespace") + // The string ID of the service instance. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '627cc493-f310-47de-96bd-71410b7dec09' + // Note: MUST be unique for each instance of the same + // `service.namespace,service.name` pair (in other words + // `service.namespace,service.name,service.instance.id` triplet MUST be globally + // unique). The ID helps to distinguish instances of the same service that exist + // at the same time (e.g. instances of a horizontally scaled service). It is + // preferable for the ID to be persistent and stay the same for the lifetime of + // the service instance, however it is acceptable that the ID is ephemeral and + // changes during important lifetime events for the service (e.g. service + // restarts). 
If the service has no inherent unique ID that can be used as the + // value of this attribute it is recommended to generate a random Version 1 or + // Version 4 RFC 4122 UUID (services aiming for reproducible UUIDs may also use + // Version 5, see RFC 4122 for more recommendations). + ServiceInstanceIDKey = attribute.Key("service.instance.id") + // The version string of the service API or implementation. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '2.0.0' + ServiceVersionKey = attribute.Key("service.version") +) + +// The telemetry SDK used to capture data recorded by the instrumentation libraries. +const ( + // The name of the telemetry SDK as defined above. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'opentelemetry' + TelemetrySDKNameKey = attribute.Key("telemetry.sdk.name") + // The language of the telemetry SDK. + // + // Type: Enum + // Required: No + // Stability: stable + TelemetrySDKLanguageKey = attribute.Key("telemetry.sdk.language") + // The version string of the telemetry SDK. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetrySDKVersionKey = attribute.Key("telemetry.sdk.version") + // The version string of the auto instrumentation agent, if used. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '1.2.3' + TelemetryAutoVersionKey = attribute.Key("telemetry.auto.version") +) + +var ( + // cpp + TelemetrySDKLanguageCPP = TelemetrySDKLanguageKey.String("cpp") + // dotnet + TelemetrySDKLanguageDotnet = TelemetrySDKLanguageKey.String("dotnet") + // erlang + TelemetrySDKLanguageErlang = TelemetrySDKLanguageKey.String("erlang") + // go + TelemetrySDKLanguageGo = TelemetrySDKLanguageKey.String("go") + // java + TelemetrySDKLanguageJava = TelemetrySDKLanguageKey.String("java") + // nodejs + TelemetrySDKLanguageNodejs = TelemetrySDKLanguageKey.String("nodejs") + // php + TelemetrySDKLanguagePHP = TelemetrySDKLanguageKey.String("php") + // python + TelemetrySDKLanguagePython = TelemetrySDKLanguageKey.String("python") + // ruby + TelemetrySDKLanguageRuby = TelemetrySDKLanguageKey.String("ruby") + // webjs + TelemetrySDKLanguageWebjs = TelemetrySDKLanguageKey.String("webjs") +) + +// Resource describing the packaged software running the application code. Web engines are typically executed using process.runtime. +const ( + // The name of the web engine. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'WildFly' + WebEngineNameKey = attribute.Key("webengine.name") + // The version of the web engine. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '21.0.0' + WebEngineVersionKey = attribute.Key("webengine.version") + // Additional description of the web engine (e.g. detailed version and edition + // information). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'WildFly Full 21.0.0.Final (WildFly Core 13.0.1.Final) - 2.2.2.Final' + WebEngineDescriptionKey = attribute.Key("webengine.description") +) diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go new file mode 100644 index 0000000000..ec8b463d98 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/schema.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" + +// SchemaURL is the schema URL that matches the version of the semantic conventions +// that this package defines. Semconv packages starting from v1.4.0 must declare +// non-empty schema URL in the form https://opentelemetry.io/schemas/ +const SchemaURL = "https://opentelemetry.io/schemas/v1.7.0" diff --git a/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go new file mode 100644 index 0000000000..9b75bd77ae --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/semconv/v1.7.0/trace.go @@ -0,0 +1,1558 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated from semantic convention specification. DO NOT EDIT. + +package semconv // import "go.opentelemetry.io/otel/semconv/v1.7.0" + +import "go.opentelemetry.io/otel/attribute" + +// Span attributes used by AWS Lambda (in addition to general `faas` attributes). +const ( + // The full invoked ARN as provided on the `Context` passed to the function + // (`Lambda-Runtime-Invoked-Function-ARN` header on the `/runtime/invocation/next` + // applicable). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'arn:aws:lambda:us-east-1:123456:function:myfunction:myalias' + // Note: This may be different from `faas.id` if an alias is involved. + AWSLambdaInvokedARNKey = attribute.Key("aws.lambda.invoked_arn") +) + +// This document defines the attributes used to perform database client calls. +const ( + // An identifier for the database management system (DBMS) product being used. See + // below for a list of well-known identifiers. + // + // Type: Enum + // Required: Always + // Stability: stable + DBSystemKey = attribute.Key("db.system") + // The connection string used to connect to the database. It is recommended to + // remove embedded credentials. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Server=(localdb)\\v11.0;Integrated Security=true;' + DBConnectionStringKey = attribute.Key("db.connection_string") + // Username for accessing the database. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'readonly_user', 'reporting_user' + DBUserKey = attribute.Key("db.user") + // The fully-qualified class name of the [Java Database Connectivity + // (JDBC)](https://docs.oracle.com/javase/8/docs/technotes/guides/jdbc/) driver + // used to connect. 
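The SchemaURL constant added in schema.go above is what callers pass when constructing a Resource, which pins the attribute meanings to v1.7.0. A minimal sketch of that pattern (the service values are the documented examples, not real ones):

package main

import (
	"fmt"

	"go.opentelemetry.io/otel/sdk/resource"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

func main() {
	// Tying the resource to SchemaURL lets consumers translate attributes
	// across convention versions using schema transformations.
	res := resource.NewWithAttributes(
		semconv.SchemaURL,
		semconv.ServiceNameKey.String("shoppingcart"), // example value from the docs
		semconv.ServiceVersionKey.String("2.0.0"),
	)
	fmt.Println(res)
}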
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'org.postgresql.Driver', + // 'com.microsoft.sqlserver.jdbc.SQLServerDriver' + DBJDBCDriverClassnameKey = attribute.Key("db.jdbc.driver_classname") + // If no [tech-specific attribute](#call-level-attributes-for-specific- + // technologies) is defined, this attribute is used to report the name of the + // database being accessed. For commands that switch the database, this should be + // set to the target database (even if the command fails). + // + // Type: string + // Required: Required, if applicable and no more-specific attribute is defined. + // Stability: stable + // Examples: 'customers', 'main' + // Note: In some SQL databases, the database name to be used is called "schema + // name". + DBNameKey = attribute.Key("db.name") + // The database statement being executed. + // + // Type: string + // Required: Required if applicable and not explicitly disabled via + // instrumentation configuration. + // Stability: stable + // Examples: 'SELECT * FROM wuser_table', 'SET mykey "WuValue"' + // Note: The value may be sanitized to exclude sensitive information. + DBStatementKey = attribute.Key("db.statement") + // The name of the operation being executed, e.g. the [MongoDB command + // name](https://docs.mongodb.com/manual/reference/command/#database-operations) + // such as `findAndModify`, or the SQL keyword. + // + // Type: string + // Required: Required, if `db.statement` is not applicable. + // Stability: stable + // Examples: 'findAndModify', 'HMSET', 'SELECT' + // Note: When setting this to an SQL keyword, it is not recommended to attempt any + // client-side parsing of `db.statement` just to get this property, but it should + // be set if the operation name is provided by the library being instrumented. If + // the SQL statement has an ambiguous operation, or performs more than one + // operation, this value may be omitted. + DBOperationKey = attribute.Key("db.operation") +) + +var ( + // Some other SQL database. Fallback only. 
See notes + DBSystemOtherSQL = DBSystemKey.String("other_sql") + // Microsoft SQL Server + DBSystemMSSQL = DBSystemKey.String("mssql") + // MySQL + DBSystemMySQL = DBSystemKey.String("mysql") + // Oracle Database + DBSystemOracle = DBSystemKey.String("oracle") + // IBM DB2 + DBSystemDB2 = DBSystemKey.String("db2") + // PostgreSQL + DBSystemPostgreSQL = DBSystemKey.String("postgresql") + // Amazon Redshift + DBSystemRedshift = DBSystemKey.String("redshift") + // Apache Hive + DBSystemHive = DBSystemKey.String("hive") + // Cloudscape + DBSystemCloudscape = DBSystemKey.String("cloudscape") + // HyperSQL DataBase + DBSystemHSQLDB = DBSystemKey.String("hsqldb") + // Progress Database + DBSystemProgress = DBSystemKey.String("progress") + // SAP MaxDB + DBSystemMaxDB = DBSystemKey.String("maxdb") + // SAP HANA + DBSystemHanaDB = DBSystemKey.String("hanadb") + // Ingres + DBSystemIngres = DBSystemKey.String("ingres") + // FirstSQL + DBSystemFirstSQL = DBSystemKey.String("firstsql") + // EnterpriseDB + DBSystemEDB = DBSystemKey.String("edb") + // InterSystems Caché + DBSystemCache = DBSystemKey.String("cache") + // Adabas (Adaptable Database System) + DBSystemAdabas = DBSystemKey.String("adabas") + // Firebird + DBSystemFirebird = DBSystemKey.String("firebird") + // Apache Derby + DBSystemDerby = DBSystemKey.String("derby") + // FileMaker + DBSystemFilemaker = DBSystemKey.String("filemaker") + // Informix + DBSystemInformix = DBSystemKey.String("informix") + // InstantDB + DBSystemInstantDB = DBSystemKey.String("instantdb") + // InterBase + DBSystemInterbase = DBSystemKey.String("interbase") + // MariaDB + DBSystemMariaDB = DBSystemKey.String("mariadb") + // Netezza + DBSystemNetezza = DBSystemKey.String("netezza") + // Pervasive PSQL + DBSystemPervasive = DBSystemKey.String("pervasive") + // PointBase + DBSystemPointbase = DBSystemKey.String("pointbase") + // SQLite + DBSystemSqlite = DBSystemKey.String("sqlite") + // Sybase + DBSystemSybase = DBSystemKey.String("sybase") + // Teradata + DBSystemTeradata = DBSystemKey.String("teradata") + // Vertica + DBSystemVertica = DBSystemKey.String("vertica") + // H2 + DBSystemH2 = DBSystemKey.String("h2") + // ColdFusion IMQ + DBSystemColdfusion = DBSystemKey.String("coldfusion") + // Apache Cassandra + DBSystemCassandra = DBSystemKey.String("cassandra") + // Apache HBase + DBSystemHBase = DBSystemKey.String("hbase") + // MongoDB + DBSystemMongoDB = DBSystemKey.String("mongodb") + // Redis + DBSystemRedis = DBSystemKey.String("redis") + // Couchbase + DBSystemCouchbase = DBSystemKey.String("couchbase") + // CouchDB + DBSystemCouchDB = DBSystemKey.String("couchdb") + // Microsoft Azure Cosmos DB + DBSystemCosmosDB = DBSystemKey.String("cosmosdb") + // Amazon DynamoDB + DBSystemDynamoDB = DBSystemKey.String("dynamodb") + // Neo4j + DBSystemNeo4j = DBSystemKey.String("neo4j") + // Apache Geode + DBSystemGeode = DBSystemKey.String("geode") + // Elasticsearch + DBSystemElasticsearch = DBSystemKey.String("elasticsearch") + // Memcached + DBSystemMemcached = DBSystemKey.String("memcached") + // CockroachDB + DBSystemCockroachdb = DBSystemKey.String("cockroachdb") +) + +// Connection-level attributes for Microsoft SQL Server +const ( + // The Microsoft SQL Server [instance name](https://docs.microsoft.com/en- + // us/sql/connect/jdbc/building-the-connection-url?view=sql-server-ver15) + // connecting to. This name is used to determine the port of a named instance. 
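Taken together, the db.system enum and the generic db.* keys annotate client spans roughly as follows. A sketch only, using the global tracer (a no-op unless an SDK is installed) and the example values quoted in the comments above:

package main

import (
	"context"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	tracer := otel.Tracer("example")
	_, span := tracer.Start(context.Background(), "SELECT customers",
		trace.WithSpanKind(trace.SpanKindClient),
		trace.WithAttributes(
			semconv.DBSystemPostgreSQL,
			semconv.DBNameKey.String("customers"),
			semconv.DBStatementKey.String("SELECT * FROM wuser_table"),
		))
	defer span.End()
}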
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MSSQLSERVER' + // Note: If setting a `db.mssql.instance_name`, `net.peer.port` is no longer + // required (but still recommended if non-standard). + DBMSSQLInstanceNameKey = attribute.Key("db.mssql.instance_name") +) + +// Call-level attributes for Cassandra +const ( + // The name of the keyspace being accessed. To be used instead of the generic + // `db.name` attribute. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'mykeyspace' + DBCassandraKeyspaceKey = attribute.Key("db.cassandra.keyspace") + // The fetch size used for paging, i.e. how many rows will be returned at once. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5000 + DBCassandraPageSizeKey = attribute.Key("db.cassandra.page_size") + // The consistency level of the query. Based on consistency values from + // [CQL](https://docs.datastax.com/en/cassandra- + // oss/3.0/cassandra/dml/dmlConfigConsistency.html). + // + // Type: Enum + // Required: No + // Stability: stable + DBCassandraConsistencyLevelKey = attribute.Key("db.cassandra.consistency_level") + // The name of the primary table that the operation is acting upon, including the + // schema name (if applicable). + // + // Type: string + // Required: Recommended if available. + // Stability: stable + // Examples: 'mytable' + // Note: This mirrors the db.sql.table attribute but references cassandra rather + // than sql. It is not recommended to attempt any client-side parsing of + // `db.statement` just to get this property, but it should be set if it is + // provided by the library being instrumented. If the operation is acting upon an + // anonymous table, or more than one table, this value MUST NOT be set. + DBCassandraTableKey = attribute.Key("db.cassandra.table") + // Whether or not the query is idempotent. + // + // Type: boolean + // Required: No + // Stability: stable + DBCassandraIdempotenceKey = attribute.Key("db.cassandra.idempotence") + // The number of times a query was speculatively executed. Not set or `0` if the + // query was not executed speculatively. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 0, 2 + DBCassandraSpeculativeExecutionCountKey = attribute.Key("db.cassandra.speculative_execution_count") + // The ID of the coordinating node for a query. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'be13faa2-8574-4d71-926d-27f16cf8a7af' + DBCassandraCoordinatorIDKey = attribute.Key("db.cassandra.coordinator.id") + // The data center of the coordinating node for a query. 
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'us-west-2'
+ DBCassandraCoordinatorDCKey = attribute.Key("db.cassandra.coordinator.dc")
+)
+
+var (
+ // all
+ DBCassandraConsistencyLevelAll = DBCassandraConsistencyLevelKey.String("all")
+ // each_quorum
+ DBCassandraConsistencyLevelEachQuorum = DBCassandraConsistencyLevelKey.String("each_quorum")
+ // quorum
+ DBCassandraConsistencyLevelQuorum = DBCassandraConsistencyLevelKey.String("quorum")
+ // local_quorum
+ DBCassandraConsistencyLevelLocalQuorum = DBCassandraConsistencyLevelKey.String("local_quorum")
+ // one
+ DBCassandraConsistencyLevelOne = DBCassandraConsistencyLevelKey.String("one")
+ // two
+ DBCassandraConsistencyLevelTwo = DBCassandraConsistencyLevelKey.String("two")
+ // three
+ DBCassandraConsistencyLevelThree = DBCassandraConsistencyLevelKey.String("three")
+ // local_one
+ DBCassandraConsistencyLevelLocalOne = DBCassandraConsistencyLevelKey.String("local_one")
+ // any
+ DBCassandraConsistencyLevelAny = DBCassandraConsistencyLevelKey.String("any")
+ // serial
+ DBCassandraConsistencyLevelSerial = DBCassandraConsistencyLevelKey.String("serial")
+ // local_serial
+ DBCassandraConsistencyLevelLocalSerial = DBCassandraConsistencyLevelKey.String("local_serial")
+)
+
+// Call-level attributes for Apache HBase
+const (
+ // The [HBase namespace](https://hbase.apache.org/book.html#_namespace) being
+ // accessed. To be used instead of the generic `db.name` attribute.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'default'
+ DBHBaseNamespaceKey = attribute.Key("db.hbase.namespace")
+)
+
+// Call-level attributes for Redis
+const (
+ // The index of the database being accessed as used in the [`SELECT`
+ // command](https://redis.io/commands/select), provided as an integer. To be used
+ // instead of the generic `db.name` attribute.
+ //
+ // Type: int
+ // Required: Required, if other than the default database (`0`).
+ // Stability: stable
+ // Examples: 0, 1, 15
+ DBRedisDBIndexKey = attribute.Key("db.redis.database_index")
+)
+
+// Call-level attributes for MongoDB
+const (
+ // The collection being accessed within the database stated in `db.name`.
+ //
+ // Type: string
+ // Required: Always
+ // Stability: stable
+ // Examples: 'customers', 'products'
+ DBMongoDBCollectionKey = attribute.Key("db.mongodb.collection")
+)
+
+// Call-level attributes for SQL databases
+const (
+ // The name of the primary table that the operation is acting upon, including the
+ // schema name (if applicable).
+ //
+ // Type: string
+ // Required: Recommended if available.
+ // Stability: stable
+ // Examples: 'public.users', 'customers'
+ // Note: It is not recommended to attempt any client-side parsing of
+ // `db.statement` just to get this property, but it should be set if it is
+ // provided by the library being instrumented. If the operation is acting upon an
+ // anonymous table, or more than one table, this value MUST NOT be set.
+ DBSQLTableKey = attribute.Key("db.sql.table")
+)
+
+// This document defines the attributes used to report a single exception associated with a span.
+const (
+ // The type of the exception (its fully-qualified class name, if applicable). The
+ // dynamic type of the exception should be preferred over the static type in
+ // languages that support it.
+ //
+ // Type: string
+ // Required: No
+ // Stability: stable
+ // Examples: 'java.net.ConnectException', 'OSError'
+ ExceptionTypeKey = attribute.Key("exception.type")
+ // The exception message.
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Division by zero', "Can't convert 'int' object to str implicitly" + ExceptionMessageKey = attribute.Key("exception.message") + // A stacktrace as a string in the natural representation for the language + // runtime. The representation is to be determined and documented by each language + // SIG. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Exception in thread "main" java.lang.RuntimeException: Test + // exception\\n at ' + // 'com.example.GenerateTrace.methodB(GenerateTrace.java:13)\\n at ' + // 'com.example.GenerateTrace.methodA(GenerateTrace.java:9)\\n at ' + // 'com.example.GenerateTrace.main(GenerateTrace.java:5)' + ExceptionStacktraceKey = attribute.Key("exception.stacktrace") + // SHOULD be set to true if the exception event is recorded at a point where it is + // known that the exception is escaping the scope of the span. + // + // Type: boolean + // Required: No + // Stability: stable + // Note: An exception is considered to have escaped (or left) the scope of a span, + // if that span is ended while the exception is still logically "in flight". + // This may be actually "in flight" in some languages (e.g. if the exception + // is passed to a Context manager's `__exit__` method in Python) but will + // usually be caught at the point of recording the exception in most languages. + + // It is usually not possible to determine at the point where an exception is + // thrown + // whether it will escape the scope of a span. + // However, it is trivial to know that an exception + // will escape, if one checks for an active exception just before ending the span, + // as done in the [example above](#exception-end-example). + + // It follows that an exception may still escape the scope of the span + // even if the `exception.escaped` attribute was not set or set to false, + // since the event might have been recorded at a time where it was not + // clear whether the exception will escape. + ExceptionEscapedKey = attribute.Key("exception.escaped") +) + +// This semantic convention describes an instance of a function that runs without provisioning or managing of servers (also known as serverless functions or Function as a Service (FaaS)) with spans. +const ( + // Type of the trigger on which the function is executed. + // + // Type: Enum + // Required: On FaaS instances, faas.trigger MUST be set on incoming invocations. + // Clients invoking FaaS instances MUST set `faas.trigger` on outgoing + // invocations, if it is known to the client. This is, for example, not the case, + // when the transport layer is abstracted in a FaaS client framework without + // access to its configuration. + // Stability: stable + FaaSTriggerKey = attribute.Key("faas.trigger") + // The execution ID of the current function execution. 
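The exception.* keys defined above are the attributes behind span error events. A sketch of usage (the error text is invented; with a real SDK span, RecordError emits an event carrying exception.type and exception.message, and exception.escaped is added by hand here only to exercise that key):

package main

import (
	"context"
	"errors"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func main() {
	_, span := otel.Tracer("example").Start(context.Background(), "operation")
	defer span.End()

	err := errors.New("Division by zero") // invented error for illustration
	span.RecordError(err, trace.WithAttributes(semconv.ExceptionEscapedKey.Bool(true)))
}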
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'af9d5aa4-a685-4c5f-a22b-444f80b3cc28' + FaaSExecutionKey = attribute.Key("faas.execution") +) + +var ( + // A response to some data source operation such as a database or filesystem read/write + FaaSTriggerDatasource = FaaSTriggerKey.String("datasource") + // To provide an answer to an inbound HTTP request + FaaSTriggerHTTP = FaaSTriggerKey.String("http") + // A function is set to be executed when messages are sent to a messaging system + FaaSTriggerPubsub = FaaSTriggerKey.String("pubsub") + // A function is scheduled to be executed regularly + FaaSTriggerTimer = FaaSTriggerKey.String("timer") + // If none of the others apply + FaaSTriggerOther = FaaSTriggerKey.String("other") +) + +// Semantic Convention for FaaS triggered as a response to some data source operation such as a database or filesystem read/write. +const ( + // The name of the source on which the triggering operation was performed. For + // example, in Cloud Storage or S3 corresponds to the bucket name, and in Cosmos + // DB to the database name. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'myBucketName', 'myDBName' + FaaSDocumentCollectionKey = attribute.Key("faas.document.collection") + // Describes the type of the operation that was performed on the data. + // + // Type: Enum + // Required: Always + // Stability: stable + FaaSDocumentOperationKey = attribute.Key("faas.document.operation") + // A string containing the time when the data was accessed in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSDocumentTimeKey = attribute.Key("faas.document.time") + // The document name/table subjected to the operation. For example, in Cloud + // Storage or S3 is the name of the file, and in Cosmos DB the table name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myFile.txt', 'myTableName' + FaaSDocumentNameKey = attribute.Key("faas.document.name") +) + +var ( + // When a new object is created + FaaSDocumentOperationInsert = FaaSDocumentOperationKey.String("insert") + // When an object is modified + FaaSDocumentOperationEdit = FaaSDocumentOperationKey.String("edit") + // When an object is deleted + FaaSDocumentOperationDelete = FaaSDocumentOperationKey.String("delete") +) + +// Semantic Convention for FaaS scheduled to be executed regularly. +const ( + // A string containing the function invocation time in the [ISO + // 8601](https://www.iso.org/iso-8601-date-and-time-format.html) format expressed + // in [UTC](https://www.w3.org/TR/NOTE-datetime). + // + // Type: string + // Required: Always + // Stability: stable + // Examples: '2020-01-23T13:47:06Z' + FaaSTimeKey = attribute.Key("faas.time") + // A string containing the schedule period as [Cron Expression](https://docs.oracl + // e.com/cd/E12058_01/doc/doc.1014/e12030/cron_expressions.htm). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0/5 * * * ? *' + FaaSCronKey = attribute.Key("faas.cron") +) + +// Contains additional attributes for incoming FaaS spans. +const ( + // A boolean that is true if the serverless function is executed for the first + // time (aka cold-start). 
+ // + // Type: boolean + // Required: No + // Stability: stable + FaaSColdstartKey = attribute.Key("faas.coldstart") +) + +// Contains additional attributes for outgoing FaaS spans. +const ( + // The name of the invoked function. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'my-function' + // Note: SHOULD be equal to the `faas.name` resource attribute of the invoked + // function. + FaaSInvokedNameKey = attribute.Key("faas.invoked_name") + // The cloud provider of the invoked function. + // + // Type: Enum + // Required: Always + // Stability: stable + // Note: SHOULD be equal to the `cloud.provider` resource attribute of the invoked + // function. + FaaSInvokedProviderKey = attribute.Key("faas.invoked_provider") + // The cloud region of the invoked function. + // + // Type: string + // Required: For some cloud providers, like AWS or GCP, the region in which a + // function is hosted is essential to uniquely identify the function and also part + // of its endpoint. Since it's part of the endpoint being called, the region is + // always known to clients. In these cases, `faas.invoked_region` MUST be set + // accordingly. If the region is unknown to the client or not required for + // identifying the invoked function, setting `faas.invoked_region` is optional. + // Stability: stable + // Examples: 'eu-central-1' + // Note: SHOULD be equal to the `cloud.region` resource attribute of the invoked + // function. + FaaSInvokedRegionKey = attribute.Key("faas.invoked_region") +) + +var ( + // Alibaba Cloud + FaaSInvokedProviderAlibabaCloud = FaaSInvokedProviderKey.String("alibaba_cloud") + // Amazon Web Services + FaaSInvokedProviderAWS = FaaSInvokedProviderKey.String("aws") + // Microsoft Azure + FaaSInvokedProviderAzure = FaaSInvokedProviderKey.String("azure") + // Google Cloud Platform + FaaSInvokedProviderGCP = FaaSInvokedProviderKey.String("gcp") +) + +// These attributes may be used for any network related operation. +const ( + // Transport protocol used. See note below. + // + // Type: Enum + // Required: No + // Stability: stable + NetTransportKey = attribute.Key("net.transport") + // Remote address of the peer (dotted decimal for IPv4 or + // [RFC5952](https://tools.ietf.org/html/rfc5952) for IPv6) + // + // Type: string + // Required: No + // Stability: stable + // Examples: '127.0.0.1' + NetPeerIPKey = attribute.Key("net.peer.ip") + // Remote port number. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 80, 8080, 443 + NetPeerPortKey = attribute.Key("net.peer.port") + // Remote hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + NetPeerNameKey = attribute.Key("net.peer.name") + // Like `net.peer.ip` but for the host IP. Useful in case of a multi-IP host. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '192.168.0.1' + NetHostIPKey = attribute.Key("net.host.ip") + // Like `net.peer.port` but for the host port. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 35555 + NetHostPortKey = attribute.Key("net.host.port") + // Local hostname or similar, see note below. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'localhost' + NetHostNameKey = attribute.Key("net.host.name") + // The internet connection type currently being used by the host. 
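The net.peer.* keys above are usually derived from a socket address. peerAttrs below is a hypothetical helper showing that derivation; error handling and non-TCP transports are deliberately ignored:

package main

import (
	"fmt"
	"net"
	"strconv"

	"go.opentelemetry.io/otel/attribute"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
)

// peerAttrs splits "host:port" into the net.peer.* keys defined above.
func peerAttrs(remoteAddr string) []attribute.KeyValue {
	host, portStr, _ := net.SplitHostPort(remoteAddr) // errors elided for brevity
	port, _ := strconv.Atoi(portStr)
	return []attribute.KeyValue{
		semconv.NetTransportTCP, // assumed; callers should know the transport
		semconv.NetPeerIPKey.String(host),
		semconv.NetPeerPortKey.Int(port),
	}
}

func main() {
	fmt.Println(peerAttrs("127.0.0.1:8080"))
}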
+ // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'wifi' + NetHostConnectionTypeKey = attribute.Key("net.host.connection.type") + // This describes more details regarding the connection.type. It may be the type + // of cell technology connection, but it could be used for describing details + // about a wifi connection. + // + // Type: Enum + // Required: No + // Stability: stable + // Examples: 'LTE' + NetHostConnectionSubtypeKey = attribute.Key("net.host.connection.subtype") + // The name of the mobile carrier. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'sprint' + NetHostCarrierNameKey = attribute.Key("net.host.carrier.name") + // The mobile carrier country code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '310' + NetHostCarrierMccKey = attribute.Key("net.host.carrier.mcc") + // The mobile carrier network code. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '001' + NetHostCarrierMncKey = attribute.Key("net.host.carrier.mnc") + // The ISO 3166-1 alpha-2 2-character country code associated with the mobile + // carrier network. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'DE' + NetHostCarrierIccKey = attribute.Key("net.host.carrier.icc") +) + +var ( + // ip_tcp + NetTransportTCP = NetTransportKey.String("ip_tcp") + // ip_udp + NetTransportUDP = NetTransportKey.String("ip_udp") + // Another IP-based protocol + NetTransportIP = NetTransportKey.String("ip") + // Unix Domain socket. See below + NetTransportUnix = NetTransportKey.String("unix") + // Named or anonymous pipe. See note below + NetTransportPipe = NetTransportKey.String("pipe") + // In-process communication + NetTransportInProc = NetTransportKey.String("inproc") + // Something else (non IP-based) + NetTransportOther = NetTransportKey.String("other") +) + +var ( + // wifi + NetHostConnectionTypeWifi = NetHostConnectionTypeKey.String("wifi") + // wired + NetHostConnectionTypeWired = NetHostConnectionTypeKey.String("wired") + // cell + NetHostConnectionTypeCell = NetHostConnectionTypeKey.String("cell") + // unavailable + NetHostConnectionTypeUnavailable = NetHostConnectionTypeKey.String("unavailable") + // unknown + NetHostConnectionTypeUnknown = NetHostConnectionTypeKey.String("unknown") +) + +var ( + // GPRS + NetHostConnectionSubtypeGprs = NetHostConnectionSubtypeKey.String("gprs") + // EDGE + NetHostConnectionSubtypeEdge = NetHostConnectionSubtypeKey.String("edge") + // UMTS + NetHostConnectionSubtypeUmts = NetHostConnectionSubtypeKey.String("umts") + // CDMA + NetHostConnectionSubtypeCdma = NetHostConnectionSubtypeKey.String("cdma") + // EVDO Rel. 0 + NetHostConnectionSubtypeEvdo0 = NetHostConnectionSubtypeKey.String("evdo_0") + // EVDO Rev. A + NetHostConnectionSubtypeEvdoA = NetHostConnectionSubtypeKey.String("evdo_a") + // CDMA2000 1XRTT + NetHostConnectionSubtypeCdma20001xrtt = NetHostConnectionSubtypeKey.String("cdma2000_1xrtt") + // HSDPA + NetHostConnectionSubtypeHsdpa = NetHostConnectionSubtypeKey.String("hsdpa") + // HSUPA + NetHostConnectionSubtypeHsupa = NetHostConnectionSubtypeKey.String("hsupa") + // HSPA + NetHostConnectionSubtypeHspa = NetHostConnectionSubtypeKey.String("hspa") + // IDEN + NetHostConnectionSubtypeIden = NetHostConnectionSubtypeKey.String("iden") + // EVDO Rev. 
B + NetHostConnectionSubtypeEvdoB = NetHostConnectionSubtypeKey.String("evdo_b") + // LTE + NetHostConnectionSubtypeLte = NetHostConnectionSubtypeKey.String("lte") + // EHRPD + NetHostConnectionSubtypeEhrpd = NetHostConnectionSubtypeKey.String("ehrpd") + // HSPAP + NetHostConnectionSubtypeHspap = NetHostConnectionSubtypeKey.String("hspap") + // GSM + NetHostConnectionSubtypeGsm = NetHostConnectionSubtypeKey.String("gsm") + // TD-SCDMA + NetHostConnectionSubtypeTdScdma = NetHostConnectionSubtypeKey.String("td_scdma") + // IWLAN + NetHostConnectionSubtypeIwlan = NetHostConnectionSubtypeKey.String("iwlan") + // 5G NR (New Radio) + NetHostConnectionSubtypeNr = NetHostConnectionSubtypeKey.String("nr") + // 5G NRNSA (New Radio Non-Standalone) + NetHostConnectionSubtypeNrnsa = NetHostConnectionSubtypeKey.String("nrnsa") + // LTE CA + NetHostConnectionSubtypeLteCa = NetHostConnectionSubtypeKey.String("lte_ca") +) + +// Operations that access some remote service. +const ( + // The [`service.name`](../../resource/semantic_conventions/README.md#service) of + // the remote service. SHOULD be equal to the actual `service.name` resource + // attribute of the remote service if any. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AuthTokenCache' + PeerServiceKey = attribute.Key("peer.service") +) + +// These attributes may be used for any operation with an authenticated and/or authorized enduser. +const ( + // Username or client_id extracted from the access token or + // [Authorization](https://tools.ietf.org/html/rfc7235#section-4.2) header in the + // inbound request from outside the system. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'username' + EnduserIDKey = attribute.Key("enduser.id") + // Actual/assumed role the client is making the request under extracted from token + // or application security context. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'admin' + EnduserRoleKey = attribute.Key("enduser.role") + // Scopes or granted authorities the client currently possesses extracted from + // token or application security context. The value would come from the scope + // associated with an [OAuth 2.0 Access + // Token](https://tools.ietf.org/html/rfc6749#section-3.3) or an attribute value + // in a [SAML 2.0 Assertion](http://docs.oasis- + // open.org/security/saml/Post2.0/sstc-saml-tech-overview-2.0.html). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'read:message, write:files' + EnduserScopeKey = attribute.Key("enduser.scope") +) + +// These attributes may be used for any operation to store information about a thread that started a span. +const ( + // Current "managed" thread ID (as opposed to OS thread ID). + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + ThreadIDKey = attribute.Key("thread.id") + // Current thread name. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'main' + ThreadNameKey = attribute.Key("thread.name") +) + +// These attributes allow to report this unit of code and therefore to provide more context about the span. +const ( + // The method or function name, or equivalent (usually rightmost part of the code + // unit's name). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'serveRequest' + CodeFunctionKey = attribute.Key("code.function") + // The "namespace" within which `code.function` is defined. 
Usually the qualified + // class or module name, such that `code.namespace` + some separator + + // `code.function` form a unique identifier for the code unit. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'com.example.MyHTTPService' + CodeNamespaceKey = attribute.Key("code.namespace") + // The source code file name that identifies the code unit as uniquely as possible + // (preferably an absolute file path). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/usr/local/MyApplication/content_root/app/index.php' + CodeFilepathKey = attribute.Key("code.filepath") + // The line number in `code.filepath` best representing the operation. It SHOULD + // point within the code unit named in `code.function`. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 42 + CodeLineNumberKey = attribute.Key("code.lineno") +) + +// This document defines semantic conventions for HTTP client and server Spans. +const ( + // HTTP request method. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'GET', 'POST', 'HEAD' + HTTPMethodKey = attribute.Key("http.method") + // Full HTTP request URL in the form `scheme://host[:port]/path?query[#fragment]`. + // Usually the fragment is not transmitted over HTTP, but if it is known, it + // should be included nevertheless. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'https://www.foo.bar/search?q=OpenTelemetry#SemConv' + // Note: `http.url` MUST NOT contain credentials passed via URL in form of + // `https://username:password@www.example.com/`. In such case the attribute's + // value should be `https://www.example.com/`. + HTTPURLKey = attribute.Key("http.url") + // The full request target as passed in a HTTP request line or equivalent. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/path/12314/?q=ddds#123' + HTTPTargetKey = attribute.Key("http.target") + // The value of the [HTTP host + // header](https://tools.ietf.org/html/rfc7230#section-5.4). An empty Host header + // should also be reported, see note. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'www.example.org' + // Note: When the header is present but empty the attribute SHOULD be set to the + // empty string. Note that this is a valid situation that is expected in certain + // cases, according the aforementioned [section of RFC + // 7230](https://tools.ietf.org/html/rfc7230#section-5.4). When the header is not + // set the attribute MUST NOT be set. + HTTPHostKey = attribute.Key("http.host") + // The URI scheme identifying the used protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'http', 'https' + HTTPSchemeKey = attribute.Key("http.scheme") + // [HTTP response status code](https://tools.ietf.org/html/rfc7231#section-6). + // + // Type: int + // Required: If and only if one was received/sent. + // Stability: stable + // Examples: 200 + HTTPStatusCodeKey = attribute.Key("http.status_code") + // Kind of HTTP protocol used. + // + // Type: Enum + // Required: No + // Stability: stable + // Note: If `net.transport` is not specified, it can be assumed to be `IP.TCP` + // except if `http.flavor` is `QUIC`, in which case `IP.UDP` is assumed. + HTTPFlavorKey = attribute.Key("http.flavor") + // Value of the [HTTP User- + // Agent](https://tools.ietf.org/html/rfc7231#section-5.5.3) header sent by the + // client. 
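For the http.* keys defined above, a server-side instrumentation would set them roughly like this. Illustrative only; real instrumentations such as otelhttp derive many more attributes from the request:

package main

import (
	"net/http"

	"go.opentelemetry.io/otel"
	semconv "go.opentelemetry.io/otel/semconv/v1.7.0"
	"go.opentelemetry.io/otel/trace"
)

func handler(w http.ResponseWriter, r *http.Request) {
	_, span := otel.Tracer("example").Start(r.Context(), r.URL.Path,
		trace.WithSpanKind(trace.SpanKindServer),
		trace.WithAttributes(
			semconv.HTTPMethodKey.String(r.Method),
			semconv.HTTPTargetKey.String(r.RequestURI),
			semconv.HTTPSchemeKey.String("http"),
		))
	defer span.End()
	w.WriteHeader(http.StatusOK)
	span.SetAttributes(semconv.HTTPStatusCodeKey.Int(http.StatusOK))
}

func main() {
	http.HandleFunc("/", handler)
	_ = http.ListenAndServe(":8080", nil) // port is arbitrary for the sketch
}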
+ // + // Type: string + // Required: No + // Stability: stable + // Examples: 'CERN-LineMode/2.15 libwww/2.17b3' + HTTPUserAgentKey = attribute.Key("http.user_agent") + // The size of the request payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPRequestContentLengthKey = attribute.Key("http.request_content_length") + // The size of the uncompressed request payload body after transport decoding. Not + // set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPRequestContentLengthUncompressedKey = attribute.Key("http.request_content_length_uncompressed") + // The size of the response payload body in bytes. This is the number of bytes + // transferred excluding headers and is often, but not always, present as the + // [Content-Length](https://tools.ietf.org/html/rfc7230#section-3.3.2) header. For + // requests using transport encoding, this should be the compressed size. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 3495 + HTTPResponseContentLengthKey = attribute.Key("http.response_content_length") + // The size of the uncompressed response payload body after transport decoding. + // Not set if transport encoding not used. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 5493 + HTTPResponseContentLengthUncompressedKey = attribute.Key("http.response_content_length_uncompressed") +) + +var ( + // HTTP 1.0 + HTTPFlavorHTTP10 = HTTPFlavorKey.String("1.0") + // HTTP 1.1 + HTTPFlavorHTTP11 = HTTPFlavorKey.String("1.1") + // HTTP 2 + HTTPFlavorHTTP20 = HTTPFlavorKey.String("2.0") + // SPDY protocol + HTTPFlavorSPDY = HTTPFlavorKey.String("SPDY") + // QUIC protocol + HTTPFlavorQUIC = HTTPFlavorKey.String("QUIC") +) + +// Semantic Convention for HTTP Server +const ( + // The primary server name of the matched virtual host. This should be obtained + // via configuration. If no such configuration can be obtained, this attribute + // MUST NOT be set ( `net.host.name` should be used instead). + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'example.com' + // Note: `http.url` is usually not readily available on the server side but would + // have to be assembled in a cumbersome and sometimes lossy process from other + // information (see e.g. open-telemetry/opentelemetry-python/pull/148). It is thus + // preferred to supply the raw data that is available. + HTTPServerNameKey = attribute.Key("http.server_name") + // The matched route (path template). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '/users/:userID?' + HTTPRouteKey = attribute.Key("http.route") + // The IP address of the original client behind all proxies, if known (e.g. from + // [X-Forwarded-For](https://developer.mozilla.org/en- + // US/docs/Web/HTTP/Headers/X-Forwarded-For)). + // + // Type: string + // Required: No + // Stability: stable + // Examples: '83.164.160.102' + // Note: This is not necessarily the same as `net.peer.ip`, which would + // identify the network-level peer, which may be a proxy. 
+ + // This attribute should be set when a source of information different + // from the one used for `net.peer.ip` is available even if that other + // source just confirms the same value as `net.peer.ip`. + // Rationale: For `net.peer.ip`, one typically does not know if it + // comes from a proxy, reverse proxy, or the actual client. Setting + // `http.client_ip` when it's the same as `net.peer.ip` means that + // one is at least somewhat confident that the address is not that of + // the closest proxy. + HTTPClientIPKey = attribute.Key("http.client_ip") +) + +// Attributes that exist for multiple DynamoDB request types. +const ( + // The keys in the `RequestItems` object field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'Users', 'Cats' + AWSDynamoDBTableNamesKey = attribute.Key("aws.dynamodb.table_names") + // The JSON-serialized value of each item in the `ConsumedCapacity` response + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "CapacityUnits": number, "GlobalSecondaryIndexes": { "string" : { + // "CapacityUnits": number, "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }, "LocalSecondaryIndexes": { "string" : { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number } }, + // "ReadCapacityUnits": number, "Table": { "CapacityUnits": number, + // "ReadCapacityUnits": number, "WriteCapacityUnits": number }, "TableName": + // "string", "WriteCapacityUnits": number }' + AWSDynamoDBConsumedCapacityKey = attribute.Key("aws.dynamodb.consumed_capacity") + // The JSON-serialized value of the `ItemCollectionMetrics` response field. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '{ "string" : [ { "ItemCollectionKey": { "string" : { "B": blob, + // "BOOL": boolean, "BS": [ blob ], "L": [ "AttributeValue" ], "M": { "string" : + // "AttributeValue" }, "N": "string", "NS": [ "string" ], "NULL": boolean, "S": + // "string", "SS": [ "string" ] } }, "SizeEstimateRangeGB": [ number ] } ] }' + AWSDynamoDBItemCollectionMetricsKey = attribute.Key("aws.dynamodb.item_collection_metrics") + // The value of the `ProvisionedThroughput.ReadCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedReadCapacityKey = attribute.Key("aws.dynamodb.provisioned_read_capacity") + // The value of the `ProvisionedThroughput.WriteCapacityUnits` request parameter. + // + // Type: double + // Required: No + // Stability: stable + // Examples: 1.0, 2.0 + AWSDynamoDBProvisionedWriteCapacityKey = attribute.Key("aws.dynamodb.provisioned_write_capacity") + // The value of the `ConsistentRead` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBConsistentReadKey = attribute.Key("aws.dynamodb.consistent_read") + // The value of the `ProjectionExpression` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Title', 'Title, Price, Color', 'Title, Description, RelatedItems, + // ProductReviews' + AWSDynamoDBProjectionKey = attribute.Key("aws.dynamodb.projection") + // The value of the `Limit` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBLimitKey = attribute.Key("aws.dynamodb.limit") + // The value of the `AttributesToGet` request parameter.
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: 'lives', 'id' + AWSDynamoDBAttributesToGetKey = attribute.Key("aws.dynamodb.attributes_to_get") + // The value of the `IndexName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'name_to_group' + AWSDynamoDBIndexNameKey = attribute.Key("aws.dynamodb.index_name") + // The value of the `Select` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'ALL_ATTRIBUTES', 'COUNT' + AWSDynamoDBSelectKey = attribute.Key("aws.dynamodb.select") +) + +// DynamoDB.CreateTable +const ( + // The JSON-serialized value of each item of the `GlobalSecondaryIndexes` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexName": "string", "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" }, "ProvisionedThroughput": { "ReadCapacityUnits": + // number, "WriteCapacityUnits": number } }' + AWSDynamoDBGlobalSecondaryIndexesKey = attribute.Key("aws.dynamodb.global_secondary_indexes") + // The JSON-serialized value of each item of the `LocalSecondaryIndexes` request + // field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "IndexARN": "string", "IndexName": "string", "IndexSizeBytes": + // number, "ItemCount": number, "KeySchema": [ { "AttributeName": "string", + // "KeyType": "string" } ], "Projection": { "NonKeyAttributes": [ "string" ], + // "ProjectionType": "string" } }' + AWSDynamoDBLocalSecondaryIndexesKey = attribute.Key("aws.dynamodb.local_secondary_indexes") +) + +// DynamoDB.ListTables +const ( + // The value of the `ExclusiveStartTableName` request parameter. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Users', 'CatsTable' + AWSDynamoDBExclusiveStartTableKey = attribute.Key("aws.dynamodb.exclusive_start_table") + // The number of items in the `TableNames` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 20 + AWSDynamoDBTableCountKey = attribute.Key("aws.dynamodb.table_count") +) + +// DynamoDB.Query +const ( + // The value of the `ScanIndexForward` request parameter. + // + // Type: boolean + // Required: No + // Stability: stable + AWSDynamoDBScanForwardKey = attribute.Key("aws.dynamodb.scan_forward") +) + +// DynamoDB.Scan +const ( + // The value of the `Segment` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBSegmentKey = attribute.Key("aws.dynamodb.segment") + // The value of the `TotalSegments` request parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 100 + AWSDynamoDBTotalSegmentsKey = attribute.Key("aws.dynamodb.total_segments") + // The value of the `Count` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 10 + AWSDynamoDBCountKey = attribute.Key("aws.dynamodb.count") + // The value of the `ScannedCount` response parameter. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 50 + AWSDynamoDBScannedCountKey = attribute.Key("aws.dynamodb.scanned_count") +) + +// DynamoDB.UpdateTable +const ( + // The JSON-serialized value of each item in the `AttributeDefinitions` request + // field.
+ // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "AttributeName": "string", "AttributeType": "string" }' + AWSDynamoDBAttributeDefinitionsKey = attribute.Key("aws.dynamodb.attribute_definitions") + // The JSON-serialized value of each item in the `GlobalSecondaryIndexUpdates` + // request field. + // + // Type: string[] + // Required: No + // Stability: stable + // Examples: '{ "Create": { "IndexName": "string", "KeySchema": [ { + // "AttributeName": "string", "KeyType": "string" } ], "Projection": { + // "NonKeyAttributes": [ "string" ], "ProjectionType": "string" }, + // "ProvisionedThroughput": { "ReadCapacityUnits": number, "WriteCapacityUnits": + // number } }' + AWSDynamoDBGlobalSecondaryIndexUpdatesKey = attribute.Key("aws.dynamodb.global_secondary_index_updates") +) + +// This document defines the attributes used in messaging systems. +const ( + // A string identifying the messaging system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'kafka', 'rabbitmq', 'activemq', 'AmazonSQS' + MessagingSystemKey = attribute.Key("messaging.system") + // The message destination name. This might be equal to the span name but is + // required nevertheless. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'MyQueue', 'MyTopic' + MessagingDestinationKey = attribute.Key("messaging.destination") + // The kind of message destination. + // + // Type: Enum + // Required: Required only if the message destination is either a `queue` or + // `topic`. + // Stability: stable + MessagingDestinationKindKey = attribute.Key("messaging.destination_kind") + // A boolean that is true if the message destination is temporary. + // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingTempDestinationKey = attribute.Key("messaging.temp_destination") + // The name of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'AMQP', 'MQTT' + MessagingProtocolKey = attribute.Key("messaging.protocol") + // The version of the transport protocol. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '0.9.1' + MessagingProtocolVersionKey = attribute.Key("messaging.protocol_version") + // Connection string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'tibjmsnaming://localhost:7222', + // 'https://queue.amazonaws.com/80398EXAMPLE/MyQueue' + MessagingURLKey = attribute.Key("messaging.url") + // A value used by the messaging system as an identifier for the message, + // represented as a string. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '452a7c7c7c7048c2f887f61572b18fc2' + MessagingMessageIDKey = attribute.Key("messaging.message_id") + // The [conversation ID](#conversations) identifying the conversation to which the + // message belongs, represented as a string. Sometimes called "Correlation ID". + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'MyConversationID' + MessagingConversationIDKey = attribute.Key("messaging.conversation_id") + // The (uncompressed) size of the message payload in bytes. Also use this + // attribute if it is unknown whether the compressed or uncompressed payload size + // is reported.
+ // + // Type: int + // Required: No + // Stability: stable + // Examples: 2738 + MessagingMessagePayloadSizeBytesKey = attribute.Key("messaging.message_payload_size_bytes") + // The compressed size of the message payload in bytes. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2048 + MessagingMessagePayloadCompressedSizeBytesKey = attribute.Key("messaging.message_payload_compressed_size_bytes") +) + +var ( + // A message sent to a queue + MessagingDestinationKindQueue = MessagingDestinationKindKey.String("queue") + // A message sent to a topic + MessagingDestinationKindTopic = MessagingDestinationKindKey.String("topic") +) + +// Semantic convention for a consumer of messages received from a messaging system +const ( + // A string identifying the kind of message consumption as defined in the + // [Operation names](#operation-names) section above. If the operation is "send", + // this attribute MUST NOT be set, since the operation can be inferred from the + // span kind in that case. + // + // Type: Enum + // Required: No + // Stability: stable + MessagingOperationKey = attribute.Key("messaging.operation") + // The identifier for the consumer receiving a message. For Kafka, set it to + // `{messaging.kafka.consumer_group} - {messaging.kafka.client_id}`, if both are + // present, or only `messaging.kafka.consumer_group`. For brokers, such as + // RabbitMQ and Artemis, set it to the `client_id` of the client consuming the + // message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'mygroup - client-6' + MessagingConsumerIDKey = attribute.Key("messaging.consumer_id") +) + +var ( + // receive + MessagingOperationReceive = MessagingOperationKey.String("receive") + // process + MessagingOperationProcess = MessagingOperationKey.String("process") +) + +// Attributes for RabbitMQ +const ( + // RabbitMQ message routing key. + // + // Type: string + // Required: Unless it is empty. + // Stability: stable + // Examples: 'myKey' + MessagingRabbitmqRoutingKeyKey = attribute.Key("messaging.rabbitmq.routing_key") +) + +// Attributes for Apache Kafka +const ( + // Message keys in Kafka are used for grouping alike messages to ensure they're + // processed on the same partition. They differ from `messaging.message_id` in + // that they're not unique. If the key is `null`, the attribute MUST NOT be set. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'myKey' + // Note: If the key type is not string, its string representation has to be + // supplied for the attribute. If the key has no unambiguous, canonical string + // form, don't include its value. + MessagingKafkaMessageKeyKey = attribute.Key("messaging.kafka.message_key") + // Name of the Kafka Consumer Group that is handling the message. Only applies to + // consumers, not producers. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'my-group' + MessagingKafkaConsumerGroupKey = attribute.Key("messaging.kafka.consumer_group") + // Client ID for the Consumer or Producer that is handling the message. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'client-5' + MessagingKafkaClientIDKey = attribute.Key("messaging.kafka.client_id") + // Partition the message is sent to. + // + // Type: int + // Required: No + // Stability: stable + // Examples: 2 + MessagingKafkaPartitionKey = attribute.Key("messaging.kafka.partition") + // A boolean that is true if the message is a tombstone.
+ // + // Type: boolean + // Required: If missing, it is assumed to be false. + // Stability: stable + MessagingKafkaTombstoneKey = attribute.Key("messaging.kafka.tombstone") +) + +// This document defines semantic conventions for remote procedure calls. +const ( + // A string identifying the remoting system. + // + // Type: string + // Required: Always + // Stability: stable + // Examples: 'grpc', 'java_rmi', 'wcf' + RPCSystemKey = attribute.Key("rpc.system") + // The full (logical) name of the service being called, including its package + // name, if applicable. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'myservice.EchoService' + // Note: This is the logical name of the service from the RPC interface + // perspective, which can be different from the name of any implementing class. + // The `code.namespace` attribute may be used to store the latter (despite the + // attribute name, it may include a class name; e.g., class with method actually + // executing the call on the server side, RPC client stub class on the client + // side). + RPCServiceKey = attribute.Key("rpc.service") + // The name of the (logical) method being called, must be equal to the $method + // part in the span name. + // + // Type: string + // Required: No, but recommended + // Stability: stable + // Examples: 'exampleMethod' + // Note: This is the logical name of the method from the RPC interface + // perspective, which can be different from the name of any implementing + // method/function. The `code.function` attribute may be used to store the latter + // (e.g., method actually executing the call on the server side, RPC client stub + // method on the client side). + RPCMethodKey = attribute.Key("rpc.method") +) + +// Tech-specific attributes for gRPC. +const ( + // The [numeric status + // code](https://github.com/grpc/grpc/blob/v1.33.2/doc/statuscodes.md) of the gRPC + // request. + // + // Type: Enum + // Required: Always + // Stability: stable + RPCGRPCStatusCodeKey = attribute.Key("rpc.grpc.status_code") +) + +var ( + // OK + RPCGRPCStatusCodeOk = RPCGRPCStatusCodeKey.Int(0) + // CANCELLED + RPCGRPCStatusCodeCancelled = RPCGRPCStatusCodeKey.Int(1) + // UNKNOWN + RPCGRPCStatusCodeUnknown = RPCGRPCStatusCodeKey.Int(2) + // INVALID_ARGUMENT + RPCGRPCStatusCodeInvalidArgument = RPCGRPCStatusCodeKey.Int(3) + // DEADLINE_EXCEEDED + RPCGRPCStatusCodeDeadlineExceeded = RPCGRPCStatusCodeKey.Int(4) + // NOT_FOUND + RPCGRPCStatusCodeNotFound = RPCGRPCStatusCodeKey.Int(5) + // ALREADY_EXISTS + RPCGRPCStatusCodeAlreadyExists = RPCGRPCStatusCodeKey.Int(6) + // PERMISSION_DENIED + RPCGRPCStatusCodePermissionDenied = RPCGRPCStatusCodeKey.Int(7) + // RESOURCE_EXHAUSTED + RPCGRPCStatusCodeResourceExhausted = RPCGRPCStatusCodeKey.Int(8) + // FAILED_PRECONDITION + RPCGRPCStatusCodeFailedPrecondition = RPCGRPCStatusCodeKey.Int(9) + // ABORTED + RPCGRPCStatusCodeAborted = RPCGRPCStatusCodeKey.Int(10) + // OUT_OF_RANGE + RPCGRPCStatusCodeOutOfRange = RPCGRPCStatusCodeKey.Int(11) + // UNIMPLEMENTED + RPCGRPCStatusCodeUnimplemented = RPCGRPCStatusCodeKey.Int(12) + // INTERNAL + RPCGRPCStatusCodeInternal = RPCGRPCStatusCodeKey.Int(13) + // UNAVAILABLE + RPCGRPCStatusCodeUnavailable = RPCGRPCStatusCodeKey.Int(14) + // DATA_LOSS + RPCGRPCStatusCodeDataLoss = RPCGRPCStatusCodeKey.Int(15) + // UNAUTHENTICATED + RPCGRPCStatusCodeUnauthenticated = RPCGRPCStatusCodeKey.Int(16) +) + +// Tech-specific attributes for [JSON RPC](https://www.jsonrpc.org/). 
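+//
+// An illustrative usage sketch (not part of the vendored source): recording
+// these attributes on an already-started span, assuming this package is
+// imported under the name semconv:
+//
+//	span.SetAttributes(
+//		semconv.RPCSystemKey.String("jsonrpc"),
+//		semconv.RPCJsonrpcVersionKey.String("2.0"),
+//		semconv.RPCJsonrpcRequestIDKey.String("10"),
+//	)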
+const ( + // Protocol version as in `jsonrpc` property of request/response. Since JSON-RPC + // 1.0 does not specify this, the value can be omitted. + // + // Type: string + // Required: If missing, it is assumed to be "1.0". + // Stability: stable + // Examples: '2.0', '1.0' + RPCJsonrpcVersionKey = attribute.Key("rpc.jsonrpc.version") + // `id` property of request or response. Since the protocol allows `id` to be an + // int, a string, `null`, or missing (for notifications), the value is expected to + // be cast to a string for simplicity. Use an empty string in case of a `null` + // value. Omit entirely if this is a notification. + // + // Type: string + // Required: No + // Stability: stable + // Examples: '10', 'request-7', '' + RPCJsonrpcRequestIDKey = attribute.Key("rpc.jsonrpc.request_id") + // `error.code` property of response if it is an error response. + // + // Type: int + // Required: If missing, response is assumed to be successful. + // Stability: stable + // Examples: -32700, 100 + RPCJsonrpcErrorCodeKey = attribute.Key("rpc.jsonrpc.error_code") + // `error.message` property of response if it is an error response. + // + // Type: string + // Required: No + // Stability: stable + // Examples: 'Parse error', 'User already exists' + RPCJsonrpcErrorMessageKey = attribute.Key("rpc.jsonrpc.error_message") +) + +// RPC received/sent message. +const ( + // Whether this is a received or sent message. + // + // Type: Enum + // Required: No + // Stability: stable + MessageTypeKey = attribute.Key("message.type") + // MUST be calculated as two different counters starting from `1`: one for sent + // messages and one for received messages. + // + // Type: int + // Required: No + // Stability: stable + // Note: This way we guarantee that the values will be consistent between + // different implementations. + MessageIDKey = attribute.Key("message.id") + // Compressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageCompressedSizeKey = attribute.Key("message.compressed_size") + // Uncompressed size of the message in bytes. + // + // Type: int + // Required: No + // Stability: stable + MessageUncompressedSizeKey = attribute.Key("message.uncompressed_size") +) + +var ( + // sent + MessageTypeSent = MessageTypeKey.String("SENT") + // received + MessageTypeReceived = MessageTypeKey.String("RECEIVED") +) diff --git a/vendor/go.opentelemetry.io/otel/trace.go b/vendor/go.opentelemetry.io/otel/trace.go new file mode 100644 index 0000000000..28b4f5e4d8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace.go @@ -0,0 +1,44 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +import ( + "go.opentelemetry.io/otel/internal/global" + "go.opentelemetry.io/otel/trace" +) + +// Tracer creates a named tracer that implements the Tracer interface. +// If the name is an empty string then the provider uses the default name. +// +// This is short for GetTracerProvider().Tracer(name, opts...)
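+//
+// A minimal usage sketch (illustrative only; it assumes a global
+// TracerProvider has been registered elsewhere, e.g. via SetTracerProvider):
+//
+//	tr := otel.Tracer("example.com/foo")
+//	ctx, span := tr.Start(ctx, "operation")
+//	defer span.End()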
+func Tracer(name string, opts ...trace.TracerOption) trace.Tracer { + return GetTracerProvider().Tracer(name, opts...) +} + +// GetTracerProvider returns the registered global trace provider. +// If none is registered then an instance of NoopTracerProvider is returned. +// +// Use the trace provider to create a named tracer. E.g. +// tracer := otel.GetTracerProvider().Tracer("example.com/foo") +// or +// tracer := otel.Tracer("example.com/foo") +func GetTracerProvider() trace.TracerProvider { + return global.TracerProvider() +} + +// SetTracerProvider registers `tp` as the global trace provider. +func SetTracerProvider(tp trace.TracerProvider) { + global.SetTracerProvider(tp) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/LICENSE b/vendor/go.opentelemetry.io/otel/trace/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. 
The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/otel/trace/config.go b/vendor/go.opentelemetry.io/otel/trace/config.go new file mode 100644 index 0000000000..bcc333e04e --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/config.go @@ -0,0 +1,316 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "time" + + "go.opentelemetry.io/otel/attribute" +) + +// TracerConfig is a group of options for a Tracer. +type TracerConfig struct { + instrumentationVersion string + // Schema URL of the telemetry emitted by the Tracer. + schemaURL string +} + +// InstrumentationVersion returns the version of the library providing instrumentation. +func (t *TracerConfig) InstrumentationVersion() string { + return t.instrumentationVersion +} + +// SchemaURL returns the Schema URL of the telemetry emitted by the Tracer. +func (t *TracerConfig) SchemaURL() string { + return t.schemaURL +} + +// NewTracerConfig applies all the options to a returned TracerConfig. +func NewTracerConfig(options ...TracerOption) TracerConfig { + var config TracerConfig + for _, option := range options { + config = option.apply(config) + } + return config +} + +// TracerOption applies an option to a TracerConfig. +type TracerOption interface { + apply(TracerConfig) TracerConfig +} + +type tracerOptionFunc func(TracerConfig) TracerConfig + +func (fn tracerOptionFunc) apply(cfg TracerConfig) TracerConfig { + return fn(cfg) +} + +// SpanConfig is a group of options for a Span. +type SpanConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + links []Link + newRoot bool + spanKind SpanKind + stackTrace bool +} + +// Attributes describe the associated qualities of a Span. 
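+//
+// For example (an illustrative sketch), attributes collected from span start
+// options can be read back from the resulting config:
+//
+//	cfg := NewSpanStartConfig(WithAttributes(attribute.String("k", "v")))
+//	attrs := cfg.Attributes() // contains k=v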
+func (cfg *SpanConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in a Span life-cycle. +func (cfg *SpanConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *SpanConfig) StackTrace() bool { + return cfg.stackTrace +} + +// Links are the associations a Span has with other Spans. +func (cfg *SpanConfig) Links() []Link { + return cfg.links +} + +// NewRoot identifies a Span as the root Span for a new trace. This is +// commonly used when an existing trace crosses trust boundaries and the +// remote parent span context should be ignored for security. +func (cfg *SpanConfig) NewRoot() bool { + return cfg.newRoot +} + +// SpanKind is the role a Span has in a trace. +func (cfg *SpanConfig) SpanKind() SpanKind { + return cfg.spanKind +} + +// NewSpanStartConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data); it is left to the SDK to perform this +// action. +func NewSpanStartConfig(options ...SpanStartOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanStart(c) + } + return c +} + +// NewSpanEndConfig applies all the options to a returned SpanConfig. +// No validation is performed on the returned SpanConfig (e.g. no uniqueness +// checking or bounding of data); it is left to the SDK to perform this +// action. +func NewSpanEndConfig(options ...SpanEndOption) SpanConfig { + var c SpanConfig + for _, option := range options { + c = option.applySpanEnd(c) + } + return c +} + +// SpanStartOption applies an option to a SpanConfig. These options are applicable +// only when the span is created. +type SpanStartOption interface { + applySpanStart(SpanConfig) SpanConfig +} + +type spanOptionFunc func(SpanConfig) SpanConfig + +func (fn spanOptionFunc) applySpanStart(cfg SpanConfig) SpanConfig { + return fn(cfg) +} + +// SpanEndOption applies an option to a SpanConfig. These options are +// applicable only when the span is ended. +type SpanEndOption interface { + applySpanEnd(SpanConfig) SpanConfig +} + +// EventConfig is a group of options for an Event. +type EventConfig struct { + attributes []attribute.KeyValue + timestamp time.Time + stackTrace bool +} + +// Attributes describe the associated qualities of an Event. +func (cfg *EventConfig) Attributes() []attribute.KeyValue { + return cfg.attributes +} + +// Timestamp is a time in an Event life-cycle. +func (cfg *EventConfig) Timestamp() time.Time { + return cfg.timestamp +} + +// StackTrace checks whether stack trace capturing is enabled. +func (cfg *EventConfig) StackTrace() bool { + return cfg.stackTrace +} + +// NewEventConfig applies all the EventOptions to a returned EventConfig. If no +// timestamp option is passed, the returned EventConfig will have a Timestamp +// set to the call time, otherwise no validation is performed on the returned +// EventConfig. +func NewEventConfig(options ...EventOption) EventConfig { + var c EventConfig + for _, option := range options { + c = option.applyEvent(c) + } + if c.timestamp.IsZero() { + c.timestamp = time.Now() + } + return c +} + +// EventOption applies span event options to an EventConfig. +type EventOption interface { + applyEvent(EventConfig) EventConfig +} + +// SpanOption are options that can be used at both the beginning and end of a span.
+type SpanOption interface { + SpanStartOption + SpanEndOption +} + +// SpanStartEventOption are options that can be used at the start of a span, or with an event. +type SpanStartEventOption interface { + SpanStartOption + EventOption +} + +// SpanEndEventOption are options that can be used at the end of a span, or with an event. +type SpanEndEventOption interface { + SpanEndOption + EventOption +} + +type attributeOption []attribute.KeyValue + +func (o attributeOption) applySpan(c SpanConfig) SpanConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} +func (o attributeOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o attributeOption) applyEvent(c EventConfig) EventConfig { + c.attributes = append(c.attributes, []attribute.KeyValue(o)...) + return c +} + +var _ SpanStartEventOption = attributeOption{} + +// WithAttributes adds the attributes related to a span life-cycle event. +// These attributes are used to describe the work a Span represents when this +// option is provided to a Span's start or end events. Otherwise, these +// attributes provide additional information about the event being recorded +// (e.g. error, state change, processing progress, system event). +// +// If multiple of these options are passed, the attributes of each successive +// option will extend the attributes instead of overwriting. There is no +// guarantee of uniqueness in the resulting attributes. +func WithAttributes(attributes ...attribute.KeyValue) SpanStartEventOption { + return attributeOption(attributes) +} + +// SpanEventOption are options that can be used with an event or a span. +type SpanEventOption interface { + SpanOption + EventOption +} + +type timestampOption time.Time + +func (o timestampOption) applySpan(c SpanConfig) SpanConfig { + c.timestamp = time.Time(o) + return c +} +func (o timestampOption) applySpanStart(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } +func (o timestampOption) applyEvent(c EventConfig) EventConfig { + c.timestamp = time.Time(o) + return c +} + +var _ SpanEventOption = timestampOption{} + +// WithTimestamp sets the time of a Span or Event life-cycle moment (e.g. +// started, stopped, errored). +func WithTimestamp(t time.Time) SpanEventOption { + return timestampOption(t) +} + +type stackTraceOption bool + +func (o stackTraceOption) applyEvent(c EventConfig) EventConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpan(c SpanConfig) SpanConfig { + c.stackTrace = bool(o) + return c +} +func (o stackTraceOption) applySpanEnd(c SpanConfig) SpanConfig { return o.applySpan(c) } + +// WithStackTrace sets the flag to capture the error with a stack trace (e.g. true, false). +func WithStackTrace(b bool) SpanEndEventOption { + return stackTraceOption(b) +} + +// WithLinks adds links to a Span. The links are added to the existing Span +// links, i.e. this does not overwrite. Links with invalid span context are ignored. +func WithLinks(links ...Link) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.links = append(cfg.links, links...) + return cfg + }) +} + +// WithNewRoot specifies that the Span should be treated as a root Span. Any +// existing parent span context will be ignored when defining the Span's trace +// identifiers.
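+//
+// An illustrative sketch (tracer is assumed to be a Tracer obtained
+// elsewhere): starting a new trace that ignores any remote parent in ctx:
+//
+//	ctx, span := tracer.Start(ctx, "internal-operation", WithNewRoot())
+//	defer span.End()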
+func WithNewRoot() SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.newRoot = true + return cfg + }) +} + +// WithSpanKind sets the SpanKind of a Span. +func WithSpanKind(kind SpanKind) SpanStartOption { + return spanOptionFunc(func(cfg SpanConfig) SpanConfig { + cfg.spanKind = kind + return cfg + }) +} + +// WithInstrumentationVersion sets the instrumentation version. +func WithInstrumentationVersion(version string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.instrumentationVersion = version + return cfg + }) +} + +// WithSchemaURL sets the schema URL for the Tracer. +func WithSchemaURL(schemaURL string) TracerOption { + return tracerOptionFunc(func(cfg TracerConfig) TracerConfig { + cfg.schemaURL = schemaURL + return cfg + }) +} diff --git a/vendor/go.opentelemetry.io/otel/trace/context.go b/vendor/go.opentelemetry.io/otel/trace/context.go new file mode 100644 index 0000000000..76f9a083c4 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/context.go @@ -0,0 +1,61 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import "context" + +type traceContextKeyType int + +const currentSpanKey traceContextKeyType = iota + +// ContextWithSpan returns a copy of parent with span set as the current Span. +func ContextWithSpan(parent context.Context, span Span) context.Context { + return context.WithValue(parent, currentSpanKey, span) +} + +// ContextWithSpanContext returns a copy of parent with sc as the current +// Span. The Span implementation that wraps sc is non-recording and performs +// no operations other than to return sc as the SpanContext from the +// SpanContext method. +func ContextWithSpanContext(parent context.Context, sc SpanContext) context.Context { + return ContextWithSpan(parent, nonRecordingSpan{sc: sc}) +} + +// ContextWithRemoteSpanContext returns a copy of parent with rsc set explicitly +// as a remote SpanContext and as the current Span. The Span implementation +// that wraps rsc is non-recording and performs no operations other than to +// return rsc as the SpanContext from the SpanContext method. +func ContextWithRemoteSpanContext(parent context.Context, rsc SpanContext) context.Context { + return ContextWithSpanContext(parent, rsc.WithRemote(true)) +} + +// SpanFromContext returns the current Span from ctx. +// +// If no Span is currently set in ctx an implementation of a Span that +// performs no operations is returned. +func SpanFromContext(ctx context.Context) Span { + if ctx == nil { + return noopSpan{} + } + if span, ok := ctx.Value(currentSpanKey).(Span); ok { + return span + } + return noopSpan{} +} + +// SpanContextFromContext returns the current Span's SpanContext.
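+//
+// A common, illustrative use is correlating logs with the active trace
+// (log is assumed to be the standard library logger):
+//
+//	if sc := SpanContextFromContext(ctx); sc.HasTraceID() {
+//		log.Printf("trace_id=%s span_id=%s", sc.TraceID(), sc.SpanID())
+//	}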
+func SpanContextFromContext(ctx context.Context) SpanContext { + return SpanFromContext(ctx).SpanContext() +} diff --git a/vendor/go.opentelemetry.io/otel/trace/doc.go b/vendor/go.opentelemetry.io/otel/trace/doc.go new file mode 100644 index 0000000000..391417718f --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/doc.go @@ -0,0 +1,66 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +/* +Package trace provides an implementation of the tracing part of the +OpenTelemetry API. + +To participate in distributed traces a Span needs to be created for the +operation being performed as part of a traced workflow. In its simplest form: + + var tracer trace.Tracer + + func init() { + tracer = otel.Tracer("instrumentation/package/name") + } + + func operation(ctx context.Context) { + var span trace.Span + ctx, span = tracer.Start(ctx, "operation") + defer span.End() + // ... + } + +A Tracer is unique to the instrumentation and is used to create Spans. +Instrumentation should be designed to accept a TracerProvider from which it +can create its own unique Tracer. Alternatively, the registered global +TracerProvider from the go.opentelemetry.io/otel package can be used as +a default. + + const ( + name = "instrumentation/package/name" + version = "0.1.0" + ) + + type Instrumentation struct { + tracer trace.Tracer + } + + func NewInstrumentation(tp trace.TracerProvider) *Instrumentation { + if tp == nil { + tp = otel.TracerProvider() + } + return &Instrumentation{ + tracer: tp.Tracer(name, trace.WithInstrumentationVersion(version)), + } + } + + func operation(ctx context.Context, inst *Instrumentation) { + var span trace.Span + ctx, span = inst.tracer.Start(ctx, "operation") + defer span.End() + // ... + } +*/ +package trace // import "go.opentelemetry.io/otel/trace" diff --git a/vendor/go.opentelemetry.io/otel/trace/nonrecording.go b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go new file mode 100644 index 0000000000..88fcb81611 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/nonrecording.go @@ -0,0 +1,27 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +// nonRecordingSpan is a minimal implementation of a Span that wraps a +// SpanContext. It performs no operations other than to return the wrapped +// SpanContext.
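+//
+// Illustratively, this is the Span stored by ContextWithSpanContext, so a
+// remote parent can be carried in a context without recording anything (sc
+// is an assumed SpanContext value):
+//
+//	ctx := ContextWithRemoteSpanContext(context.Background(), sc)
+//	parent := SpanFromContext(ctx).SpanContext() // sc with Remote set to true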
+type nonRecordingSpan struct { + noopSpan + + sc SpanContext +} + +// SpanContext returns the wrapped SpanContext. +func (s nonRecordingSpan) SpanContext() SpanContext { return s.sc } diff --git a/vendor/go.opentelemetry.io/otel/trace/noop.go b/vendor/go.opentelemetry.io/otel/trace/noop.go new file mode 100644 index 0000000000..ad9a9fc5be --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/noop.go @@ -0,0 +1,89 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "context" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +// NewNoopTracerProvider returns an implementation of TracerProvider that +// performs no operations. The Tracer and Spans created from the returned +// TracerProvider also perform no operations. +func NewNoopTracerProvider() TracerProvider { + return noopTracerProvider{} +} + +type noopTracerProvider struct{} + +var _ TracerProvider = noopTracerProvider{} + +// Tracer returns a noop implementation of Tracer. +func (p noopTracerProvider) Tracer(string, ...TracerOption) Tracer { + return noopTracer{} +} + +// noopTracer is an implementation of Tracer that performs no operations. +type noopTracer struct{} + +var _ Tracer = noopTracer{} + +// Start carries forward a non-recording Span, if one is present in the context, otherwise it +// creates a no-op Span. +func (t noopTracer) Start(ctx context.Context, name string, _ ...SpanStartOption) (context.Context, Span) { + span := SpanFromContext(ctx) + if _, ok := span.(nonRecordingSpan); !ok { + // span is likely already a noopSpan, but let's be sure + span = noopSpan{} + } + return ContextWithSpan(ctx, span), span +} + +// noopSpan is an implementation of Span that performs no operations. +type noopSpan struct{} + +var _ Span = noopSpan{} + +// SpanContext returns an empty span context. +func (noopSpan) SpanContext() SpanContext { return SpanContext{} } + +// IsRecording always returns false. +func (noopSpan) IsRecording() bool { return false } + +// SetStatus does nothing. +func (noopSpan) SetStatus(codes.Code, string) {} + +// SetError does nothing. +func (noopSpan) SetError(bool) {} + +// SetAttributes does nothing. +func (noopSpan) SetAttributes(...attribute.KeyValue) {} + +// End does nothing. +func (noopSpan) End(...SpanEndOption) {} + +// RecordError does nothing. +func (noopSpan) RecordError(error, ...EventOption) {} + +// AddEvent does nothing. +func (noopSpan) AddEvent(string, ...EventOption) {} + +// SetName does nothing.
+func (noopSpan) SetName(string) {} + +// TracerProvider returns a no-op TracerProvider. +func (noopSpan) TracerProvider() TracerProvider { return noopTracerProvider{} } diff --git a/vendor/go.opentelemetry.io/otel/trace/trace.go b/vendor/go.opentelemetry.io/otel/trace/trace.go new file mode 100644 index 0000000000..0923ceb98d --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/trace/trace.go @@ -0,0 +1,519 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package trace // import "go.opentelemetry.io/otel/trace" + +import ( + "bytes" + "context" + "encoding/hex" + "encoding/json" + + "go.opentelemetry.io/otel/attribute" + "go.opentelemetry.io/otel/codes" +) + +const ( + // FlagsSampled is a bitmask with the sampled bit set. A SpanContext + // with the sampling bit set means the span is sampled. + FlagsSampled = TraceFlags(0x01) + + errInvalidHexID errorConst = "trace-id and span-id can only contain [0-9a-f] characters, all lowercase" + + errInvalidTraceIDLength errorConst = "hex encoded trace-id must have length equal to 32" + errNilTraceID errorConst = "trace-id can't be all zero" + + errInvalidSpanIDLength errorConst = "hex encoded span-id must have length equal to 16" + errNilSpanID errorConst = "span-id can't be all zero" +) + +type errorConst string + +func (e errorConst) Error() string { + return string(e) +} + +// TraceID is a unique identity of a trace. +// nolint:revive // revive complains about stutter of `trace.TraceID`. +type TraceID [16]byte + +var nilTraceID TraceID +var _ json.Marshaler = nilTraceID + +// IsValid checks whether the TraceID is valid. A valid trace ID does +// not consist of zeros only. +func (t TraceID) IsValid() bool { + return !bytes.Equal(t[:], nilTraceID[:]) +} + +// MarshalJSON implements a custom marshal function to encode TraceID +// as a hex string. +func (t TraceID) MarshalJSON() ([]byte, error) { + return json.Marshal(t.String()) +} + +// String returns the hex string representation form of a TraceID. +func (t TraceID) String() string { + return hex.EncodeToString(t[:]) +} + +// SpanID is a unique identity of a span in a trace. +type SpanID [8]byte + +var nilSpanID SpanID +var _ json.Marshaler = nilSpanID + +// IsValid checks whether the SpanID is valid. A valid SpanID does not consist +// of zeros only. +func (s SpanID) IsValid() bool { + return !bytes.Equal(s[:], nilSpanID[:]) +} + +// MarshalJSON implements a custom marshal function to encode SpanID +// as a hex string. +func (s SpanID) MarshalJSON() ([]byte, error) { + return json.Marshal(s.String()) +} + +// String returns the hex string representation form of a SpanID. +func (s SpanID) String() string { + return hex.EncodeToString(s[:]) +} + +// TraceIDFromHex returns a TraceID from a hex string if it is compliant with +// the W3C trace-context specification. See more at +// https://www.w3.org/TR/trace-context/#trace-id +// nolint:revive // revive complains about stutter of `trace.TraceIDFromHex`.
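+//
+// An illustrative sketch using the example trace-id from the W3C
+// trace-context specification:
+//
+//	tid, err := TraceIDFromHex("4bf92f3577b34da6a3ce929d0e0e4736")
+//	if err != nil {
+//		// handle an invalid (malformed or all-zero) ID
+//	}
+//	_ = tid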
+func TraceIDFromHex(h string) (TraceID, error) {
+	t := TraceID{}
+	if len(h) != 32 {
+		return t, errInvalidTraceIDLength
+	}
+
+	if err := decodeHex(h, t[:]); err != nil {
+		return t, err
+	}
+
+	if !t.IsValid() {
+		return t, errNilTraceID
+	}
+	return t, nil
+}
+
+// SpanIDFromHex returns a SpanID from a hex string if it is compliant
+// with the W3C trace-context specification.
+// See more at https://www.w3.org/TR/trace-context/#parent-id
+func SpanIDFromHex(h string) (SpanID, error) {
+	s := SpanID{}
+	if len(h) != 16 {
+		return s, errInvalidSpanIDLength
+	}
+
+	if err := decodeHex(h, s[:]); err != nil {
+		return s, err
+	}
+
+	if !s.IsValid() {
+		return s, errNilSpanID
+	}
+	return s, nil
+}
+
+func decodeHex(h string, b []byte) error {
+	for _, r := range h {
+		switch {
+		case 'a' <= r && r <= 'f':
+			continue
+		case '0' <= r && r <= '9':
+			continue
+		default:
+			return errInvalidHexID
+		}
+	}
+
+	decoded, err := hex.DecodeString(h)
+	if err != nil {
+		return err
+	}
+
+	copy(b, decoded)
+	return nil
+}
+
+// TraceFlags contains flags that can be set on a SpanContext.
+type TraceFlags byte //nolint:revive // revive complains about stutter of `trace.TraceFlags`.
+
+// IsSampled returns whether the sampling bit is set in the TraceFlags.
+func (tf TraceFlags) IsSampled() bool {
+	return tf&FlagsSampled == FlagsSampled
+}
+
+// WithSampled sets the sampling bit in a new copy of the TraceFlags.
+func (tf TraceFlags) WithSampled(sampled bool) TraceFlags {
+	if sampled {
+		return tf | FlagsSampled
+	}
+
+	return tf &^ FlagsSampled
+}
+
+// MarshalJSON implements a custom marshal function to encode TraceFlags
+// as a hex string.
+func (tf TraceFlags) MarshalJSON() ([]byte, error) {
+	return json.Marshal(tf.String())
+}
+
+// String returns the hex string representation of the TraceFlags.
+func (tf TraceFlags) String() string {
+	return hex.EncodeToString([]byte{byte(tf)}[:])
+}
+
+// SpanContextConfig contains mutable fields usable for constructing
+// an immutable SpanContext.
+type SpanContextConfig struct {
+	TraceID    TraceID
+	SpanID     SpanID
+	TraceFlags TraceFlags
+	TraceState TraceState
+	Remote     bool
+}
+
+// NewSpanContext constructs a SpanContext using values from the provided
+// SpanContextConfig.
+func NewSpanContext(config SpanContextConfig) SpanContext {
+	return SpanContext{
+		traceID:    config.TraceID,
+		spanID:     config.SpanID,
+		traceFlags: config.TraceFlags,
+		traceState: config.TraceState,
+		remote:     config.Remote,
+	}
+}
+
+// SpanContext contains identifying trace information about a Span.
+type SpanContext struct {
+	traceID    TraceID
+	spanID     SpanID
+	traceFlags TraceFlags
+	traceState TraceState
+	remote     bool
+}
+
+var _ json.Marshaler = SpanContext{}
+
+// IsValid returns whether the SpanContext is valid. A valid span context has a
+// valid TraceID and SpanID.
+func (sc SpanContext) IsValid() bool {
+	return sc.HasTraceID() && sc.HasSpanID()
+}
+
+// IsRemote indicates whether the SpanContext represents a remotely-created Span.
+func (sc SpanContext) IsRemote() bool {
+	return sc.remote
+}
+
+// WithRemote returns a copy of sc with the Remote property set to remote.
+func (sc SpanContext) WithRemote(remote bool) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     remote,
+	}
+}
+
+// TraceID returns the TraceID from the SpanContext.
+func (sc SpanContext) TraceID() TraceID {
+	return sc.traceID
+}
+
+// HasTraceID checks if the SpanContext has a valid TraceID.
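+//
+// For example, a minimal construction sketch (tid is a hypothetical TraceID
+// and sid a hypothetical SpanID, e.g. parsed with the FromHex helpers above):
+//
+//	sc := NewSpanContext(SpanContextConfig{TraceID: tid, SpanID: sid})
+//	valid := sc.HasTraceID() && sc.HasSpanID() // equivalent to sc.IsValid()
+//	_ = valid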
+func (sc SpanContext) HasTraceID() bool {
+	return sc.traceID.IsValid()
+}
+
+// WithTraceID returns a new SpanContext with the TraceID replaced.
+func (sc SpanContext) WithTraceID(traceID TraceID) SpanContext {
+	return SpanContext{
+		traceID:    traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// SpanID returns the SpanID from the SpanContext.
+func (sc SpanContext) SpanID() SpanID {
+	return sc.spanID
+}
+
+// HasSpanID checks if the SpanContext has a valid SpanID.
+func (sc SpanContext) HasSpanID() bool {
+	return sc.spanID.IsValid()
+}
+
+// WithSpanID returns a new SpanContext with the SpanID replaced.
+func (sc SpanContext) WithSpanID(spanID SpanID) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     spanID,
+		traceFlags: sc.traceFlags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// TraceFlags returns the flags from the SpanContext.
+func (sc SpanContext) TraceFlags() TraceFlags {
+	return sc.traceFlags
+}
+
+// IsSampled returns whether the sampling bit is set in the SpanContext's TraceFlags.
+func (sc SpanContext) IsSampled() bool {
+	return sc.traceFlags.IsSampled()
+}
+
+// WithTraceFlags returns a new SpanContext with the TraceFlags replaced.
+func (sc SpanContext) WithTraceFlags(flags TraceFlags) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: flags,
+		traceState: sc.traceState,
+		remote:     sc.remote,
+	}
+}
+
+// TraceState returns the TraceState from the SpanContext.
+func (sc SpanContext) TraceState() TraceState {
+	return sc.traceState
+}
+
+// WithTraceState returns a new SpanContext with the TraceState replaced.
+func (sc SpanContext) WithTraceState(state TraceState) SpanContext {
+	return SpanContext{
+		traceID:    sc.traceID,
+		spanID:     sc.spanID,
+		traceFlags: sc.traceFlags,
+		traceState: state,
+		remote:     sc.remote,
+	}
+}
+
+// Equal is a predicate that determines whether two SpanContext values are equal.
+func (sc SpanContext) Equal(other SpanContext) bool {
+	return sc.traceID == other.traceID &&
+		sc.spanID == other.spanID &&
+		sc.traceFlags == other.traceFlags &&
+		sc.traceState.String() == other.traceState.String() &&
+		sc.remote == other.remote
+}
+
+// MarshalJSON implements a custom marshal function to encode a SpanContext.
+func (sc SpanContext) MarshalJSON() ([]byte, error) {
+	return json.Marshal(SpanContextConfig{
+		TraceID:    sc.traceID,
+		SpanID:     sc.spanID,
+		TraceFlags: sc.traceFlags,
+		TraceState: sc.traceState,
+		Remote:     sc.remote,
+	})
+}
+
+// Span is the individual component of a trace. It represents a single named
+// and timed operation of a workflow that is traced. A Tracer is used to
+// create a Span and it is then up to the operation the Span represents to
+// properly end the Span when the operation itself ends.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Span interface {
+	// End completes the Span. The Span is considered complete and ready to be
+	// delivered through the rest of the telemetry pipeline after this method
+	// is called. Therefore, updates to the Span are not allowed after this
+	// method has been called.
+	End(options ...SpanEndOption)
+
+	// AddEvent adds an event with the provided name and options.
+	AddEvent(name string, options ...EventOption)
+
+	// IsRecording returns the recording state of the Span. It will return
+	// true if the Span is active and events can be recorded.
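+	//
+	// A common guard, sketched here with a hypothetical expensiveDetail
+	// helper, avoids computing attributes for spans that are not recording:
+	//
+	//	if span.IsRecording() {
+	//		span.SetAttributes(attribute.String("detail", expensiveDetail()))
+	//	}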
+	IsRecording() bool
+
+	// RecordError will record err as an exception span event for this span. An
+	// additional call to SetStatus is required if the Status of the Span should
+	// be set to Error, as this method does not change the Span status. If this
+	// span is not being recorded or err is nil then this method does nothing.
+	RecordError(err error, options ...EventOption)
+
+	// SpanContext returns the SpanContext of the Span. The returned SpanContext
+	// is usable even after the End method has been called for the Span.
+	SpanContext() SpanContext
+
+	// SetStatus sets the status of the Span in the form of a code and a
+	// description, overriding previous values set. The description is only
+	// included in a status when the code is for an error.
+	SetStatus(code codes.Code, description string)
+
+	// SetName sets the Span name.
+	SetName(name string)
+
+	// SetAttributes sets kv as attributes of the Span. If a key from kv
+	// already exists for an attribute of the Span it will be overwritten with
+	// the value contained in kv.
+	SetAttributes(kv ...attribute.KeyValue)
+
+	// TracerProvider returns a TracerProvider that can be used to generate
+	// additional Spans on the same telemetry pipeline as the current Span.
+	TracerProvider() TracerProvider
+}
+
+// Link is the relationship between two Spans. The relationship can be within
+// the same Trace or across different Traces.
+//
+// For example, a Link is used in the following situations:
+//
+// 1. Batch Processing: A batch of operations may contain operations
+// associated with one or more traces/spans. Since there can only be one
+// parent SpanContext, a Link is used to keep reference to the
+// SpanContext of all operations in the batch.
+// 2. Public Endpoint: A SpanContext for an incoming client request on a
+// public endpoint should be considered untrusted. In such a case, a new
+// trace with its own identity and sampling decision needs to be created,
+// but this new trace needs to be related to the original trace in some
+// form. A Link is used to keep reference to the original SpanContext and
+// track the relationship.
+type Link struct {
+	// SpanContext of the linked Span.
+	SpanContext SpanContext
+
+	// Attributes describe the aspects of the link.
+	Attributes []attribute.KeyValue
+}
+
+// LinkFromContext returns a link encapsulating the SpanContext in the provided ctx.
+func LinkFromContext(ctx context.Context, attrs ...attribute.KeyValue) Link {
+	return Link{
+		SpanContext: SpanContextFromContext(ctx),
+		Attributes:  attrs,
+	}
+}
+
+// SpanKind is the role a Span plays in a Trace.
+type SpanKind int
+
+// As a convenience, these match the proto definition, see
+// https://github.com/open-telemetry/opentelemetry-proto/blob/30d237e1ff3ab7aa50e0922b5bebdd93505090af/opentelemetry/proto/trace/v1/trace.proto#L101-L129
+//
+// The unspecified value is not a valid `SpanKind`. Use `ValidateSpanKind()`
+// to coerce a span kind to a valid value.
+const (
+	// SpanKindUnspecified is an unspecified SpanKind and is not a valid
+	// SpanKind. SpanKindUnspecified should be replaced with SpanKindInternal
+	// if it is received.
+	SpanKindUnspecified SpanKind = 0
+	// SpanKindInternal is a SpanKind for a Span that represents an internal
+	// operation within an application.
+	SpanKindInternal SpanKind = 1
+	// SpanKindServer is a SpanKind for a Span that represents the operation
+	// of handling a request from a client.
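+	//
+	// As a sketch of the batch-processing case described above (itemCtx and
+	// i are hypothetical), a link per batch item can be captured with the
+	// LinkFromContext helper defined earlier in this file:
+	//
+	//	link := LinkFromContext(itemCtx, attribute.Int("batch.index", i))
+	//	_ = link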
+	SpanKindServer SpanKind = 2
+	// SpanKindClient is a SpanKind for a Span that represents the operation
+	// of a client making a request to a server.
+	SpanKindClient SpanKind = 3
+	// SpanKindProducer is a SpanKind for a Span that represents the operation
+	// of a producer sending a message to a message broker. Unlike
+	// SpanKindClient and SpanKindServer, there is often no direct
+	// relationship between this kind of Span and a SpanKindConsumer kind. A
+	// SpanKindProducer Span will end once the message is accepted by the
+	// message broker, which might not overlap with the processing of that
+	// message.
+	SpanKindProducer SpanKind = 4
+	// SpanKindConsumer is a SpanKind for a Span that represents the operation
+	// of a consumer receiving a message from a message broker. Like
+	// SpanKindProducer Spans, there is often no direct relationship between
+	// this Span and the Span that produced the message.
+	SpanKindConsumer SpanKind = 5
+)
+
+// ValidateSpanKind returns a valid span kind value. This will coerce
+// invalid values into the default value, SpanKindInternal.
+func ValidateSpanKind(spanKind SpanKind) SpanKind {
+	switch spanKind {
+	case SpanKindInternal,
+		SpanKindServer,
+		SpanKindClient,
+		SpanKindProducer,
+		SpanKindConsumer:
+		// valid
+		return spanKind
+	default:
+		return SpanKindInternal
+	}
+}
+
+// String returns the specified name of the SpanKind in lower-case.
+func (sk SpanKind) String() string {
+	switch sk {
+	case SpanKindInternal:
+		return "internal"
+	case SpanKindServer:
+		return "server"
+	case SpanKindClient:
+		return "client"
+	case SpanKindProducer:
+		return "producer"
+	case SpanKindConsumer:
+		return "consumer"
+	default:
+		return "unspecified"
+	}
+}
+
+// Tracer is the creator of Spans.
+//
+// Warning: methods may be added to this interface in minor releases.
+type Tracer interface {
+	// Start creates a span and a context.Context containing the newly-created span.
+	//
+	// If the context.Context provided in `ctx` contains a Span then the newly-created
+	// Span will be a child of that span, otherwise it will be a root span. This behavior
+	// can be overridden by providing `WithNewRoot()` as a SpanOption, causing the
+	// newly-created Span to be a root span even if `ctx` contains a Span.
+	//
+	// When creating a Span it is recommended to provide all known span attributes using
+	// the `WithAttributes()` SpanOption as samplers will only have access to the
+	// attributes provided when a Span is created.
+	//
+	// Any Span that is created MUST also be ended. This is the responsibility of the user.
+	// Implementations of this API may leak memory or other resources if Spans are not ended.
+	Start(ctx context.Context, spanName string, opts ...SpanStartOption) (context.Context, Span)
+}
+
+// TracerProvider provides access to instrumentation Tracers.
+//
+// Warning: methods may be added to this interface in minor releases.
+type TracerProvider interface {
+	// Tracer creates an implementation of the Tracer interface.
+	// The instrumentationName must be the name of the library providing
+	// instrumentation. This name may be the same as the instrumented code
+	// only if that code provides built-in instrumentation. If the
+	// instrumentationName is empty, then an implementation-defined default
+	// name will be used instead.
+	//
+	// This method must be concurrency safe.
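+	//
+	// For example, a minimal sketch, assuming provider is a TracerProvider
+	// and using a hypothetical import path as the instrumentation name:
+	//
+	//	tr := provider.Tracer("github.com/example/mylib")
+	//	_ = tr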
+	Tracer(instrumentationName string, opts ...TracerOption) Tracer
+}
diff --git a/vendor/go.opentelemetry.io/otel/trace/tracestate.go b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
new file mode 100644
index 0000000000..86fc62c1d8
--- /dev/null
+++ b/vendor/go.opentelemetry.io/otel/trace/tracestate.go
@@ -0,0 +1,217 @@
+// Copyright The OpenTelemetry Authors
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+package trace // import "go.opentelemetry.io/otel/trace"
+
+import (
+	"encoding/json"
+	"fmt"
+	"regexp"
+	"strings"
+)
+
+var (
+	maxListMembers = 32
+
+	listDelimiter = ","
+
+	// based on the W3C Trace Context specification, see
+	// https://www.w3.org/TR/trace-context-1/#tracestate-header
+	noTenantKeyFormat   = `[a-z][_0-9a-z\-\*\/]{0,255}`
+	withTenantKeyFormat = `[a-z0-9][_0-9a-z\-\*\/]{0,240}@[a-z][_0-9a-z\-\*\/]{0,13}`
+	valueFormat         = `[\x20-\x2b\x2d-\x3c\x3e-\x7e]{0,255}[\x21-\x2b\x2d-\x3c\x3e-\x7e]`
+
+	keyRe    = regexp.MustCompile(`^((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))$`)
+	valueRe  = regexp.MustCompile(`^(` + valueFormat + `)$`)
+	memberRe = regexp.MustCompile(`^\s*((` + noTenantKeyFormat + `)|(` + withTenantKeyFormat + `))=(` + valueFormat + `)\s*$`)
+
+	errInvalidKey    errorConst = "invalid tracestate key"
+	errInvalidValue  errorConst = "invalid tracestate value"
+	errInvalidMember errorConst = "invalid tracestate list-member"
+	errMemberNumber  errorConst = "too many list-members in tracestate"
+	errDuplicate     errorConst = "duplicate list-member in tracestate"
+)
+
+type member struct {
+	Key   string
+	Value string
+}
+
+func newMember(key, value string) (member, error) {
+	if !keyRe.MatchString(key) {
+		return member{}, fmt.Errorf("%w: %s", errInvalidKey, key)
+	}
+	if !valueRe.MatchString(value) {
+		return member{}, fmt.Errorf("%w: %s", errInvalidValue, value)
+	}
+	return member{Key: key, Value: value}, nil
+}
+
+func parseMember(m string) (member, error) {
+	matches := memberRe.FindStringSubmatch(m)
+	if len(matches) != 5 {
+		return member{}, fmt.Errorf("%w: %s", errInvalidMember, m)
+	}
+
+	return member{
+		Key:   matches[1],
+		Value: matches[4],
+	}, nil
+}
+
+// String encodes member into a string compliant with the W3C Trace Context
+// specification.
+func (m member) String() string {
+	return fmt.Sprintf("%s=%s", m.Key, m.Value)
+}
+
+// TraceState provides additional vendor-specific trace identification
+// information across different distributed tracing systems. It represents an
+// immutable list consisting of key/value pairs; each pair is referred to as a
+// list-member.
+//
+// TraceState conforms to the W3C Trace Context specification
+// (https://www.w3.org/TR/trace-context-1). All operations that create or copy
+// a TraceState do so by validating all input and will only produce TraceState
+// that conform to the specification. Specifically, this means that all
+// list-member's key/value pairs are valid, no duplicate list-members exist,
+// and the maximum number of list-members (32) is not exceeded.
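+//
+// A minimal round-trip sketch with hypothetical vendor keys:
+//
+//	ts, _ := ParseTraceState("vendor1=v1,vendor2=v2")
+//	ts, _ = ts.Insert("vendor3", "v3") // new member moves to the front
+//	_ = ts.String()                    // "vendor3=v3,vendor1=v1,vendor2=v2"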
+type TraceState struct { //nolint:revive // revive complains about stutter of `trace.TraceState`
+	// list is the members in order.
+	list []member
+}
+
+var _ json.Marshaler = TraceState{}
+
+// ParseTraceState attempts to decode a TraceState from the passed
+// string. It returns an error if the input is invalid according to the W3C
+// Trace Context specification.
+func ParseTraceState(tracestate string) (TraceState, error) {
+	if tracestate == "" {
+		return TraceState{}, nil
+	}
+
+	wrapErr := func(err error) error {
+		return fmt.Errorf("failed to parse tracestate: %w", err)
+	}
+
+	var members []member
+	found := make(map[string]struct{})
+	for _, memberStr := range strings.Split(tracestate, listDelimiter) {
+		if len(memberStr) == 0 {
+			continue
+		}
+
+		m, err := parseMember(memberStr)
+		if err != nil {
+			return TraceState{}, wrapErr(err)
+		}
+
+		if _, ok := found[m.Key]; ok {
+			return TraceState{}, wrapErr(errDuplicate)
+		}
+		found[m.Key] = struct{}{}
+
+		members = append(members, m)
+		if n := len(members); n > maxListMembers {
+			return TraceState{}, wrapErr(errMemberNumber)
+		}
+	}
+
+	return TraceState{list: members}, nil
+}
+
+// MarshalJSON marshals the TraceState into JSON.
+func (ts TraceState) MarshalJSON() ([]byte, error) {
+	return json.Marshal(ts.String())
+}
+
+// String encodes the TraceState into a string compliant with the W3C
+// Trace Context specification. The returned string will be invalid if the
+// TraceState contains any invalid members.
+func (ts TraceState) String() string {
+	members := make([]string, len(ts.list))
+	for i, m := range ts.list {
+		members[i] = m.String()
+	}
+	return strings.Join(members, listDelimiter)
+}
+
+// Get returns the value paired with key from the corresponding TraceState
+// list-member if it exists, otherwise an empty string is returned.
+func (ts TraceState) Get(key string) string {
+	for _, member := range ts.list {
+		if member.Key == key {
+			return member.Value
+		}
+	}
+
+	return ""
+}
+
+// Insert adds a new list-member defined by the key/value pair to the
+// TraceState. If a list-member already exists for the given key, that
+// list-member's value is updated. The new or updated list-member is always
+// moved to the beginning of the TraceState as specified by the W3C Trace
+// Context specification.
+//
+// If key or value is invalid according to the W3C Trace Context
+// specification, an error is returned with the original TraceState.
+//
+// If adding a new list-member means the TraceState would have more members
+// than is allowed, an error is returned instead with the original TraceState.
+func (ts TraceState) Insert(key, value string) (TraceState, error) {
+	m, err := newMember(key, value)
+	if err != nil {
+		return ts, err
+	}
+
+	cTS := ts.Delete(key)
+	if cTS.Len()+1 > maxListMembers {
+		// TODO (MrAlias): When the second version of the Trace Context
+		// specification is published this needs to not return an error.
+		// Instead it should drop the "right-most" member and insert the new
+		// member at the front.
+		//
+		// https://github.com/w3c/trace-context/pull/448
+		return ts, fmt.Errorf("failed to insert: %w", errMemberNumber)
+	}
+
+	cTS.list = append(cTS.list, member{})
+	copy(cTS.list[1:], cTS.list)
+	cTS.list[0] = m
+
+	return cTS, nil
+}
+
+// Delete returns a copy of the TraceState with the list-member identified by
+// key removed.
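+//
+// Continuing the sketch above:
+//
+//	ts = ts.Delete("vendor1") // returns a copy without the vendor1 member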
+func (ts TraceState) Delete(key string) TraceState { + members := make([]member, ts.Len()) + copy(members, ts.list) + for i, member := range ts.list { + if member.Key == key { + members = append(members[:i], members[i+1:]...) + // TraceState should contain no duplicate members. + break + } + } + return TraceState{list: members} +} + +// Len returns the number of list-members in the TraceState. +func (ts TraceState) Len() int { + return len(ts.list) +} diff --git a/vendor/go.opentelemetry.io/otel/verify_examples.sh b/vendor/go.opentelemetry.io/otel/verify_examples.sh new file mode 100644 index 0000000000..dbb61a4227 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/verify_examples.sh @@ -0,0 +1,85 @@ +#!/bin/bash + +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -euo pipefail + +cd $(dirname $0) +TOOLS_DIR=$(pwd)/.tools + +if [ -z "${GOPATH}" ] ; then + printf "GOPATH is not defined.\n" + exit -1 +fi + +if [ ! -d "${GOPATH}" ] ; then + printf "GOPATH ${GOPATH} is invalid \n" + exit -1 +fi + +# Pre-requisites +if ! git diff --quiet; then \ + git status + printf "\n\nError: working tree is not clean\n" + exit -1 +fi + +if [ "$(git tag --contains $(git log -1 --pretty=format:"%H"))" = "" ] ; then + printf "$(git log -1)" + printf "\n\nError: HEAD is not pointing to a tagged version" +fi + +make ${TOOLS_DIR}/gojq + +DIR_TMP="${GOPATH}/src/oteltmp/" +rm -rf $DIR_TMP +mkdir -p $DIR_TMP + +printf "Copy examples to ${DIR_TMP}\n" +cp -a ./example ${DIR_TMP} + +# Update go.mod files +printf "Update go.mod: rename module and remove replace\n" + +PACKAGE_DIRS=$(find . -mindepth 2 -type f -name 'go.mod' -exec dirname {} \; | egrep 'example' | sed 's/^\.\///' | sort) + +for dir in $PACKAGE_DIRS; do + printf " Update go.mod for $dir\n" + (cd "${DIR_TMP}/${dir}" && \ + # replaces is ("mod1" "mod2" …) + replaces=($(go mod edit -json | ${TOOLS_DIR}/gojq '.Replace[].Old.Path')) && \ + # strip double quotes + replaces=("${replaces[@]%\"}") && \ + replaces=("${replaces[@]#\"}") && \ + # make an array (-dropreplace=mod1 -dropreplace=mod2 …) + dropreplaces=("${replaces[@]/#/-dropreplace=}") && \ + go mod edit -module "oteltmp/${dir}" "${dropreplaces[@]}" && \ + go mod tidy) +done +printf "Update done:\n\n" + +# Build directories that contain main package. These directories are different than +# directories that contain go.mod files. +printf "Build examples:\n" +EXAMPLES=$(./get_main_pkgs.sh ./example) +for ex in $EXAMPLES; do + printf " Build $ex in ${DIR_TMP}/${ex}\n" + (cd "${DIR_TMP}/${ex}" && \ + go build .) 
+done + +# Cleanup +printf "Remove copied files.\n" +rm -rf $DIR_TMP diff --git a/vendor/go.opentelemetry.io/otel/version.go b/vendor/go.opentelemetry.io/otel/version.go new file mode 100644 index 0000000000..a09bcbb5e8 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/version.go @@ -0,0 +1,20 @@ +// Copyright The OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package otel // import "go.opentelemetry.io/otel" + +// Version is the current release version of OpenTelemetry in use. +func Version() string { + return "1.4.1" +} diff --git a/vendor/go.opentelemetry.io/otel/versions.yaml b/vendor/go.opentelemetry.io/otel/versions.yaml new file mode 100644 index 0000000000..3f06f29934 --- /dev/null +++ b/vendor/go.opentelemetry.io/otel/versions.yaml @@ -0,0 +1,60 @@ +# Copyright The OpenTelemetry Authors +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +module-sets: + stable-v1: + version: v1.4.1 + modules: + - go.opentelemetry.io/otel + - go.opentelemetry.io/otel/bridge/opentracing + - go.opentelemetry.io/otel/example/fib + - go.opentelemetry.io/otel/example/jaeger + - go.opentelemetry.io/otel/example/namedtracer + - go.opentelemetry.io/otel/example/otel-collector + - go.opentelemetry.io/otel/example/passthrough + - go.opentelemetry.io/otel/example/zipkin + - go.opentelemetry.io/otel/exporters/jaeger + - go.opentelemetry.io/otel/exporters/zipkin + - go.opentelemetry.io/otel/exporters/otlp/otlptrace + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc + - go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp + - go.opentelemetry.io/otel/exporters/otlp/internal/retry + - go.opentelemetry.io/otel/exporters/stdout/stdouttrace + - go.opentelemetry.io/otel/trace + - go.opentelemetry.io/otel/sdk + experimental-metrics: + version: v0.27.0 + modules: + - go.opentelemetry.io/otel/example/prometheus + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetricgrpc + - go.opentelemetry.io/otel/exporters/otlp/otlpmetric/otlpmetrichttp + - go.opentelemetry.io/otel/exporters/prometheus + - go.opentelemetry.io/otel/exporters/stdout/stdoutmetric + - go.opentelemetry.io/otel/internal/metric + - go.opentelemetry.io/otel/metric + - go.opentelemetry.io/otel/sdk/export/metric + - go.opentelemetry.io/otel/sdk/metric + experimental-schema: + version: v0.0.2 + modules: + - go.opentelemetry.io/otel/schema + bridge: + version: v0.27.1 + modules: + - go.opentelemetry.io/otel/bridge/opencensus + - go.opentelemetry.io/otel/bridge/opencensus/test + - go.opentelemetry.io/otel/example/opencensus +excluded-modules: + - go.opentelemetry.io/otel/internal/tools diff --git a/vendor/go.opentelemetry.io/proto/otlp/LICENSE b/vendor/go.opentelemetry.io/proto/otlp/LICENSE new file mode 100644 index 0000000000..261eeb9e9f --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go new file mode 100644 index 0000000000..07f7e9b1fa --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_config.pb.go @@ -0,0 +1,573 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. 
+// versions: +// protoc-gen-go v1.23.0 +// protoc v3.13.0 +// source: opentelemetry/proto/trace/v1/trace_config.proto + +package v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// How spans should be sampled: +// - Always off +// - Always on +// - Always follow the parent Span's decision (off if no parent). +type ConstantSampler_ConstantDecision int32 + +const ( + ConstantSampler_ALWAYS_OFF ConstantSampler_ConstantDecision = 0 + ConstantSampler_ALWAYS_ON ConstantSampler_ConstantDecision = 1 + ConstantSampler_ALWAYS_PARENT ConstantSampler_ConstantDecision = 2 +) + +// Enum value maps for ConstantSampler_ConstantDecision. +var ( + ConstantSampler_ConstantDecision_name = map[int32]string{ + 0: "ALWAYS_OFF", + 1: "ALWAYS_ON", + 2: "ALWAYS_PARENT", + } + ConstantSampler_ConstantDecision_value = map[string]int32{ + "ALWAYS_OFF": 0, + "ALWAYS_ON": 1, + "ALWAYS_PARENT": 2, + } +) + +func (x ConstantSampler_ConstantDecision) Enum() *ConstantSampler_ConstantDecision { + p := new(ConstantSampler_ConstantDecision) + *p = x + return p +} + +func (x ConstantSampler_ConstantDecision) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (ConstantSampler_ConstantDecision) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0].Descriptor() +} + +func (ConstantSampler_ConstantDecision) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes[0] +} + +func (x ConstantSampler_ConstantDecision) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use ConstantSampler_ConstantDecision.Descriptor instead. +func (ConstantSampler_ConstantDecision) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1, 0} +} + +// Global configuration of the trace service. All fields must be specified, or +// the default (zero) values will be used for each type. +type TraceConfig struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The global default sampler used to make decisions on span sampling. + // + // Types that are assignable to Sampler: + // *TraceConfig_ConstantSampler + // *TraceConfig_TraceIdRatioBased + // *TraceConfig_RateLimitingSampler + Sampler isTraceConfig_Sampler `protobuf_oneof:"sampler"` + // The global default max number of attributes per span. + MaxNumberOfAttributes int64 `protobuf:"varint,4,opt,name=max_number_of_attributes,json=maxNumberOfAttributes,proto3" json:"max_number_of_attributes,omitempty"` + // The global default max number of annotation events per span. 
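+	//
+	// A minimal construction sketch, using only the types generated in this
+	// file:
+	//
+	//	cfg := &TraceConfig{
+	//		Sampler: &TraceConfig_TraceIdRatioBased{
+	//			TraceIdRatioBased: &TraceIdRatioBased{SamplingRatio: 0.25},
+	//		},
+	//		MaxNumberOfAttributes: 128,
+	//	}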
+ MaxNumberOfTimedEvents int64 `protobuf:"varint,5,opt,name=max_number_of_timed_events,json=maxNumberOfTimedEvents,proto3" json:"max_number_of_timed_events,omitempty"` + // The global default max number of attributes per timed event. + MaxNumberOfAttributesPerTimedEvent int64 `protobuf:"varint,6,opt,name=max_number_of_attributes_per_timed_event,json=maxNumberOfAttributesPerTimedEvent,proto3" json:"max_number_of_attributes_per_timed_event,omitempty"` + // The global default max number of link entries per span. + MaxNumberOfLinks int64 `protobuf:"varint,7,opt,name=max_number_of_links,json=maxNumberOfLinks,proto3" json:"max_number_of_links,omitempty"` + // The global default max number of attributes per span. + MaxNumberOfAttributesPerLink int64 `protobuf:"varint,8,opt,name=max_number_of_attributes_per_link,json=maxNumberOfAttributesPerLink,proto3" json:"max_number_of_attributes_per_link,omitempty"` +} + +func (x *TraceConfig) Reset() { + *x = TraceConfig{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceConfig) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceConfig) ProtoMessage() {} + +func (x *TraceConfig) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceConfig.ProtoReflect.Descriptor instead. +func (*TraceConfig) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{0} +} + +func (m *TraceConfig) GetSampler() isTraceConfig_Sampler { + if m != nil { + return m.Sampler + } + return nil +} + +func (x *TraceConfig) GetConstantSampler() *ConstantSampler { + if x, ok := x.GetSampler().(*TraceConfig_ConstantSampler); ok { + return x.ConstantSampler + } + return nil +} + +func (x *TraceConfig) GetTraceIdRatioBased() *TraceIdRatioBased { + if x, ok := x.GetSampler().(*TraceConfig_TraceIdRatioBased); ok { + return x.TraceIdRatioBased + } + return nil +} + +func (x *TraceConfig) GetRateLimitingSampler() *RateLimitingSampler { + if x, ok := x.GetSampler().(*TraceConfig_RateLimitingSampler); ok { + return x.RateLimitingSampler + } + return nil +} + +func (x *TraceConfig) GetMaxNumberOfAttributes() int64 { + if x != nil { + return x.MaxNumberOfAttributes + } + return 0 +} + +func (x *TraceConfig) GetMaxNumberOfTimedEvents() int64 { + if x != nil { + return x.MaxNumberOfTimedEvents + } + return 0 +} + +func (x *TraceConfig) GetMaxNumberOfAttributesPerTimedEvent() int64 { + if x != nil { + return x.MaxNumberOfAttributesPerTimedEvent + } + return 0 +} + +func (x *TraceConfig) GetMaxNumberOfLinks() int64 { + if x != nil { + return x.MaxNumberOfLinks + } + return 0 +} + +func (x *TraceConfig) GetMaxNumberOfAttributesPerLink() int64 { + if x != nil { + return x.MaxNumberOfAttributesPerLink + } + return 0 +} + +type isTraceConfig_Sampler interface { + isTraceConfig_Sampler() +} + +type TraceConfig_ConstantSampler struct { + ConstantSampler *ConstantSampler `protobuf:"bytes,1,opt,name=constant_sampler,json=constantSampler,proto3,oneof"` +} + +type TraceConfig_TraceIdRatioBased struct { + TraceIdRatioBased *TraceIdRatioBased 
`protobuf:"bytes,2,opt,name=trace_id_ratio_based,json=traceIdRatioBased,proto3,oneof"` +} + +type TraceConfig_RateLimitingSampler struct { + RateLimitingSampler *RateLimitingSampler `protobuf:"bytes,3,opt,name=rate_limiting_sampler,json=rateLimitingSampler,proto3,oneof"` +} + +func (*TraceConfig_ConstantSampler) isTraceConfig_Sampler() {} + +func (*TraceConfig_TraceIdRatioBased) isTraceConfig_Sampler() {} + +func (*TraceConfig_RateLimitingSampler) isTraceConfig_Sampler() {} + +// Sampler that always makes a constant decision on span sampling. +type ConstantSampler struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Decision ConstantSampler_ConstantDecision `protobuf:"varint,1,opt,name=decision,proto3,enum=opentelemetry.proto.trace.v1.ConstantSampler_ConstantDecision" json:"decision,omitempty"` +} + +func (x *ConstantSampler) Reset() { + *x = ConstantSampler{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ConstantSampler) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ConstantSampler) ProtoMessage() {} + +func (x *ConstantSampler) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ConstantSampler.ProtoReflect.Descriptor instead. +func (*ConstantSampler) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{1} +} + +func (x *ConstantSampler) GetDecision() ConstantSampler_ConstantDecision { + if x != nil { + return x.Decision + } + return ConstantSampler_ALWAYS_OFF +} + +// Sampler that tries to uniformly sample traces with a given ratio. +// The ratio of sampling a trace is equal to that of the specified ratio. +type TraceIdRatioBased struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The desired ratio of sampling. Must be within [0.0, 1.0]. + SamplingRatio float64 `protobuf:"fixed64,1,opt,name=samplingRatio,proto3" json:"samplingRatio,omitempty"` +} + +func (x *TraceIdRatioBased) Reset() { + *x = TraceIdRatioBased{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TraceIdRatioBased) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TraceIdRatioBased) ProtoMessage() {} + +func (x *TraceIdRatioBased) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TraceIdRatioBased.ProtoReflect.Descriptor instead. 
+func (*TraceIdRatioBased) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{2} +} + +func (x *TraceIdRatioBased) GetSamplingRatio() float64 { + if x != nil { + return x.SamplingRatio + } + return 0 +} + +// Sampler that tries to sample with a rate per time window. +type RateLimitingSampler struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Rate per second. + Qps int64 `protobuf:"varint,1,opt,name=qps,proto3" json:"qps,omitempty"` +} + +func (x *RateLimitingSampler) Reset() { + *x = RateLimitingSampler{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *RateLimitingSampler) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*RateLimitingSampler) ProtoMessage() {} + +func (x *RateLimitingSampler) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use RateLimitingSampler.ProtoReflect.Descriptor instead. +func (*RateLimitingSampler) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP(), []int{3} +} + +func (x *RateLimitingSampler) GetQps() int64 { + if x != nil { + return x.Qps + } + return 0 +} + +var File_opentelemetry_proto_trace_v1_trace_config_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = []byte{ + 0x0a, 0x2f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x5f, 0x63, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x22, + 0x84, 0x05, 0x0a, 0x0b, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, 0x12, + 0x5a, 0x0a, 0x10, 0x63, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, 0x74, 0x5f, 0x73, 0x61, 0x6d, 0x70, + 0x6c, 0x65, 0x72, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2d, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, 0x6e, + 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x48, 0x00, 0x52, 0x0f, 0x63, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x62, 0x0a, 0x14, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x5f, 0x72, 0x61, 0x74, 0x69, 0x6f, 0x5f, 0x62, 0x61, + 0x73, 0x65, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, + 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, 0x64, 0x48, 0x00, 0x52, 0x11, 0x74, 0x72, + 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 
0x42, 0x61, 0x73, 0x65, 0x64, 0x12, + 0x67, 0x0a, 0x15, 0x72, 0x61, 0x74, 0x65, 0x5f, 0x6c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, + 0x5f, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x31, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x61, + 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, + 0x72, 0x48, 0x00, 0x52, 0x13, 0x72, 0x61, 0x74, 0x65, 0x4c, 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, + 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x18, 0x6d, 0x61, 0x78, 0x5f, + 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x01, 0x28, 0x03, 0x52, 0x15, 0x6d, 0x61, 0x78, 0x4e, + 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, + 0x73, 0x12, 0x3a, 0x0a, 0x1a, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, + 0x6f, 0x66, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, + 0x05, 0x20, 0x01, 0x28, 0x03, 0x52, 0x16, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x4f, 0x66, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x12, 0x54, 0x0a, + 0x28, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, 0x65, 0x72, 0x5f, 0x74, 0x69, + 0x6d, 0x65, 0x64, 0x5f, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, + 0x22, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x54, 0x69, 0x6d, 0x65, 0x64, 0x45, 0x76, + 0x65, 0x6e, 0x74, 0x12, 0x2d, 0x0a, 0x13, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, + 0x72, 0x5f, 0x6f, 0x66, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x07, 0x20, 0x01, 0x28, 0x03, + 0x52, 0x10, 0x6d, 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x4c, 0x69, 0x6e, + 0x6b, 0x73, 0x12, 0x47, 0x0a, 0x21, 0x6d, 0x61, 0x78, 0x5f, 0x6e, 0x75, 0x6d, 0x62, 0x65, 0x72, + 0x5f, 0x6f, 0x66, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x70, + 0x65, 0x72, 0x5f, 0x6c, 0x69, 0x6e, 0x6b, 0x18, 0x08, 0x20, 0x01, 0x28, 0x03, 0x52, 0x1c, 0x6d, + 0x61, 0x78, 0x4e, 0x75, 0x6d, 0x62, 0x65, 0x72, 0x4f, 0x66, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x50, 0x65, 0x72, 0x4c, 0x69, 0x6e, 0x6b, 0x42, 0x09, 0x0a, 0x07, 0x73, + 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x22, 0xb3, 0x01, 0x0a, 0x0f, 0x43, 0x6f, 0x6e, 0x73, 0x74, + 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x5a, 0x0a, 0x08, 0x64, 0x65, + 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x3e, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x2e, 0x43, 0x6f, 0x6e, 0x73, + 0x74, 0x61, 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x52, 0x08, 0x64, 0x65, + 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x22, 0x44, 0x0a, 0x10, 0x43, 0x6f, 0x6e, 0x73, 0x74, 0x61, + 0x6e, 0x74, 0x44, 0x65, 0x63, 0x69, 0x73, 0x69, 0x6f, 0x6e, 0x12, 0x0e, 0x0a, 0x0a, 
0x41, 0x4c, + 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x46, 0x46, 0x10, 0x00, 0x12, 0x0d, 0x0a, 0x09, 0x41, 0x4c, + 0x57, 0x41, 0x59, 0x53, 0x5f, 0x4f, 0x4e, 0x10, 0x01, 0x12, 0x11, 0x0a, 0x0d, 0x41, 0x4c, 0x57, + 0x41, 0x59, 0x53, 0x5f, 0x50, 0x41, 0x52, 0x45, 0x4e, 0x54, 0x10, 0x02, 0x22, 0x39, 0x0a, 0x11, + 0x54, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x42, 0x61, 0x73, 0x65, + 0x64, 0x12, 0x24, 0x0a, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, 0x6e, 0x67, 0x52, 0x61, 0x74, + 0x69, 0x6f, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x69, + 0x6e, 0x67, 0x52, 0x61, 0x74, 0x69, 0x6f, 0x22, 0x27, 0x0a, 0x13, 0x52, 0x61, 0x74, 0x65, 0x4c, + 0x69, 0x6d, 0x69, 0x74, 0x69, 0x6e, 0x67, 0x53, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x72, 0x12, 0x10, + 0x0a, 0x03, 0x71, 0x70, 0x73, 0x18, 0x01, 0x20, 0x01, 0x28, 0x03, 0x52, 0x03, 0x71, 0x70, 0x73, + 0x42, 0x68, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, + 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x42, 0x10, 0x54, 0x72, 0x61, 0x63, 0x65, 0x43, 0x6f, 0x6e, 0x66, 0x69, 0x67, + 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, + 0x72, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce sync.Once + file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc +) + +func file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData) + }) + return file_opentelemetry_proto_trace_v1_trace_config_proto_rawDescData +} + +var file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes = make([]protoimpl.EnumInfo, 1) +var file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes = make([]protoimpl.MessageInfo, 4) +var file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = []interface{}{ + (ConstantSampler_ConstantDecision)(0), // 0: opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision + (*TraceConfig)(nil), // 1: opentelemetry.proto.trace.v1.TraceConfig + (*ConstantSampler)(nil), // 2: opentelemetry.proto.trace.v1.ConstantSampler + (*TraceIdRatioBased)(nil), // 3: opentelemetry.proto.trace.v1.TraceIdRatioBased + (*RateLimitingSampler)(nil), // 4: opentelemetry.proto.trace.v1.RateLimitingSampler +} +var file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = []int32{ + 2, // 0: opentelemetry.proto.trace.v1.TraceConfig.constant_sampler:type_name -> opentelemetry.proto.trace.v1.ConstantSampler + 3, // 1: opentelemetry.proto.trace.v1.TraceConfig.trace_id_ratio_based:type_name -> opentelemetry.proto.trace.v1.TraceIdRatioBased + 4, // 2: opentelemetry.proto.trace.v1.TraceConfig.rate_limiting_sampler:type_name -> opentelemetry.proto.trace.v1.RateLimitingSampler + 0, // 3: opentelemetry.proto.trace.v1.ConstantSampler.decision:type_name -> opentelemetry.proto.trace.v1.ConstantSampler.ConstantDecision + 4, 
// [4:4] is the sub-list for method output_type + 4, // [4:4] is the sub-list for method input_type + 4, // [4:4] is the sub-list for extension type_name + 4, // [4:4] is the sub-list for extension extendee + 0, // [0:4] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_trace_v1_trace_config_proto_init() } +func file_opentelemetry_proto_trace_v1_trace_config_proto_init() { + if File_opentelemetry_proto_trace_v1_trace_config_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceConfig); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ConstantSampler); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TraceIdRatioBased); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*RateLimitingSampler); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*TraceConfig_ConstantSampler)(nil), + (*TraceConfig_TraceIdRatioBased)(nil), + (*TraceConfig_RateLimitingSampler)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc, + NumEnums: 1, + NumMessages: 4, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs, + EnumInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_trace_v1_trace_config_proto_msgTypes, + }.Build() + File_opentelemetry_proto_trace_v1_trace_config_proto = out.File + file_opentelemetry_proto_trace_v1_trace_config_proto_rawDesc = nil + file_opentelemetry_proto_trace_v1_trace_config_proto_goTypes = nil + file_opentelemetry_proto_trace_v1_trace_config_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go new file mode 100644 index 0000000000..402614f1d3 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.go @@ -0,0 +1,252 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.13.0 +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +package v1 + +import ( + proto "github.com/golang/protobuf/proto" + v1 "go.opentelemetry.io/proto/otlp/trace/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +type ExportTraceServiceRequest struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. + // For data coming from a single resource this array will typically contain one + // element. Intermediary nodes (such as OpenTelemetry Collector) that receive + // data from multiple origins typically batch the data before forwarding further and + // in that case this array will contain multiple elements. + ResourceSpans []*v1.ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *ExportTraceServiceRequest) Reset() { + *x = ExportTraceServiceRequest{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTraceServiceRequest) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTraceServiceRequest) ProtoMessage() {} + +func (x *ExportTraceServiceRequest) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTraceServiceRequest.ProtoReflect.Descriptor instead. 
+func (*ExportTraceServiceRequest) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{0} +} + +func (x *ExportTraceServiceRequest) GetResourceSpans() []*v1.ResourceSpans { + if x != nil { + return x.ResourceSpans + } + return nil +} + +type ExportTraceServiceResponse struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields +} + +func (x *ExportTraceServiceResponse) Reset() { + *x = ExportTraceServiceResponse{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ExportTraceServiceResponse) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ExportTraceServiceResponse) ProtoMessage() {} + +func (x *ExportTraceServiceResponse) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ExportTraceServiceResponse.ProtoReflect.Descriptor instead. +func (*ExportTraceServiceResponse) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP(), []int{1} +} + +var File_opentelemetry_proto_collector_trace_v1_trace_service_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = []byte{ + 0x0a, 0x3a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x26, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, + 0x76, 0x31, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x6f, + 0x0a, 0x19, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, + 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x12, 0x52, 0x0a, 0x0e, 0x72, + 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, + 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, + 0x1c, 0x0a, 0x1a, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, 0x32, 0xa2, 0x01, + 0x0a, 0x0c, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 
0x63, 0x65, 0x12, 0x91, + 0x01, 0x0a, 0x06, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x12, 0x41, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, + 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x71, 0x75, 0x65, 0x73, 0x74, 0x1a, 0x42, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x45, 0x78, 0x70, 0x6f, 0x72, 0x74, 0x54, 0x72, 0x61, 0x63, + 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x52, 0x65, 0x73, 0x70, 0x6f, 0x6e, 0x73, 0x65, + 0x22, 0x00, 0x42, 0x73, 0x0a, 0x29, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6c, + 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, + 0x11, 0x54, 0x72, 0x61, 0x63, 0x65, 0x53, 0x65, 0x72, 0x76, 0x69, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x31, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6c, 0x6c, 0x65, 0x63, 0x74, 0x6f, 0x72, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce sync.Once + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc +) + +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData) + }) + return file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDescData +} + +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes = make([]protoimpl.MessageInfo, 2) +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = []interface{}{ + (*ExportTraceServiceRequest)(nil), // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + (*ExportTraceServiceResponse)(nil), // 1: opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + (*v1.ResourceSpans)(nil), // 2: opentelemetry.proto.trace.v1.ResourceSpans +} +var file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = []int32{ + 2, // 0: opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 0, // 1: opentelemetry.proto.collector.trace.v1.TraceService.Export:input_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceRequest + 1, // 2: opentelemetry.proto.collector.trace.v1.TraceService.Export:output_type -> opentelemetry.proto.collector.trace.v1.ExportTraceServiceResponse + 2, // [2:3] is the sub-list for method output_type + 1, // [1:2] is the sub-list for method input_type + 1, // 
[1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() } +func file_opentelemetry_proto_collector_trace_v1_trace_service_proto_init() { + if File_opentelemetry_proto_collector_trace_v1_trace_service_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceRequest); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ExportTraceServiceResponse); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc, + NumEnums: 0, + NumMessages: 2, + NumExtensions: 0, + NumServices: 1, + }, + GoTypes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_collector_trace_v1_trace_service_proto_msgTypes, + }.Build() + File_opentelemetry_proto_collector_trace_v1_trace_service_proto = out.File + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_rawDesc = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_goTypes = nil + file_opentelemetry_proto_collector_trace_v1_trace_service_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go new file mode 100644 index 0000000000..18dff3d03e --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service.pb.gw.go @@ -0,0 +1,169 @@ +// Code generated by protoc-gen-grpc-gateway. DO NOT EDIT. +// source: opentelemetry/proto/collector/trace/v1/trace_service.proto + +/* +Package v1 is a reverse proxy. + +It translates gRPC into RESTful JSON APIs. 
+*/ +package v1 + +import ( + "context" + "io" + "net/http" + + "github.com/golang/protobuf/descriptor" + "github.com/golang/protobuf/proto" + "github.com/grpc-ecosystem/grpc-gateway/runtime" + "github.com/grpc-ecosystem/grpc-gateway/utilities" + "google.golang.org/grpc" + "google.golang.org/grpc/codes" + "google.golang.org/grpc/grpclog" + "google.golang.org/grpc/metadata" + "google.golang.org/grpc/status" +) + +// Suppress "imported and not used" errors +var _ codes.Code +var _ io.Reader +var _ status.Status +var _ = runtime.String +var _ = utilities.NewDoubleArray +var _ = descriptor.ForMessage +var _ = metadata.Join + +func request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, client TraceServiceClient, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := client.Export(ctx, &protoReq, grpc.Header(&metadata.HeaderMD), grpc.Trailer(&metadata.TrailerMD)) + return msg, metadata, err + +} + +func local_request_TraceService_Export_0(ctx context.Context, marshaler runtime.Marshaler, server TraceServiceServer, req *http.Request, pathParams map[string]string) (proto.Message, runtime.ServerMetadata, error) { + var protoReq ExportTraceServiceRequest + var metadata runtime.ServerMetadata + + newReader, berr := utilities.IOReaderFactory(req.Body) + if berr != nil { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", berr) + } + if err := marshaler.NewDecoder(newReader()).Decode(&protoReq); err != nil && err != io.EOF { + return nil, metadata, status.Errorf(codes.InvalidArgument, "%v", err) + } + + msg, err := server.Export(ctx, &protoReq) + return msg, metadata, err + +} + +// RegisterTraceServiceHandlerServer registers the http handlers for service TraceService to "mux". +// UnaryRPC :call TraceServiceServer directly. +// StreamingRPC :currently unsupported pending https://github.com/grpc/grpc-go/issues/906. +// Note that using this registration option will cause many gRPC library features to stop working. Consider using RegisterTraceServiceHandlerFromEndpoint instead. 
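// A hedged usage sketch (not part of the generated file): one plausible way to
// stand up this reverse proxy. The collector endpoint "localhost:55680" and the
// listen address ":8080" are illustrative assumptions. Per
// pattern_TraceService_Export_0 declared at the end of this file, the proxy
// maps POST /v1/trace onto TraceService.Export over the dialed gRPC connection.
//
//	ctx := context.Background()
//	mux := runtime.NewServeMux()
//	opts := []grpc.DialOption{grpc.WithInsecure()}
//	if err := RegisterTraceServiceHandlerFromEndpoint(ctx, mux, "localhost:55680", opts); err != nil {
//		panic(err) // illustrative error handling only
//	}
//	_ = http.ListenAndServe(":8080", mux)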
+func RegisterTraceServiceHandlerServer(ctx context.Context, mux *runtime.ServeMux, server TraceServiceServer) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + var stream runtime.ServerTransportStream + ctx = grpc.NewContextWithServerTransportStream(ctx, &stream) + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateIncomingContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := local_request_TraceService_Export_0(rctx, inboundMarshaler, server, req, pathParams) + md.HeaderMD, md.TrailerMD = metadata.Join(md.HeaderMD, stream.Header()), metadata.Join(md.TrailerMD, stream.Trailer()) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +// RegisterTraceServiceHandlerFromEndpoint is same as RegisterTraceServiceHandler but +// automatically dials to "endpoint" and closes the connection when "ctx" gets done. +func RegisterTraceServiceHandlerFromEndpoint(ctx context.Context, mux *runtime.ServeMux, endpoint string, opts []grpc.DialOption) (err error) { + conn, err := grpc.Dial(endpoint, opts...) + if err != nil { + return err + } + defer func() { + if err != nil { + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + return + } + go func() { + <-ctx.Done() + if cerr := conn.Close(); cerr != nil { + grpclog.Infof("Failed to close conn to %s: %v", endpoint, cerr) + } + }() + }() + + return RegisterTraceServiceHandler(ctx, mux, conn) +} + +// RegisterTraceServiceHandler registers the http handlers for service TraceService to "mux". +// The handlers forward requests to the grpc endpoint over "conn". +func RegisterTraceServiceHandler(ctx context.Context, mux *runtime.ServeMux, conn *grpc.ClientConn) error { + return RegisterTraceServiceHandlerClient(ctx, mux, NewTraceServiceClient(conn)) +} + +// RegisterTraceServiceHandlerClient registers the http handlers for service TraceService +// to "mux". The handlers forward requests to the grpc endpoint over the given implementation of "TraceServiceClient". +// Note: the gRPC framework executes interceptors within the gRPC handler. If the passed in "TraceServiceClient" +// doesn't go through the normal gRPC flow (creating a gRPC client etc.) then it will be up to the passed in +// "TraceServiceClient" to call the correct interceptors. 
+func RegisterTraceServiceHandlerClient(ctx context.Context, mux *runtime.ServeMux, client TraceServiceClient) error { + + mux.Handle("POST", pattern_TraceService_Export_0, func(w http.ResponseWriter, req *http.Request, pathParams map[string]string) { + ctx, cancel := context.WithCancel(req.Context()) + defer cancel() + inboundMarshaler, outboundMarshaler := runtime.MarshalerForRequest(mux, req) + rctx, err := runtime.AnnotateContext(ctx, mux, req) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + resp, md, err := request_TraceService_Export_0(rctx, inboundMarshaler, client, req, pathParams) + ctx = runtime.NewServerMetadataContext(ctx, md) + if err != nil { + runtime.HTTPError(ctx, mux, outboundMarshaler, w, req, err) + return + } + + forward_TraceService_Export_0(ctx, mux, outboundMarshaler, w, req, resp, mux.GetForwardResponseOptions()...) + + }) + + return nil +} + +var ( + pattern_TraceService_Export_0 = runtime.MustPattern(runtime.NewPattern(1, []int{2, 0, 2, 1}, []string{"v1", "trace"}, "", runtime.AssumeColonVerbOpt(true))) +) + +var ( + forward_TraceService_Export_0 = runtime.ForwardResponseMessage +) diff --git a/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go new file mode 100644 index 0000000000..4e4c24c0c8 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/collector/trace/v1/trace_service_grpc.pb.go @@ -0,0 +1,101 @@ +// Code generated by protoc-gen-go-grpc. DO NOT EDIT. + +package v1 + +import ( + context "context" + grpc "google.golang.org/grpc" + codes "google.golang.org/grpc/codes" + status "google.golang.org/grpc/status" +) + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion7 + +// TraceServiceClient is the client API for TraceService service. +// +// For semantics around ctx use and closing/ending streaming RPCs, please refer to https://pkg.go.dev/google.golang.org/grpc/?tab=doc#ClientConn.NewStream. +type TraceServiceClient interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) +} + +type traceServiceClient struct { + cc grpc.ClientConnInterface +} + +func NewTraceServiceClient(cc grpc.ClientConnInterface) TraceServiceClient { + return &traceServiceClient{cc} +} + +func (c *traceServiceClient) Export(ctx context.Context, in *ExportTraceServiceRequest, opts ...grpc.CallOption) (*ExportTraceServiceResponse, error) { + out := new(ExportTraceServiceResponse) + err := c.cc.Invoke(ctx, "/opentelemetry.proto.collector.trace.v1.TraceService/Export", in, out, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// TraceServiceServer is the server API for TraceService service. +// All implementations must embed UnimplementedTraceServiceServer +// for forward compatibility +type TraceServiceServer interface { + // For performance reasons, it is recommended to keep this RPC + // alive for the entire life of the application. + Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) + mustEmbedUnimplementedTraceServiceServer() +} + +// UnimplementedTraceServiceServer must be embedded to have forward compatible implementations. 
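// A hedged sketch (not part of the generated file) of a concrete server: it
// satisfies TraceServiceServer by embedding the type defined just below, as the
// interface's mustEmbed method requires. The name "collector" and the no-op
// span handling are illustrative assumptions.
//
//	type collector struct {
//		UnimplementedTraceServiceServer
//	}
//
//	func (c *collector) Export(ctx context.Context, req *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) {
//		_ = req.GetResourceSpans() // a real collector would persist or forward these spans
//		return &ExportTraceServiceResponse{}, nil
//	}
//
//	// Wire-up: s := grpc.NewServer(); RegisterTraceServiceServer(s, &collector{})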
+type UnimplementedTraceServiceServer struct { +} + +func (UnimplementedTraceServiceServer) Export(context.Context, *ExportTraceServiceRequest) (*ExportTraceServiceResponse, error) { + return nil, status.Errorf(codes.Unimplemented, "method Export not implemented") +} +func (UnimplementedTraceServiceServer) mustEmbedUnimplementedTraceServiceServer() {} + +// UnsafeTraceServiceServer may be embedded to opt out of forward compatibility for this service. +// Use of this interface is not recommended, as added methods to TraceServiceServer will +// result in compilation errors. +type UnsafeTraceServiceServer interface { + mustEmbedUnimplementedTraceServiceServer() +} + +func RegisterTraceServiceServer(s grpc.ServiceRegistrar, srv TraceServiceServer) { + s.RegisterService(&_TraceService_serviceDesc, srv) +} + +func _TraceService_Export_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ExportTraceServiceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(TraceServiceServer).Export(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/opentelemetry.proto.collector.trace.v1.TraceService/Export", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(TraceServiceServer).Export(ctx, req.(*ExportTraceServiceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _TraceService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "opentelemetry.proto.collector.trace.v1.TraceService", + HandlerType: (*TraceServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Export", + Handler: _TraceService_Export_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "opentelemetry/proto/collector/trace/v1/trace_service.proto", +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go new file mode 100644 index 0000000000..de75b592ca --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/common/v1/common.pb.go @@ -0,0 +1,679 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.13.0 +// source: opentelemetry/proto/common/v1/common.proto + +package v1 + +import ( + proto "github.com/golang/protobuf/proto" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. 
+const _ = proto.ProtoPackageIsVersion4 + +// AnyValue is used to represent any type of attribute value. AnyValue may contain a +// primitive value such as a string or integer or it may contain an arbitrary nested +// object containing arrays, key-value lists and primitives. +type AnyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The value is one of the listed fields. It is valid for all values to be unspecified + // in which case this AnyValue is considered to be "empty". + // + // Types that are assignable to Value: + // *AnyValue_StringValue + // *AnyValue_BoolValue + // *AnyValue_IntValue + // *AnyValue_DoubleValue + // *AnyValue_ArrayValue + // *AnyValue_KvlistValue + // *AnyValue_BytesValue + Value isAnyValue_Value `protobuf_oneof:"value"` +} + +func (x *AnyValue) Reset() { + *x = AnyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *AnyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*AnyValue) ProtoMessage() {} + +func (x *AnyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use AnyValue.ProtoReflect.Descriptor instead. +func (*AnyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{0} +} + +func (m *AnyValue) GetValue() isAnyValue_Value { + if m != nil { + return m.Value + } + return nil +} + +func (x *AnyValue) GetStringValue() string { + if x, ok := x.GetValue().(*AnyValue_StringValue); ok { + return x.StringValue + } + return "" +} + +func (x *AnyValue) GetBoolValue() bool { + if x, ok := x.GetValue().(*AnyValue_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (x *AnyValue) GetIntValue() int64 { + if x, ok := x.GetValue().(*AnyValue_IntValue); ok { + return x.IntValue + } + return 0 +} + +func (x *AnyValue) GetDoubleValue() float64 { + if x, ok := x.GetValue().(*AnyValue_DoubleValue); ok { + return x.DoubleValue + } + return 0 +} + +func (x *AnyValue) GetArrayValue() *ArrayValue { + if x, ok := x.GetValue().(*AnyValue_ArrayValue); ok { + return x.ArrayValue + } + return nil +} + +func (x *AnyValue) GetKvlistValue() *KeyValueList { + if x, ok := x.GetValue().(*AnyValue_KvlistValue); ok { + return x.KvlistValue + } + return nil +} + +func (x *AnyValue) GetBytesValue() []byte { + if x, ok := x.GetValue().(*AnyValue_BytesValue); ok { + return x.BytesValue + } + return nil +} + +type isAnyValue_Value interface { + isAnyValue_Value() +} + +type AnyValue_StringValue struct { + StringValue string `protobuf:"bytes,1,opt,name=string_value,json=stringValue,proto3,oneof"` +} + +type AnyValue_BoolValue struct { + BoolValue bool `protobuf:"varint,2,opt,name=bool_value,json=boolValue,proto3,oneof"` +} + +type AnyValue_IntValue struct { + IntValue int64 `protobuf:"varint,3,opt,name=int_value,json=intValue,proto3,oneof"` +} + +type AnyValue_DoubleValue struct { + DoubleValue float64 `protobuf:"fixed64,4,opt,name=double_value,json=doubleValue,proto3,oneof"` +} + +type AnyValue_ArrayValue struct { + ArrayValue *ArrayValue 
`protobuf:"bytes,5,opt,name=array_value,json=arrayValue,proto3,oneof"` +} + +type AnyValue_KvlistValue struct { + KvlistValue *KeyValueList `protobuf:"bytes,6,opt,name=kvlist_value,json=kvlistValue,proto3,oneof"` +} + +type AnyValue_BytesValue struct { + BytesValue []byte `protobuf:"bytes,7,opt,name=bytes_value,json=bytesValue,proto3,oneof"` +} + +func (*AnyValue_StringValue) isAnyValue_Value() {} + +func (*AnyValue_BoolValue) isAnyValue_Value() {} + +func (*AnyValue_IntValue) isAnyValue_Value() {} + +func (*AnyValue_DoubleValue) isAnyValue_Value() {} + +func (*AnyValue_ArrayValue) isAnyValue_Value() {} + +func (*AnyValue_KvlistValue) isAnyValue_Value() {} + +func (*AnyValue_BytesValue) isAnyValue_Value() {} + +// ArrayValue is a list of AnyValue messages. We need ArrayValue as a message +// since oneof in AnyValue does not allow repeated fields. +type ArrayValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Array of values. The array may be empty (contain 0 elements). + Values []*AnyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *ArrayValue) Reset() { + *x = ArrayValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ArrayValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ArrayValue) ProtoMessage() {} + +func (x *ArrayValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ArrayValue.ProtoReflect.Descriptor instead. +func (*ArrayValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{1} +} + +func (x *ArrayValue) GetValues() []*AnyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValueList is a list of KeyValue messages. We need KeyValueList as a message +// since `oneof` in AnyValue does not allow repeated fields. Everywhere else where we need +// a list of KeyValue messages (e.g. in Span) we use `repeated KeyValue` directly to +// avoid unnecessary extra wrapping (which slows down the protocol). The 2 approaches +// are semantically equivalent. +type KeyValueList struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A collection of key/value pairs of key-value pairs. The list may be empty (may + // contain 0 elements). 
+ Values []*KeyValue `protobuf:"bytes,1,rep,name=values,proto3" json:"values,omitempty"` +} + +func (x *KeyValueList) Reset() { + *x = KeyValueList{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValueList) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValueList) ProtoMessage() {} + +func (x *KeyValueList) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValueList.ProtoReflect.Descriptor instead. +func (*KeyValueList) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{2} +} + +func (x *KeyValueList) GetValues() []*KeyValue { + if x != nil { + return x.Values + } + return nil +} + +// KeyValue is a key-value pair that is used to store Span attributes, Link +// attributes, etc. +type KeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value *AnyValue `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *KeyValue) Reset() { + *x = KeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *KeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*KeyValue) ProtoMessage() {} + +func (x *KeyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use KeyValue.ProtoReflect.Descriptor instead. +func (*KeyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{3} +} + +func (x *KeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *KeyValue) GetValue() *AnyValue { + if x != nil { + return x.Value + } + return nil +} + +// StringKeyValue is a pair of key/value strings. This is the simpler (and faster) version +// of KeyValue that only supports string values. +// +// Deprecated: Do not use. 
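// A hedged sketch (not part of the generated file) contrasting the deprecated
// string-only pair below with the preferred typed attribute built from the
// KeyValue and AnyValue messages above; the key and value shown are
// illustrative assumptions.
//
//	attr := &KeyValue{
//		Key:   "http.method",
//		Value: &AnyValue{Value: &AnyValue_StringValue{StringValue: "GET"}},
//	}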
+type StringKeyValue struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + Key string `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value string `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (x *StringKeyValue) Reset() { + *x = StringKeyValue{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *StringKeyValue) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*StringKeyValue) ProtoMessage() {} + +func (x *StringKeyValue) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use StringKeyValue.ProtoReflect.Descriptor instead. +func (*StringKeyValue) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{4} +} + +func (x *StringKeyValue) GetKey() string { + if x != nil { + return x.Key + } + return "" +} + +func (x *StringKeyValue) GetValue() string { + if x != nil { + return x.Value + } + return "" +} + +// InstrumentationLibrary is a message representing the instrumentation library information +// such as the fully qualified name and version. +type InstrumentationLibrary struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An empty instrumentation library name means the name is unknown. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + Version string `protobuf:"bytes,2,opt,name=version,proto3" json:"version,omitempty"` +} + +func (x *InstrumentationLibrary) Reset() { + *x = InstrumentationLibrary{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstrumentationLibrary) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstrumentationLibrary) ProtoMessage() {} + +func (x *InstrumentationLibrary) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_common_v1_common_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstrumentationLibrary.ProtoReflect.Descriptor instead. 
+func (*InstrumentationLibrary) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP(), []int{5} +} + +func (x *InstrumentationLibrary) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *InstrumentationLibrary) GetVersion() string { + if x != nil { + return x.Version + } + return "" +} + +var File_opentelemetry_proto_common_v1_common_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_common_v1_common_proto_rawDesc = []byte{ + 0x0a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1d, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x22, 0xe0, 0x02, 0x0a, 0x08, + 0x41, 0x6e, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, 0x0c, 0x73, 0x74, 0x72, 0x69, + 0x6e, 0x67, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x48, 0x00, + 0x52, 0x0b, 0x73, 0x74, 0x72, 0x69, 0x6e, 0x67, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1f, 0x0a, + 0x0a, 0x62, 0x6f, 0x6f, 0x6c, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x08, 0x48, 0x00, 0x52, 0x09, 0x62, 0x6f, 0x6f, 0x6c, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x1d, + 0x0a, 0x09, 0x69, 0x6e, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, + 0x03, 0x48, 0x00, 0x52, 0x08, 0x69, 0x6e, 0x74, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x23, 0x0a, + 0x0c, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x04, 0x20, + 0x01, 0x28, 0x01, 0x48, 0x00, 0x52, 0x0b, 0x64, 0x6f, 0x75, 0x62, 0x6c, 0x65, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x4c, 0x0a, 0x0b, 0x61, 0x72, 0x72, 0x61, 0x79, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x48, 0x00, 0x52, 0x0a, 0x61, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, + 0x12, 0x50, 0x0a, 0x0c, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x5f, 0x76, 0x61, 0x6c, 0x75, 0x65, + 0x18, 0x06, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, + 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, + 0x69, 0x73, 0x74, 0x48, 0x00, 0x52, 0x0b, 0x6b, 0x76, 0x6c, 0x69, 0x73, 0x74, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x12, 0x21, 0x0a, 0x0b, 0x62, 0x79, 0x74, 0x65, 0x73, 0x5f, 0x76, 0x61, 0x6c, 0x75, + 0x65, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0c, 0x48, 0x00, 0x52, 0x0a, 0x62, 0x79, 0x74, 0x65, 0x73, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x42, 0x07, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x4d, + 0x0a, 0x0a, 0x41, 0x72, 0x72, 0x61, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x3f, 0x0a, 0x06, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 
0x79, + 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x4f, 0x0a, + 0x0c, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x4c, 0x69, 0x73, 0x74, 0x12, 0x3f, 0x0a, + 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, + 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, + 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, + 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x06, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x73, 0x22, 0x5b, + 0x0a, 0x08, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, 0x03, 0x6b, 0x65, + 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, 0x3d, 0x0a, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, + 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x41, 0x6e, 0x79, 0x56, + 0x61, 0x6c, 0x75, 0x65, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x3c, 0x0a, 0x0e, 0x53, + 0x74, 0x72, 0x69, 0x6e, 0x67, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x10, 0x0a, + 0x03, 0x6b, 0x65, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x03, 0x6b, 0x65, 0x79, 0x12, + 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, + 0x76, 0x61, 0x6c, 0x75, 0x65, 0x3a, 0x02, 0x18, 0x01, 0x22, 0x46, 0x0a, 0x16, 0x49, 0x6e, 0x73, + 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, + 0x61, 0x72, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x18, 0x0a, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, + 0x6f, 0x6e, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x07, 0x76, 0x65, 0x72, 0x73, 0x69, 0x6f, + 0x6e, 0x42, 0x5b, 0x0a, 0x20, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, + 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, + 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x42, 0x0b, 0x43, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x28, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x62, 0x06, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce sync.Once + file_opentelemetry_proto_common_v1_common_proto_rawDescData = file_opentelemetry_proto_common_v1_common_proto_rawDesc +) + +func file_opentelemetry_proto_common_v1_common_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_common_v1_common_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_common_v1_common_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_common_v1_common_proto_rawDescData) + }) + return file_opentelemetry_proto_common_v1_common_proto_rawDescData +} + +var file_opentelemetry_proto_common_v1_common_proto_msgTypes = make([]protoimpl.MessageInfo, 6) +var file_opentelemetry_proto_common_v1_common_proto_goTypes = []interface{}{ + (*AnyValue)(nil), // 0: opentelemetry.proto.common.v1.AnyValue + (*ArrayValue)(nil), // 1: opentelemetry.proto.common.v1.ArrayValue + (*KeyValueList)(nil), // 2: 
opentelemetry.proto.common.v1.KeyValueList + (*KeyValue)(nil), // 3: opentelemetry.proto.common.v1.KeyValue + (*StringKeyValue)(nil), // 4: opentelemetry.proto.common.v1.StringKeyValue + (*InstrumentationLibrary)(nil), // 5: opentelemetry.proto.common.v1.InstrumentationLibrary +} +var file_opentelemetry_proto_common_v1_common_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.common.v1.AnyValue.array_value:type_name -> opentelemetry.proto.common.v1.ArrayValue + 2, // 1: opentelemetry.proto.common.v1.AnyValue.kvlist_value:type_name -> opentelemetry.proto.common.v1.KeyValueList + 0, // 2: opentelemetry.proto.common.v1.ArrayValue.values:type_name -> opentelemetry.proto.common.v1.AnyValue + 3, // 3: opentelemetry.proto.common.v1.KeyValueList.values:type_name -> opentelemetry.proto.common.v1.KeyValue + 0, // 4: opentelemetry.proto.common.v1.KeyValue.value:type_name -> opentelemetry.proto.common.v1.AnyValue + 5, // [5:5] is the sub-list for method output_type + 5, // [5:5] is the sub-list for method input_type + 5, // [5:5] is the sub-list for extension type_name + 5, // [5:5] is the sub-list for extension extendee + 0, // [0:5] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_common_v1_common_proto_init() } +func file_opentelemetry_proto_common_v1_common_proto_init() { + if File_opentelemetry_proto_common_v1_common_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*AnyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ArrayValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValueList); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*KeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*StringKeyValue); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InstrumentationLibrary); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + file_opentelemetry_proto_common_v1_common_proto_msgTypes[0].OneofWrappers = []interface{}{ + (*AnyValue_StringValue)(nil), + (*AnyValue_BoolValue)(nil), + (*AnyValue_IntValue)(nil), + (*AnyValue_DoubleValue)(nil), + (*AnyValue_ArrayValue)(nil), + (*AnyValue_KvlistValue)(nil), + (*AnyValue_BytesValue)(nil), + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + 
RawDescriptor: file_opentelemetry_proto_common_v1_common_proto_rawDesc, + NumEnums: 0, + NumMessages: 6, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_common_v1_common_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_common_v1_common_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_common_v1_common_proto_msgTypes, + }.Build() + File_opentelemetry_proto_common_v1_common_proto = out.File + file_opentelemetry_proto_common_v1_common_proto_rawDesc = nil + file_opentelemetry_proto_common_v1_common_proto_goTypes = nil + file_opentelemetry_proto_common_v1_common_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go new file mode 100644 index 0000000000..ac347acb23 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/resource/v1/resource.pb.go @@ -0,0 +1,194 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.13.0 +// source: opentelemetry/proto/resource/v1/resource.proto + +package v1 + +import ( + proto "github.com/golang/protobuf/proto" + v1 "go.opentelemetry.io/proto/otlp/common/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// Resource information. +type Resource struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // Set of labels that describe the resource. + Attributes []*v1.KeyValue `protobuf:"bytes,1,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, then + // no attributes were dropped. 
+ DroppedAttributesCount uint32 `protobuf:"varint,2,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Resource) Reset() { + *x = Resource{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Resource) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Resource) ProtoMessage() {} + +func (x *Resource) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Resource.ProtoReflect.Descriptor instead. +func (*Resource) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP(), []int{0} +} + +func (x *Resource) GetAttributes() []*v1.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Resource) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_resource_v1_resource_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = []byte{ + 0x0a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, + 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x12, 0x1f, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, + 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, + 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x8d, 0x01, + 0x0a, 0x08, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, + 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, + 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, + 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, + 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x42, 0x61, 0x0a, + 0x22, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, + 0x2e, 0x76, 0x31, 0x42, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x50, 0x72, 0x6f, + 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x2a, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 
0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, + 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2f, 0x76, 0x31, + 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce sync.Once + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = file_opentelemetry_proto_resource_v1_resource_proto_rawDesc +) + +func file_opentelemetry_proto_resource_v1_resource_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_resource_v1_resource_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_resource_v1_resource_proto_rawDescData) + }) + return file_opentelemetry_proto_resource_v1_resource_proto_rawDescData +} + +var file_opentelemetry_proto_resource_v1_resource_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_opentelemetry_proto_resource_v1_resource_proto_goTypes = []interface{}{ + (*Resource)(nil), // 0: opentelemetry.proto.resource.v1.Resource + (*v1.KeyValue)(nil), // 1: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = []int32{ + 1, // 0: opentelemetry.proto.resource.v1.Resource.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_resource_v1_resource_proto_init() } +func file_opentelemetry_proto_resource_v1_resource_proto_init() { + if File_opentelemetry_proto_resource_v1_resource_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_resource_v1_resource_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Resource); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_resource_v1_resource_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_resource_v1_resource_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_resource_v1_resource_proto_depIdxs, + MessageInfos: file_opentelemetry_proto_resource_v1_resource_proto_msgTypes, + }.Build() + File_opentelemetry_proto_resource_v1_resource_proto = out.File + file_opentelemetry_proto_resource_v1_resource_proto_rawDesc = nil + file_opentelemetry_proto_resource_v1_resource_proto_goTypes = nil + file_opentelemetry_proto_resource_v1_resource_proto_depIdxs = nil +} diff --git a/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go new file mode 100644 index 0000000000..abf0b4c168 --- /dev/null +++ b/vendor/go.opentelemetry.io/proto/otlp/trace/v1/trace.pb.go @@ -0,0 +1,1150 @@ +// Copyright 2019, OpenTelemetry Authors +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.23.0 +// protoc v3.13.0 +// source: opentelemetry/proto/trace/v1/trace.proto + +package v1 + +import ( + proto "github.com/golang/protobuf/proto" + v11 "go.opentelemetry.io/proto/otlp/common/v1" + v1 "go.opentelemetry.io/proto/otlp/resource/v1" + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + reflect "reflect" + sync "sync" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// This is a compile-time assertion that a sufficiently up-to-date version +// of the legacy proto package is being used. +const _ = proto.ProtoPackageIsVersion4 + +// SpanKind is the type of span. Can be used to specify additional relationships between spans +// in addition to a parent/child relationship. +type Span_SpanKind int32 + +const ( + // Unspecified. Do NOT use as default. + // Implementations MAY assume SpanKind to be INTERNAL when receiving UNSPECIFIED. + Span_SPAN_KIND_UNSPECIFIED Span_SpanKind = 0 + // Indicates that the span represents an internal operation within an application, + // as opposed to an operation happening at the boundaries. Default value. + Span_SPAN_KIND_INTERNAL Span_SpanKind = 1 + // Indicates that the span covers server-side handling of an RPC or other + // remote network request. + Span_SPAN_KIND_SERVER Span_SpanKind = 2 + // Indicates that the span describes a request to some remote service. + Span_SPAN_KIND_CLIENT Span_SpanKind = 3 + // Indicates that the span describes a producer sending a message to a broker. + // Unlike CLIENT and SERVER, there is often no direct critical path latency relationship + // between producer and consumer spans. A PRODUCER span ends when the message was accepted + // by the broker while the logical processing of the message might span a much longer time. + Span_SPAN_KIND_PRODUCER Span_SpanKind = 4 + // Indicates that the span describes consumer receiving a message from a broker. + // Like the PRODUCER kind, there is often no direct critical path latency relationship + // between producer and consumer spans. + Span_SPAN_KIND_CONSUMER Span_SpanKind = 5 +) + +// Enum value maps for Span_SpanKind. 
+var ( + Span_SpanKind_name = map[int32]string{ + 0: "SPAN_KIND_UNSPECIFIED", + 1: "SPAN_KIND_INTERNAL", + 2: "SPAN_KIND_SERVER", + 3: "SPAN_KIND_CLIENT", + 4: "SPAN_KIND_PRODUCER", + 5: "SPAN_KIND_CONSUMER", + } + Span_SpanKind_value = map[string]int32{ + "SPAN_KIND_UNSPECIFIED": 0, + "SPAN_KIND_INTERNAL": 1, + "SPAN_KIND_SERVER": 2, + "SPAN_KIND_CLIENT": 3, + "SPAN_KIND_PRODUCER": 4, + "SPAN_KIND_CONSUMER": 5, + } +) + +func (x Span_SpanKind) Enum() *Span_SpanKind { + p := new(Span_SpanKind) + *p = x + return p +} + +func (x Span_SpanKind) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Span_SpanKind) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0].Descriptor() +} + +func (Span_SpanKind) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[0] +} + +func (x Span_SpanKind) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Span_SpanKind.Descriptor instead. +func (Span_SpanKind) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +// For the semantics of status codes see +// https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/trace/api.md#set-status +type Status_StatusCode int32 + +const ( + // The default status. + Status_STATUS_CODE_UNSET Status_StatusCode = 0 + // The Span has been validated by an Application developers or Operator to have + // completed successfully. + Status_STATUS_CODE_OK Status_StatusCode = 1 + // The Span contains an error. + Status_STATUS_CODE_ERROR Status_StatusCode = 2 +) + +// Enum value maps for Status_StatusCode. +var ( + Status_StatusCode_name = map[int32]string{ + 0: "STATUS_CODE_UNSET", + 1: "STATUS_CODE_OK", + 2: "STATUS_CODE_ERROR", + } + Status_StatusCode_value = map[string]int32{ + "STATUS_CODE_UNSET": 0, + "STATUS_CODE_OK": 1, + "STATUS_CODE_ERROR": 2, + } +) + +func (x Status_StatusCode) Enum() *Status_StatusCode { + p := new(Status_StatusCode) + *p = x + return p +} + +func (x Status_StatusCode) String() string { + return protoimpl.X.EnumStringOf(x.Descriptor(), protoreflect.EnumNumber(x)) +} + +func (Status_StatusCode) Descriptor() protoreflect.EnumDescriptor { + return file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1].Descriptor() +} + +func (Status_StatusCode) Type() protoreflect.EnumType { + return &file_opentelemetry_proto_trace_v1_trace_proto_enumTypes[1] +} + +func (x Status_StatusCode) Number() protoreflect.EnumNumber { + return protoreflect.EnumNumber(x) +} + +// Deprecated: Use Status_StatusCode.Descriptor instead. +func (Status_StatusCode) EnumDescriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4, 0} +} + +// TracesData represents the traces data that can be stored in a persistent storage, +// OR can be embedded by other protocols that transfer OTLP traces data but do +// not implement the OTLP protocol. +// +// The main difference between this message and collector protocol is that +// in this message there will not be any "control" or "metadata" specific to +// OTLP protocol. +// +// When new fields are added into this message, the OTLP request MUST be updated +// as well. +type TracesData struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // An array of ResourceSpans. 
+ // For data coming from a single resource this array will typically contain + // one element. Intermediary nodes that receive data from multiple origins + // typically batch the data before forwarding further and in that case this + // array will contain multiple elements. + ResourceSpans []*ResourceSpans `protobuf:"bytes,1,rep,name=resource_spans,json=resourceSpans,proto3" json:"resource_spans,omitempty"` +} + +func (x *TracesData) Reset() { + *x = TracesData{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *TracesData) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*TracesData) ProtoMessage() {} + +func (x *TracesData) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use TracesData.ProtoReflect.Descriptor instead. +func (*TracesData) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{0} +} + +func (x *TracesData) GetResourceSpans() []*ResourceSpans { + if x != nil { + return x.ResourceSpans + } + return nil +} + +// A collection of InstrumentationLibrarySpans from a Resource. +type ResourceSpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The resource for the spans in this message. + // If this field is not set then no resource info is known. + Resource *v1.Resource `protobuf:"bytes,1,opt,name=resource,proto3" json:"resource,omitempty"` + // A list of InstrumentationLibrarySpans that originate from a resource. + InstrumentationLibrarySpans []*InstrumentationLibrarySpans `protobuf:"bytes,2,rep,name=instrumentation_library_spans,json=instrumentationLibrarySpans,proto3" json:"instrumentation_library_spans,omitempty"` + // This schema_url applies to the data in the "resource" field. It does not apply + // to the data in the "instrumentation_library_spans" field which have their own + // schema_url field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *ResourceSpans) Reset() { + *x = ResourceSpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *ResourceSpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*ResourceSpans) ProtoMessage() {} + +func (x *ResourceSpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use ResourceSpans.ProtoReflect.Descriptor instead. 
+func (*ResourceSpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{1} +} + +func (x *ResourceSpans) GetResource() *v1.Resource { + if x != nil { + return x.Resource + } + return nil +} + +func (x *ResourceSpans) GetInstrumentationLibrarySpans() []*InstrumentationLibrarySpans { + if x != nil { + return x.InstrumentationLibrarySpans + } + return nil +} + +func (x *ResourceSpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// A collection of Spans produced by an InstrumentationLibrary. +type InstrumentationLibrarySpans struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The instrumentation library information for the spans in this message. + // Semantically when InstrumentationLibrary isn't set, it is equivalent with + // an empty instrumentation library name (unknown). + InstrumentationLibrary *v11.InstrumentationLibrary `protobuf:"bytes,1,opt,name=instrumentation_library,json=instrumentationLibrary,proto3" json:"instrumentation_library,omitempty"` + // A list of Spans that originate from an instrumentation library. + Spans []*Span `protobuf:"bytes,2,rep,name=spans,proto3" json:"spans,omitempty"` + // This schema_url applies to all spans and span events in the "spans" field. + SchemaUrl string `protobuf:"bytes,3,opt,name=schema_url,json=schemaUrl,proto3" json:"schema_url,omitempty"` +} + +func (x *InstrumentationLibrarySpans) Reset() { + *x = InstrumentationLibrarySpans{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *InstrumentationLibrarySpans) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*InstrumentationLibrarySpans) ProtoMessage() {} + +func (x *InstrumentationLibrarySpans) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use InstrumentationLibrarySpans.ProtoReflect.Descriptor instead. +func (*InstrumentationLibrarySpans) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{2} +} + +func (x *InstrumentationLibrarySpans) GetInstrumentationLibrary() *v11.InstrumentationLibrary { + if x != nil { + return x.InstrumentationLibrary + } + return nil +} + +func (x *InstrumentationLibrarySpans) GetSpans() []*Span { + if x != nil { + return x.Spans + } + return nil +} + +func (x *InstrumentationLibrarySpans) GetSchemaUrl() string { + if x != nil { + return x.SchemaUrl + } + return "" +} + +// Span represents a single operation within a trace. Spans can be +// nested to form a trace tree. Spans may also be linked to other spans +// from the same or different trace and form graphs. Often, a trace +// contains a root span that describes the end-to-end latency, and one +// or more subspans for its sub-operations. A trace can also contain +// multiple root spans, or none at all. Spans do not need to be +// contiguous - there may be gaps or overlaps between spans in a trace. +// +// The next available field id is 17. 
+type Span struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier for a trace. All spans from the same trace share + // the same `trace_id`. The ID is a 16-byte array. An ID with all zeroes + // is considered invalid. + // + // This field is semantically required. Receiver should generate new + // random trace_id if empty or invalid trace_id was received. + // + // This field is required. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for a span within a trace, assigned when the span + // is created. The ID is an 8-byte array. An ID with all zeroes is considered + // invalid. + // + // This field is semantically required. Receiver should generate new + // random span_id if empty or invalid span_id was received. + // + // This field is required. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // trace_state conveys information about request position in multiple distributed tracing graphs. + // It is a trace_state in w3c-trace-context format: https://www.w3.org/TR/trace-context/#tracestate-header + // See also https://github.com/w3c/distributed-tracing for more details about this field. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // The `span_id` of this span's parent span. If this is a root span, then this + // field must be empty. The ID is an 8-byte array. + ParentSpanId []byte `protobuf:"bytes,4,opt,name=parent_span_id,json=parentSpanId,proto3" json:"parent_span_id,omitempty"` + // A description of the span's operation. + // + // For example, the name can be a qualified method name or a file name + // and a line number where the operation is called. A best practice is to use + // the same display name at the same call point in an application. + // This makes it easier to correlate spans in different traces. + // + // This field is semantically required to be set to non-empty string. + // Empty value is equivalent to an unknown span name. + // + // This field is required. + Name string `protobuf:"bytes,5,opt,name=name,proto3" json:"name,omitempty"` + // Distinguishes between spans generated in a particular context. For example, + // two spans with the same name may be distinguished using `CLIENT` (caller) + // and `SERVER` (callee) to identify queueing latency associated with the span. + Kind Span_SpanKind `protobuf:"varint,6,opt,name=kind,proto3,enum=opentelemetry.proto.trace.v1.Span_SpanKind" json:"kind,omitempty"` + // start_time_unix_nano is the start time of the span. On the client side, this is the time + // kept by the local machine where the span execution starts. On the server side, this + // is the time when the server's application handler starts running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. + // + // This field is semantically required and it is expected that end_time >= start_time. + StartTimeUnixNano uint64 `protobuf:"fixed64,7,opt,name=start_time_unix_nano,json=startTimeUnixNano,proto3" json:"start_time_unix_nano,omitempty"` + // end_time_unix_nano is the end time of the span. On the client side, this is the time + // kept by the local machine where the span execution ends. On the server side, this + // is the time when the server application handler stops running. + // Value is UNIX Epoch time in nanoseconds since 00:00:00 UTC on 1 January 1970. 
+ // + // This field is semantically required and it is expected that end_time >= start_time. + EndTimeUnixNano uint64 `protobuf:"fixed64,8,opt,name=end_time_unix_nano,json=endTimeUnixNano,proto3" json:"end_time_unix_nano,omitempty"` + // attributes is a collection of key/value pairs. Note, global attributes + // like server name can be set using the resource API. Examples of attributes: + // + // "/http/user_agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.98 Safari/537.36" + // "/http/server_latency": 300 + // "abc.com/myattribute": true + // "abc.com/score": 10.239 + // + // The OpenTelemetry API specification further restricts the allowed value types: + // https://github.com/open-telemetry/opentelemetry-specification/blob/main/specification/common/common.md#attributes + Attributes []*v11.KeyValue `protobuf:"bytes,9,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of attributes that were discarded. Attributes + // can be discarded because their keys are too long or because there are too many + // attributes. If this value is 0, then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,10,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` + // events is a collection of Event items. + Events []*Span_Event `protobuf:"bytes,11,rep,name=events,proto3" json:"events,omitempty"` + // dropped_events_count is the number of dropped events. If the value is 0, then no + // events were dropped. + DroppedEventsCount uint32 `protobuf:"varint,12,opt,name=dropped_events_count,json=droppedEventsCount,proto3" json:"dropped_events_count,omitempty"` + // links is a collection of Links, which are references from this span to a span + // in the same or different trace. + Links []*Span_Link `protobuf:"bytes,13,rep,name=links,proto3" json:"links,omitempty"` + // dropped_links_count is the number of dropped links after the maximum size was + // enforced. If this value is 0, then no links were dropped. + DroppedLinksCount uint32 `protobuf:"varint,14,opt,name=dropped_links_count,json=droppedLinksCount,proto3" json:"dropped_links_count,omitempty"` + // An optional final status for this span. Semantically when Status isn't set, it means + // span's status code is unset, i.e. assume STATUS_CODE_UNSET (code = 0). + Status *Status `protobuf:"bytes,15,opt,name=status,proto3" json:"status,omitempty"` +} + +func (x *Span) Reset() { + *x = Span{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span) ProtoMessage() {} + +func (x *Span) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span.ProtoReflect.Descriptor instead. 
+func (*Span) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3} +} + +func (x *Span) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span) GetParentSpanId() []byte { + if x != nil { + return x.ParentSpanId + } + return nil +} + +func (x *Span) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span) GetKind() Span_SpanKind { + if x != nil { + return x.Kind + } + return Span_SPAN_KIND_UNSPECIFIED +} + +func (x *Span) GetStartTimeUnixNano() uint64 { + if x != nil { + return x.StartTimeUnixNano + } + return 0 +} + +func (x *Span) GetEndTimeUnixNano() uint64 { + if x != nil { + return x.EndTimeUnixNano + } + return 0 +} + +func (x *Span) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +func (x *Span) GetEvents() []*Span_Event { + if x != nil { + return x.Events + } + return nil +} + +func (x *Span) GetDroppedEventsCount() uint32 { + if x != nil { + return x.DroppedEventsCount + } + return 0 +} + +func (x *Span) GetLinks() []*Span_Link { + if x != nil { + return x.Links + } + return nil +} + +func (x *Span) GetDroppedLinksCount() uint32 { + if x != nil { + return x.DroppedLinksCount + } + return 0 +} + +func (x *Span) GetStatus() *Status { + if x != nil { + return x.Status + } + return nil +} + +// The Status type defines a logical error model that is suitable for different +// programming environments, including REST APIs and RPC APIs. +type Status struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A developer-facing human readable error message. + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` + // The status code. + Code Status_StatusCode `protobuf:"varint,3,opt,name=code,proto3,enum=opentelemetry.proto.trace.v1.Status_StatusCode" json:"code,omitempty"` +} + +func (x *Status) Reset() { + *x = Status{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Status) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Status) ProtoMessage() {} + +func (x *Status) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Status.ProtoReflect.Descriptor instead. +func (*Status) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{4} +} + +func (x *Status) GetMessage() string { + if x != nil { + return x.Message + } + return "" +} + +func (x *Status) GetCode() Status_StatusCode { + if x != nil { + return x.Code + } + return Status_STATUS_CODE_UNSET +} + +// Event is a time-stamped annotation of the span, consisting of user-supplied +// text description and key-value pairs. 
+type Span_Event struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // time_unix_nano is the time the event occurred. + TimeUnixNano uint64 `protobuf:"fixed64,1,opt,name=time_unix_nano,json=timeUnixNano,proto3" json:"time_unix_nano,omitempty"` + // name of the event. + // This field is semantically required to be set to non-empty string. + Name string `protobuf:"bytes,2,opt,name=name,proto3" json:"name,omitempty"` + // attributes is a collection of attribute key/value pairs on the event. + Attributes []*v11.KeyValue `protobuf:"bytes,3,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,4,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Span_Event) Reset() { + *x = Span_Event{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Event) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Event) ProtoMessage() {} + +func (x *Span_Event) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Event.ProtoReflect.Descriptor instead. +func (*Span_Event) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 0} +} + +func (x *Span_Event) GetTimeUnixNano() uint64 { + if x != nil { + return x.TimeUnixNano + } + return 0 +} + +func (x *Span_Event) GetName() string { + if x != nil { + return x.Name + } + return "" +} + +func (x *Span_Event) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Event) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +// A pointer from the current span to another span in the same trace or in a +// different trace. For example, this can be used in batching operations, +// where a single batch handler processes multiple requests from different +// traces or when the handler receives a request from a different project. +type Span_Link struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // A unique identifier of a trace that this linked span is part of. The ID is a + // 16-byte array. + TraceId []byte `protobuf:"bytes,1,opt,name=trace_id,json=traceId,proto3" json:"trace_id,omitempty"` + // A unique identifier for the linked span. The ID is an 8-byte array. + SpanId []byte `protobuf:"bytes,2,opt,name=span_id,json=spanId,proto3" json:"span_id,omitempty"` + // The trace_state associated with the link. + TraceState string `protobuf:"bytes,3,opt,name=trace_state,json=traceState,proto3" json:"trace_state,omitempty"` + // attributes is a collection of attribute key/value pairs on the link. + Attributes []*v11.KeyValue `protobuf:"bytes,4,rep,name=attributes,proto3" json:"attributes,omitempty"` + // dropped_attributes_count is the number of dropped attributes. 
If the value is 0, + // then no attributes were dropped. + DroppedAttributesCount uint32 `protobuf:"varint,5,opt,name=dropped_attributes_count,json=droppedAttributesCount,proto3" json:"dropped_attributes_count,omitempty"` +} + +func (x *Span_Link) Reset() { + *x = Span_Link{} + if protoimpl.UnsafeEnabled { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *Span_Link) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*Span_Link) ProtoMessage() {} + +func (x *Span_Link) ProtoReflect() protoreflect.Message { + mi := &file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use Span_Link.ProtoReflect.Descriptor instead. +func (*Span_Link) Descriptor() ([]byte, []int) { + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP(), []int{3, 1} +} + +func (x *Span_Link) GetTraceId() []byte { + if x != nil { + return x.TraceId + } + return nil +} + +func (x *Span_Link) GetSpanId() []byte { + if x != nil { + return x.SpanId + } + return nil +} + +func (x *Span_Link) GetTraceState() string { + if x != nil { + return x.TraceState + } + return "" +} + +func (x *Span_Link) GetAttributes() []*v11.KeyValue { + if x != nil { + return x.Attributes + } + return nil +} + +func (x *Span_Link) GetDroppedAttributesCount() uint32 { + if x != nil { + return x.DroppedAttributesCount + } + return 0 +} + +var File_opentelemetry_proto_trace_v1_trace_proto protoreflect.FileDescriptor + +var file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = []byte{ + 0x0a, 0x28, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x1c, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x1a, 0x2a, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, + 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x63, 0x6f, + 0x6d, 0x6d, 0x6f, 0x6e, 0x2f, 0x76, 0x31, 0x2f, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x1a, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x2f, 0x76, 0x31, 0x2f, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x22, 0x60, 0x0a, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x73, 0x44, 0x61, + 0x74, 0x61, 0x12, 0x52, 0x0a, 0x0e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x73, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x2b, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, 0x6f, 0x75, 0x72, + 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x52, 0x0d, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, + 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x22, 0xf4, 0x01, 0x0a, 0x0d, 0x52, 0x65, 0x73, 0x6f, 0x75, + 0x72, 0x63, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x45, 
0x0a, 0x08, 0x72, 0x65, 0x73, 0x6f, + 0x75, 0x72, 0x63, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x29, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x52, 0x65, 0x73, + 0x6f, 0x75, 0x72, 0x63, 0x65, 0x52, 0x08, 0x72, 0x65, 0x73, 0x6f, 0x75, 0x72, 0x63, 0x65, 0x12, + 0x7d, 0x0a, 0x1d, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x73, + 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x39, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, + 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, + 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, + 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, + 0x73, 0x52, 0x1b, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, + 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, + 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, + 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0xe6, 0x01, + 0x0a, 0x1b, 0x49, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, + 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x53, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x6e, 0x0a, + 0x17, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, + 0x5f, 0x6c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x18, 0x01, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x35, + 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, + 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x49, + 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, + 0x62, 0x72, 0x61, 0x72, 0x79, 0x52, 0x16, 0x69, 0x6e, 0x73, 0x74, 0x72, 0x75, 0x6d, 0x65, 0x6e, + 0x74, 0x61, 0x74, 0x69, 0x6f, 0x6e, 0x4c, 0x69, 0x62, 0x72, 0x61, 0x72, 0x79, 0x12, 0x38, 0x0a, + 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x18, 0x02, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x22, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, + 0x52, 0x05, 0x73, 0x70, 0x61, 0x6e, 0x73, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x63, 0x68, 0x65, 0x6d, + 0x61, 0x5f, 0x75, 0x72, 0x6c, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x09, 0x73, 0x63, 0x68, + 0x65, 0x6d, 0x61, 0x55, 0x72, 0x6c, 0x22, 0x9c, 0x0a, 0x0a, 0x04, 0x53, 0x70, 0x61, 0x6e, 0x12, + 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, 0x01, 0x28, + 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, 0x73, 0x70, + 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, 0x70, 0x61, + 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, 0x74, 0x61, + 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, 0x65, 0x53, + 0x74, 0x61, 0x74, 0x65, 0x12, 0x24, 0x0a, 0x0e, 0x70, 0x61, 0x72, 0x65, 0x6e, 0x74, 0x5f, 0x73, + 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x0c, 
0x70, 0x61, + 0x72, 0x65, 0x6e, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, + 0x6d, 0x65, 0x18, 0x05, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x3f, + 0x0a, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2b, 0x2e, 0x6f, + 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, + 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x52, 0x04, 0x6b, 0x69, 0x6e, 0x64, 0x12, + 0x2f, 0x0a, 0x14, 0x73, 0x74, 0x61, 0x72, 0x74, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, + 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x07, 0x20, 0x01, 0x28, 0x06, 0x52, 0x11, 0x73, + 0x74, 0x61, 0x72, 0x74, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, + 0x12, 0x2b, 0x0a, 0x12, 0x65, 0x6e, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, + 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x08, 0x20, 0x01, 0x28, 0x06, 0x52, 0x0f, 0x65, 0x6e, + 0x64, 0x54, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, 0x6f, 0x12, 0x47, 0x0a, + 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x18, 0x09, 0x20, 0x03, 0x28, + 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, + 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, + 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, + 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, + 0x6e, 0x74, 0x18, 0x0a, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, + 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, + 0x12, 0x40, 0x0a, 0x06, 0x65, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x0b, + 0x32, 0x28, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, + 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, + 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x52, 0x06, 0x65, 0x76, 0x65, 0x6e, + 0x74, 0x73, 0x12, 0x30, 0x0a, 0x14, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x65, 0x76, + 0x65, 0x6e, 0x74, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0c, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x12, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x73, 0x43, + 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x3d, 0x0a, 0x05, 0x6c, 0x69, 0x6e, 0x6b, 0x73, 0x18, 0x0d, 0x20, + 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x70, 0x61, 0x6e, 0x2e, 0x4c, 0x69, 0x6e, 0x6b, 0x52, 0x05, 0x6c, 0x69, + 0x6e, 0x6b, 0x73, 0x12, 0x2e, 0x0a, 0x13, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x6c, + 0x69, 0x6e, 0x6b, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x11, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x4c, 0x69, 0x6e, 0x6b, 0x73, 0x43, 0x6f, + 0x75, 0x6e, 0x74, 0x12, 0x3c, 0x0a, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, 0x73, 0x18, 0x0f, 0x20, + 0x01, 0x28, 0x0b, 0x32, 0x24, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, + 0x74, 0x72, 
0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, + 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x52, 0x06, 0x73, 0x74, 0x61, 0x74, 0x75, + 0x73, 0x1a, 0xc4, 0x01, 0x0a, 0x05, 0x45, 0x76, 0x65, 0x6e, 0x74, 0x12, 0x24, 0x0a, 0x0e, 0x74, + 0x69, 0x6d, 0x65, 0x5f, 0x75, 0x6e, 0x69, 0x78, 0x5f, 0x6e, 0x61, 0x6e, 0x6f, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x06, 0x52, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x55, 0x6e, 0x69, 0x78, 0x4e, 0x61, 0x6e, + 0x6f, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, + 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, 0x6c, + 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, 0x38, + 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0d, + 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, + 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x1a, 0xde, 0x01, 0x0a, 0x04, 0x4c, 0x69, 0x6e, + 0x6b, 0x12, 0x19, 0x0a, 0x08, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x69, 0x64, 0x18, 0x01, 0x20, + 0x01, 0x28, 0x0c, 0x52, 0x07, 0x74, 0x72, 0x61, 0x63, 0x65, 0x49, 0x64, 0x12, 0x17, 0x0a, 0x07, + 0x73, 0x70, 0x61, 0x6e, 0x5f, 0x69, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, 0x06, 0x73, + 0x70, 0x61, 0x6e, 0x49, 0x64, 0x12, 0x1f, 0x0a, 0x0b, 0x74, 0x72, 0x61, 0x63, 0x65, 0x5f, 0x73, + 0x74, 0x61, 0x74, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x09, 0x52, 0x0a, 0x74, 0x72, 0x61, 0x63, + 0x65, 0x53, 0x74, 0x61, 0x74, 0x65, 0x12, 0x47, 0x0a, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x27, 0x2e, 0x6f, 0x70, 0x65, + 0x6e, 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, + 0x2e, 0x63, 0x6f, 0x6d, 0x6d, 0x6f, 0x6e, 0x2e, 0x76, 0x31, 0x2e, 0x4b, 0x65, 0x79, 0x56, 0x61, + 0x6c, 0x75, 0x65, 0x52, 0x0a, 0x61, 0x74, 0x74, 0x72, 0x69, 0x62, 0x75, 0x74, 0x65, 0x73, 0x12, + 0x38, 0x0a, 0x18, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x5f, 0x61, 0x74, 0x74, 0x72, 0x69, + 0x62, 0x75, 0x74, 0x65, 0x73, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x05, 0x20, 0x01, 0x28, + 0x0d, 0x52, 0x16, 0x64, 0x72, 0x6f, 0x70, 0x70, 0x65, 0x64, 0x41, 0x74, 0x74, 0x72, 0x69, 0x62, + 0x75, 0x74, 0x65, 0x73, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0x99, 0x01, 0x0a, 0x08, 0x53, 0x70, + 0x61, 0x6e, 0x4b, 0x69, 0x6e, 0x64, 0x12, 0x19, 0x0a, 0x15, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, + 0x49, 0x4e, 0x44, 0x5f, 0x55, 0x4e, 0x53, 0x50, 0x45, 0x43, 0x49, 0x46, 0x49, 0x45, 0x44, 0x10, + 0x00, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x49, + 0x4e, 0x54, 0x45, 0x52, 0x4e, 0x41, 0x4c, 0x10, 0x01, 0x12, 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, + 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x53, 0x45, 0x52, 0x56, 0x45, 0x52, 0x10, 0x02, 0x12, + 0x14, 0x0a, 0x10, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4c, 0x49, + 0x45, 0x4e, 0x54, 0x10, 0x03, 0x12, 0x16, 0x0a, 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, + 0x4e, 0x44, 0x5f, 0x50, 0x52, 0x4f, 
0x44, 0x55, 0x43, 0x45, 0x52, 0x10, 0x04, 0x12, 0x16, 0x0a, + 0x12, 0x53, 0x50, 0x41, 0x4e, 0x5f, 0x4b, 0x49, 0x4e, 0x44, 0x5f, 0x43, 0x4f, 0x4e, 0x53, 0x55, + 0x4d, 0x45, 0x52, 0x10, 0x05, 0x22, 0xbd, 0x01, 0x0a, 0x06, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, + 0x12, 0x18, 0x0a, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, + 0x09, 0x52, 0x07, 0x6d, 0x65, 0x73, 0x73, 0x61, 0x67, 0x65, 0x12, 0x43, 0x0a, 0x04, 0x63, 0x6f, + 0x64, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x2f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, 0x74, + 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x2e, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x2e, 0x53, + 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x52, 0x04, 0x63, 0x6f, 0x64, 0x65, 0x22, + 0x4e, 0x0a, 0x0a, 0x53, 0x74, 0x61, 0x74, 0x75, 0x73, 0x43, 0x6f, 0x64, 0x65, 0x12, 0x15, 0x0a, + 0x11, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x55, 0x4e, 0x53, + 0x45, 0x54, 0x10, 0x00, 0x12, 0x12, 0x0a, 0x0e, 0x53, 0x54, 0x41, 0x54, 0x55, 0x53, 0x5f, 0x43, + 0x4f, 0x44, 0x45, 0x5f, 0x4f, 0x4b, 0x10, 0x01, 0x12, 0x15, 0x0a, 0x11, 0x53, 0x54, 0x41, 0x54, + 0x55, 0x53, 0x5f, 0x43, 0x4f, 0x44, 0x45, 0x5f, 0x45, 0x52, 0x52, 0x4f, 0x52, 0x10, 0x02, 0x4a, + 0x04, 0x08, 0x01, 0x10, 0x02, 0x42, 0x58, 0x0a, 0x1f, 0x69, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, + 0x74, 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2e, + 0x74, 0x72, 0x61, 0x63, 0x65, 0x2e, 0x76, 0x31, 0x42, 0x0a, 0x54, 0x72, 0x61, 0x63, 0x65, 0x50, + 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x27, 0x67, 0x6f, 0x2e, 0x6f, 0x70, 0x65, 0x6e, 0x74, + 0x65, 0x6c, 0x65, 0x6d, 0x65, 0x74, 0x72, 0x79, 0x2e, 0x69, 0x6f, 0x2f, 0x70, 0x72, 0x6f, 0x74, + 0x6f, 0x2f, 0x6f, 0x74, 0x6c, 0x70, 0x2f, 0x74, 0x72, 0x61, 0x63, 0x65, 0x2f, 0x76, 0x31, 0x62, + 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce sync.Once + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = file_opentelemetry_proto_trace_v1_trace_proto_rawDesc +) + +func file_opentelemetry_proto_trace_v1_trace_proto_rawDescGZIP() []byte { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescOnce.Do(func() { + file_opentelemetry_proto_trace_v1_trace_proto_rawDescData = protoimpl.X.CompressGZIP(file_opentelemetry_proto_trace_v1_trace_proto_rawDescData) + }) + return file_opentelemetry_proto_trace_v1_trace_proto_rawDescData +} + +var file_opentelemetry_proto_trace_v1_trace_proto_enumTypes = make([]protoimpl.EnumInfo, 2) +var file_opentelemetry_proto_trace_v1_trace_proto_msgTypes = make([]protoimpl.MessageInfo, 7) +var file_opentelemetry_proto_trace_v1_trace_proto_goTypes = []interface{}{ + (Span_SpanKind)(0), // 0: opentelemetry.proto.trace.v1.Span.SpanKind + (Status_StatusCode)(0), // 1: opentelemetry.proto.trace.v1.Status.StatusCode + (*TracesData)(nil), // 2: opentelemetry.proto.trace.v1.TracesData + (*ResourceSpans)(nil), // 3: opentelemetry.proto.trace.v1.ResourceSpans + (*InstrumentationLibrarySpans)(nil), // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans + (*Span)(nil), // 5: opentelemetry.proto.trace.v1.Span + (*Status)(nil), // 6: opentelemetry.proto.trace.v1.Status + (*Span_Event)(nil), // 7: opentelemetry.proto.trace.v1.Span.Event + (*Span_Link)(nil), // 8: opentelemetry.proto.trace.v1.Span.Link + (*v1.Resource)(nil), // 9: opentelemetry.proto.resource.v1.Resource + (*v11.InstrumentationLibrary)(nil), 
// 10: opentelemetry.proto.common.v1.InstrumentationLibrary + (*v11.KeyValue)(nil), // 11: opentelemetry.proto.common.v1.KeyValue +} +var file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = []int32{ + 3, // 0: opentelemetry.proto.trace.v1.TracesData.resource_spans:type_name -> opentelemetry.proto.trace.v1.ResourceSpans + 9, // 1: opentelemetry.proto.trace.v1.ResourceSpans.resource:type_name -> opentelemetry.proto.resource.v1.Resource + 4, // 2: opentelemetry.proto.trace.v1.ResourceSpans.instrumentation_library_spans:type_name -> opentelemetry.proto.trace.v1.InstrumentationLibrarySpans + 10, // 3: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.instrumentation_library:type_name -> opentelemetry.proto.common.v1.InstrumentationLibrary + 5, // 4: opentelemetry.proto.trace.v1.InstrumentationLibrarySpans.spans:type_name -> opentelemetry.proto.trace.v1.Span + 0, // 5: opentelemetry.proto.trace.v1.Span.kind:type_name -> opentelemetry.proto.trace.v1.Span.SpanKind + 11, // 6: opentelemetry.proto.trace.v1.Span.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 7, // 7: opentelemetry.proto.trace.v1.Span.events:type_name -> opentelemetry.proto.trace.v1.Span.Event + 8, // 8: opentelemetry.proto.trace.v1.Span.links:type_name -> opentelemetry.proto.trace.v1.Span.Link + 6, // 9: opentelemetry.proto.trace.v1.Span.status:type_name -> opentelemetry.proto.trace.v1.Status + 1, // 10: opentelemetry.proto.trace.v1.Status.code:type_name -> opentelemetry.proto.trace.v1.Status.StatusCode + 11, // 11: opentelemetry.proto.trace.v1.Span.Event.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 11, // 12: opentelemetry.proto.trace.v1.Span.Link.attributes:type_name -> opentelemetry.proto.common.v1.KeyValue + 13, // [13:13] is the sub-list for method output_type + 13, // [13:13] is the sub-list for method input_type + 13, // [13:13] is the sub-list for extension type_name + 13, // [13:13] is the sub-list for extension extendee + 0, // [0:13] is the sub-list for field type_name +} + +func init() { file_opentelemetry_proto_trace_v1_trace_proto_init() } +func file_opentelemetry_proto_trace_v1_trace_proto_init() { + if File_opentelemetry_proto_trace_v1_trace_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*TracesData); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[1].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*ResourceSpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[2].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*InstrumentationLibrarySpans); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[3].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[4].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Status); i { + case 0: + return &v.state + case 1: 
+ return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[5].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Event); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + file_opentelemetry_proto_trace_v1_trace_proto_msgTypes[6].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*Span_Link); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_opentelemetry_proto_trace_v1_trace_proto_rawDesc, + NumEnums: 2, + NumMessages: 7, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_opentelemetry_proto_trace_v1_trace_proto_goTypes, + DependencyIndexes: file_opentelemetry_proto_trace_v1_trace_proto_depIdxs, + EnumInfos: file_opentelemetry_proto_trace_v1_trace_proto_enumTypes, + MessageInfos: file_opentelemetry_proto_trace_v1_trace_proto_msgTypes, + }.Build() + File_opentelemetry_proto_trace_v1_trace_proto = out.File + file_opentelemetry_proto_trace_v1_trace_proto_rawDesc = nil + file_opentelemetry_proto_trace_v1_trace_proto_goTypes = nil + file_opentelemetry_proto_trace_v1_trace_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go new file mode 100644 index 0000000000..7ea5ced87e --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/api/httpbody/httpbody.pb.go @@ -0,0 +1,236 @@ +// Copyright 2015 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Code generated by protoc-gen-go. DO NOT EDIT. +// versions: +// protoc-gen-go v1.26.0 +// protoc v3.12.2 +// source: google/api/httpbody.proto + +package httpbody + +import ( + reflect "reflect" + sync "sync" + + protoreflect "google.golang.org/protobuf/reflect/protoreflect" + protoimpl "google.golang.org/protobuf/runtime/protoimpl" + anypb "google.golang.org/protobuf/types/known/anypb" +) + +const ( + // Verify that this generated code is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(20 - protoimpl.MinVersion) + // Verify that runtime/protoimpl is sufficiently up-to-date. + _ = protoimpl.EnforceVersion(protoimpl.MaxVersion - 20) +) + +// Message that represents an arbitrary HTTP body. It should only be used for +// payload formats that can't be represented as JSON, such as raw binary or +// an HTML page. +// +// +// This message can be used both in streaming and non-streaming API methods in +// the request as well as the response. 
+// +// It can be used as a top-level request field, which is convenient if one +// wants to extract parameters from either the URL or HTTP template into the +// request fields and also want access to the raw HTTP body. +// +// Example: +// +// message GetResourceRequest { +// // A unique request id. +// string request_id = 1; +// +// // The raw HTTP body is bound to this field. +// google.api.HttpBody http_body = 2; +// +// } +// +// service ResourceService { +// rpc GetResource(GetResourceRequest) +// returns (google.api.HttpBody); +// rpc UpdateResource(google.api.HttpBody) +// returns (google.protobuf.Empty); +// +// } +// +// Example with streaming methods: +// +// service CaldavService { +// rpc GetCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// rpc UpdateCalendar(stream google.api.HttpBody) +// returns (stream google.api.HttpBody); +// +// } +// +// Use of this type only changes how the request and response bodies are +// handled, all other features will continue to work unchanged. +type HttpBody struct { + state protoimpl.MessageState + sizeCache protoimpl.SizeCache + unknownFields protoimpl.UnknownFields + + // The HTTP Content-Type header value specifying the content type of the body. + ContentType string `protobuf:"bytes,1,opt,name=content_type,json=contentType,proto3" json:"content_type,omitempty"` + // The HTTP request/response body as raw binary. + Data []byte `protobuf:"bytes,2,opt,name=data,proto3" json:"data,omitempty"` + // Application specific response metadata. Must be set in the first response + // for streaming APIs. + Extensions []*anypb.Any `protobuf:"bytes,3,rep,name=extensions,proto3" json:"extensions,omitempty"` +} + +func (x *HttpBody) Reset() { + *x = HttpBody{} + if protoimpl.UnsafeEnabled { + mi := &file_google_api_httpbody_proto_msgTypes[0] + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + ms.StoreMessageInfo(mi) + } +} + +func (x *HttpBody) String() string { + return protoimpl.X.MessageStringOf(x) +} + +func (*HttpBody) ProtoMessage() {} + +func (x *HttpBody) ProtoReflect() protoreflect.Message { + mi := &file_google_api_httpbody_proto_msgTypes[0] + if protoimpl.UnsafeEnabled && x != nil { + ms := protoimpl.X.MessageStateOf(protoimpl.Pointer(x)) + if ms.LoadMessageInfo() == nil { + ms.StoreMessageInfo(mi) + } + return ms + } + return mi.MessageOf(x) +} + +// Deprecated: Use HttpBody.ProtoReflect.Descriptor instead. 
+func (*HttpBody) Descriptor() ([]byte, []int) { + return file_google_api_httpbody_proto_rawDescGZIP(), []int{0} +} + +func (x *HttpBody) GetContentType() string { + if x != nil { + return x.ContentType + } + return "" +} + +func (x *HttpBody) GetData() []byte { + if x != nil { + return x.Data + } + return nil +} + +func (x *HttpBody) GetExtensions() []*anypb.Any { + if x != nil { + return x.Extensions + } + return nil +} + +var File_google_api_httpbody_proto protoreflect.FileDescriptor + +var file_google_api_httpbody_proto_rawDesc = []byte{ + 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, + 0x70, 0x62, 0x6f, 0x64, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0a, 0x67, 0x6f, 0x6f, + 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x1a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, + 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, + 0x74, 0x6f, 0x22, 0x77, 0x0a, 0x08, 0x48, 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x12, 0x21, + 0x0a, 0x0c, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x5f, 0x74, 0x79, 0x70, 0x65, 0x18, 0x01, + 0x20, 0x01, 0x28, 0x09, 0x52, 0x0b, 0x63, 0x6f, 0x6e, 0x74, 0x65, 0x6e, 0x74, 0x54, 0x79, 0x70, + 0x65, 0x12, 0x12, 0x0a, 0x04, 0x64, 0x61, 0x74, 0x61, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0c, 0x52, + 0x04, 0x64, 0x61, 0x74, 0x61, 0x12, 0x34, 0x0a, 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, + 0x6f, 0x6e, 0x73, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x14, 0x2e, 0x67, 0x6f, 0x6f, 0x67, + 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x41, 0x6e, 0x79, 0x52, + 0x0a, 0x65, 0x78, 0x74, 0x65, 0x6e, 0x73, 0x69, 0x6f, 0x6e, 0x73, 0x42, 0x68, 0x0a, 0x0e, 0x63, + 0x6f, 0x6d, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x61, 0x70, 0x69, 0x42, 0x0d, 0x48, + 0x74, 0x74, 0x70, 0x42, 0x6f, 0x64, 0x79, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x50, 0x01, 0x5a, 0x3b, + 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, + 0x67, 0x2f, 0x67, 0x65, 0x6e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x2f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, + 0x65, 0x61, 0x70, 0x69, 0x73, 0x2f, 0x61, 0x70, 0x69, 0x2f, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, + 0x64, 0x79, 0x3b, 0x68, 0x74, 0x74, 0x70, 0x62, 0x6f, 0x64, 0x79, 0xf8, 0x01, 0x01, 0xa2, 0x02, + 0x04, 0x47, 0x41, 0x50, 0x49, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, +} + +var ( + file_google_api_httpbody_proto_rawDescOnce sync.Once + file_google_api_httpbody_proto_rawDescData = file_google_api_httpbody_proto_rawDesc +) + +func file_google_api_httpbody_proto_rawDescGZIP() []byte { + file_google_api_httpbody_proto_rawDescOnce.Do(func() { + file_google_api_httpbody_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_api_httpbody_proto_rawDescData) + }) + return file_google_api_httpbody_proto_rawDescData +} + +var file_google_api_httpbody_proto_msgTypes = make([]protoimpl.MessageInfo, 1) +var file_google_api_httpbody_proto_goTypes = []interface{}{ + (*HttpBody)(nil), // 0: google.api.HttpBody + (*anypb.Any)(nil), // 1: google.protobuf.Any +} +var file_google_api_httpbody_proto_depIdxs = []int32{ + 1, // 0: google.api.HttpBody.extensions:type_name -> google.protobuf.Any + 1, // [1:1] is the sub-list for method output_type + 1, // [1:1] is the sub-list for method input_type + 1, // [1:1] is the sub-list for extension type_name + 1, // [1:1] is the sub-list for extension extendee + 0, // [0:1] is the sub-list for field type_name +} + +func init() { file_google_api_httpbody_proto_init() } +func 
file_google_api_httpbody_proto_init() { + if File_google_api_httpbody_proto != nil { + return + } + if !protoimpl.UnsafeEnabled { + file_google_api_httpbody_proto_msgTypes[0].Exporter = func(v interface{}, i int) interface{} { + switch v := v.(*HttpBody); i { + case 0: + return &v.state + case 1: + return &v.sizeCache + case 2: + return &v.unknownFields + default: + return nil + } + } + } + type x struct{} + out := protoimpl.TypeBuilder{ + File: protoimpl.DescBuilder{ + GoPackagePath: reflect.TypeOf(x{}).PkgPath(), + RawDescriptor: file_google_api_httpbody_proto_rawDesc, + NumEnums: 0, + NumMessages: 1, + NumExtensions: 0, + NumServices: 0, + }, + GoTypes: file_google_api_httpbody_proto_goTypes, + DependencyIndexes: file_google_api_httpbody_proto_depIdxs, + MessageInfos: file_google_api_httpbody_proto_msgTypes, + }.Build() + File_google_api_httpbody_proto = out.File + file_google_api_httpbody_proto_rawDesc = nil + file_google_api_httpbody_proto_goTypes = nil + file_google_api_httpbody_proto_depIdxs = nil +} diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go new file mode 100644 index 0000000000..d10ad66533 --- /dev/null +++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.go @@ -0,0 +1,23 @@ +// Copyright 2020 Google LLC +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package field_mask aliases all exported identifiers in +// package "google.golang.org/protobuf/types/known/fieldmaskpb". +package field_mask + +import "google.golang.org/protobuf/types/known/fieldmaskpb" + +type FieldMask = fieldmaskpb.FieldMask + +var File_google_protobuf_field_mask_proto = fieldmaskpb.File_google_protobuf_field_mask_proto diff --git a/vendor/google.golang.org/grpc/attributes/attributes.go b/vendor/google.golang.org/grpc/attributes/attributes.go index 6ff2792ee4..ae13ddac14 100644 --- a/vendor/google.golang.org/grpc/attributes/attributes.go +++ b/vendor/google.golang.org/grpc/attributes/attributes.go @@ -69,7 +69,9 @@ func (a *Attributes) Value(key interface{}) interface{} { // bool' is implemented for a value in the attributes, it is called to // determine if the value matches the one stored in the other attributes. If // Equal is not implemented, standard equality is used to determine if the two -// values are equal. +// values are equal. Note that some types (e.g. maps) aren't comparable by +// default, so they must be wrapped in a struct, or in an alias type, with Equal +// defined. 
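+//
+// A hedged sketch of such a wrapper (mapValue and its Equal method are
+// illustrative placeholders, not part of this package):
+//
+//	// mapValue wraps a map so it can carry an Equal method.
+//	type mapValue struct{ m map[string]string }
+//
+//	// Equal reports whether two mapValue wrappers hold equal maps.
+//	func (v mapValue) Equal(o interface{}) bool {
+//		other, ok := o.(mapValue)
+//		if !ok || len(v.m) != len(other.m) {
+//			return false
+//		}
+//		for k, val := range v.m {
+//			if ov, present := other.m[k]; !present || ov != val {
+//				return false
+//			}
+//		}
+//		return true
+//	}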
func (a *Attributes) Equal(o *Attributes) bool { if a == nil && o == nil { return true diff --git a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go index 50cc9da4a9..cb4b3c203c 100644 --- a/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go +++ b/vendor/google.golang.org/grpc/balancer/grpclb/grpc_lb_v1/load_balancer_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/lb/v1/load_balancer.proto diff --git a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go index a02c458281..fd55176b9b 100644 --- a/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go +++ b/vendor/google.golang.org/grpc/credentials/alts/internal/proto/grpc_gcp/handshaker_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/gcp/handshaker.proto diff --git a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go index 22a8f996a6..4fbed12565 100644 --- a/vendor/google.golang.org/grpc/credentials/insecure/insecure.go +++ b/vendor/google.golang.org/grpc/credentials/insecure/insecure.go @@ -18,11 +18,6 @@ // Package insecure provides an implementation of the // credentials.TransportCredentials interface which disables transport security. -// -// Experimental -// -// Notice: This package is EXPERIMENTAL and may be changed or removed in a -// later release. package insecure import ( diff --git a/vendor/google.golang.org/grpc/dialoptions.go b/vendor/google.golang.org/grpc/dialoptions.go index 063f1e903c..c4bf09f9e9 100644 --- a/vendor/google.golang.org/grpc/dialoptions.go +++ b/vendor/google.golang.org/grpc/dialoptions.go @@ -272,7 +272,7 @@ func withBackoff(bs internalbackoff.Strategy) DialOption { }) } -// WithBlock returns a DialOption which makes caller of Dial blocks until the +// WithBlock returns a DialOption which makes callers of Dial block until the // underlying connection is up. Without this, Dial returns immediately and // connecting to the server happens in the background. func WithBlock() DialOption { @@ -304,7 +304,7 @@ func WithReturnConnectionError() DialOption { // WithCredentialsBundle or WithPerRPCCredentials) which require transport // security is incompatible and will cause grpc.Dial() to fail. // -// Deprecated: use insecure.NewCredentials() instead. +// Deprecated: use WithTransportCredentials and insecure.NewCredentials() instead. // Will be supported throughout 1.x. func WithInsecure() DialOption { return newFuncDialOption(func(o *dialOptions) { diff --git a/vendor/google.golang.org/grpc/grpclog/loggerv2.go b/vendor/google.golang.org/grpc/grpclog/loggerv2.go index 34098bb8eb..7c1f664090 100644 --- a/vendor/google.golang.org/grpc/grpclog/loggerv2.go +++ b/vendor/google.golang.org/grpc/grpclog/loggerv2.go @@ -248,12 +248,12 @@ func (g *loggerT) V(l int) bool { // later release. type DepthLoggerV2 interface { LoggerV2 - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print.
+ // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go index bdc3ae284e..69f525d1ba 100644 --- a/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go +++ b/vendor/google.golang.org/grpc/health/grpc_health_v1/health_grpc.pb.go @@ -1,6 +1,6 @@ // Code generated by protoc-gen-go-grpc. DO NOT EDIT. // versions: -// - protoc-gen-go-grpc v1.1.0 +// - protoc-gen-go-grpc v1.2.0 // - protoc v3.14.0 // source: grpc/health/v1/health.proto diff --git a/vendor/google.golang.org/grpc/internal/envconfig/xds.go b/vendor/google.golang.org/grpc/internal/envconfig/xds.go index 93522d716d..9bad03cec6 100644 --- a/vendor/google.golang.org/grpc/internal/envconfig/xds.go +++ b/vendor/google.golang.org/grpc/internal/envconfig/xds.go @@ -42,6 +42,7 @@ const ( aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER" rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC" federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION" + rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB" c2pResolverTestOnlyTrafficDirectorURIEnv = "GRPC_TEST_ONLY_GOOGLE_C2P_RESOLVER_TRAFFIC_DIRECTOR_URI" ) @@ -85,6 +86,12 @@ var ( // XDSFederation indicates whether federation support is enabled. XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true") + // XDSRLS indicates whether processing of Cluster Specifier plugins and + // support for the RLS Cluster Specifier is enabled. It can be enabled by + // setting the environment variable "GRPC_EXPERIMENTAL_XDS_RLS_LB" to + // "true". + XDSRLS = strings.EqualFold(os.Getenv(rlsInXDSEnv), "true") + // C2PResolverTestOnlyTrafficDirectorURI is the TD URI for testing. C2PResolverTestOnlyTrafficDirectorURI = os.Getenv(c2pResolverTestOnlyTrafficDirectorURIEnv) ) diff --git a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go index e6f975cbf6..30a3b4258f 100644 --- a/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go +++ b/vendor/google.golang.org/grpc/internal/grpclog/grpclog.go @@ -115,12 +115,12 @@ type LoggerV2 interface { // Notice: This type is EXPERIMENTAL and may be changed or removed in a // later release. type DepthLoggerV2 interface { - // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Print. + // InfoDepth logs to INFO log at the specified depth. Arguments are handled in the manner of fmt.Println. InfoDepth(depth int, args ...interface{}) - // WarningDepth logs to WARNING log at the specified depth.
Arguments are handled in the manner of fmt.Print. + // WarningDepth logs to WARNING log at the specified depth. Arguments are handled in the manner of fmt.Println. WarningDepth(depth int, args ...interface{}) - // ErrorDetph logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Print. + // ErrorDepth logs to ERROR log at the specified depth. Arguments are handled in the manner of fmt.Println. ErrorDepth(depth int, args ...interface{}) - // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Print. + // FatalDepth logs to FATAL log at the specified depth. Arguments are handled in the manner of fmt.Println. FatalDepth(depth int, args ...interface{}) } diff --git a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go index 2810a8ba2f..7a092b2b80 100644 --- a/vendor/google.golang.org/grpc/internal/grpcutil/regex.go +++ b/vendor/google.golang.org/grpc/internal/grpcutil/regex.go @@ -20,9 +20,12 @@ package grpcutil import "regexp" -// FullMatchWithRegex returns whether the full string matches the regex provided. -func FullMatchWithRegex(re *regexp.Regexp, string string) bool { +// FullMatchWithRegex returns whether the full text matches the regex provided. +func FullMatchWithRegex(re *regexp.Regexp, text string) bool { + if len(text) == 0 { + return re.MatchString(text) + } re.Longest() - rem := re.FindString(string) - return len(rem) == len(string) + rem := re.FindString(text) + return len(rem) == len(text) } diff --git a/vendor/google.golang.org/grpc/regenerate.sh b/vendor/google.golang.org/grpc/regenerate.sh index a0a71aae96..58c802f8ae 100644 --- a/vendor/google.golang.org/grpc/regenerate.sh +++ b/vendor/google.golang.org/grpc/regenerate.sh @@ -76,7 +76,21 @@ SOURCES=( # These options of the form 'Mfoo.proto=bar' instruct the codegen to use an # import path of 'bar' in the generated code when 'foo.proto' is imported in # one of the sources. -OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core +# +# Note that the protos listed here are all for testing purposes. All protos to +# be used externally should have a go_package option (and they don't need to be +# listed here). 
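+#
+# For illustration (foo.proto and example.com/bar are placeholders, not
+# options this script actually passes): an entry of the form
+#
+#   Mfoo.proto=example.com/bar
+#
+# makes the generated code import "example.com/bar" wherever foo.proto is
+# imported by one of the sources, instead of a path derived from the proto
+# file itself.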
+OPTS=Mgrpc/service_config/service_config.proto=/internal/proto/grpc_service_config,\ +Mgrpc/core/stats.proto=google.golang.org/grpc/interop/grpc_testing/core,\ +Mgrpc/testing/benchmark_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/stats.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/report_qps_scenario_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/messages.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/worker_service.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/control.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/test.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/payloads.proto=google.golang.org/grpc/interop/grpc_testing,\ +Mgrpc/testing/empty.proto=google.golang.org/grpc/interop/grpc_testing for src in ${SOURCES[@]}; do echo "protoc ${src}" @@ -85,7 +99,6 @@ for src in ${SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done @@ -96,7 +109,6 @@ for src in ${LEGACY_SOURCES[@]}; do -I${WORKDIR}/grpc-proto \ -I${WORKDIR}/googleapis \ -I${WORKDIR}/protobuf/src \ - -I${WORKDIR}/istio \ ${src} done diff --git a/vendor/google.golang.org/grpc/version.go b/vendor/google.golang.org/grpc/version.go index 8ef0958797..9d3fd73da9 100644 --- a/vendor/google.golang.org/grpc/version.go +++ b/vendor/google.golang.org/grpc/version.go @@ -19,4 +19,4 @@ package grpc // Version is the current grpc version. -const Version = "1.43.0" +const Version = "1.44.1-dev" diff --git a/vendor/modules.txt b/vendor/modules.txt index b53091b60c..241d62c81e 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -54,8 +54,14 @@ github.com/Microsoft/hcsshim/pkg/ociwclayer # github.com/RackSec/srslog v0.0.0-20180709174129-a4725f04ec91 ## explicit github.com/RackSec/srslog +# github.com/agext/levenshtein v1.2.3 +## explicit +github.com/agext/levenshtein # github.com/akutz/memconn v0.1.0 ## explicit +# github.com/armon/circbuf v0.0.0-20190214190532-5111143e8da2 +## explicit +github.com/armon/circbuf # github.com/armon/go-metrics v0.0.0-20180917152333-f0300d1749da => github.com/armon/go-metrics v0.0.0-20150106224455-eb0af217e5e9 ## explicit github.com/armon/go-metrics @@ -174,6 +180,7 @@ github.com/containerd/containerd/content/proxy github.com/containerd/containerd/contrib/nvidia github.com/containerd/containerd/defaults github.com/containerd/containerd/diff +github.com/containerd/containerd/diff/walking github.com/containerd/containerd/errdefs github.com/containerd/containerd/events github.com/containerd/containerd/events/exchange @@ -182,6 +189,7 @@ github.com/containerd/containerd/gc github.com/containerd/containerd/identifiers github.com/containerd/containerd/images github.com/containerd/containerd/images/archive +github.com/containerd/containerd/images/converter github.com/containerd/containerd/labels github.com/containerd/containerd/leases github.com/containerd/containerd/leases/proxy @@ -229,6 +237,13 @@ github.com/containerd/fifo # github.com/containerd/go-runc v1.0.0 ## explicit; go 1.13 github.com/containerd/go-runc +# github.com/containerd/stargz-snapshotter v0.11.2 +## explicit; go 1.16 +github.com/containerd/stargz-snapshotter/snapshot/overlayutils +# github.com/containerd/stargz-snapshotter/estargz v0.11.2 +## explicit; go 1.16 +github.com/containerd/stargz-snapshotter/estargz +github.com/containerd/stargz-snapshotter/estargz/errorutil # github.com/containerd/ttrpc v1.1.0 ## 
explicit; go 1.13 github.com/containerd/ttrpc @@ -365,6 +380,9 @@ github.com/docker/swarmkit/xnet # github.com/dustin/go-humanize v1.0.0 ## explicit github.com/dustin/go-humanize +# github.com/felixge/httpsnoop v1.0.2 +## explicit; go 1.13 +github.com/felixge/httpsnoop # github.com/fernet/fernet-go v0.0.0-20180830025343-9eac43b88a5e ## explicit github.com/fernet/fernet-go @@ -374,13 +392,20 @@ github.com/fluent/fluent-logger-golang/fluent # github.com/fsnotify/fsnotify v1.5.1 ## explicit; go 1.13 github.com/fsnotify/fsnotify +# github.com/go-logr/logr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/logr +github.com/go-logr/logr/funcr +# github.com/go-logr/stdr v1.2.2 +## explicit; go 1.16 +github.com/go-logr/stdr # github.com/godbus/dbus/v5 v5.0.6 ## explicit; go 1.12 github.com/godbus/dbus/v5 # github.com/gofrs/flock v0.8.1 ## explicit github.com/gofrs/flock -# github.com/gogo/googleapis v1.4.0 => github.com/gogo/googleapis v1.3.2 +# github.com/gogo/googleapis v1.4.1 ## explicit; go 1.12 github.com/gogo/googleapis/google/rpc # github.com/gogo/protobuf v1.3.2 @@ -400,6 +425,7 @@ github.com/golang/gddo/httputil/header github.com/golang/groupcache/lru # github.com/golang/protobuf v1.5.2 ## explicit; go 1.9 +github.com/golang/protobuf/descriptor github.com/golang/protobuf/jsonpb github.com/golang/protobuf/proto github.com/golang/protobuf/protoc-gen-go/descriptor @@ -448,13 +474,15 @@ github.com/grpc-ecosystem/go-grpc-middleware # github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 ## explicit github.com/grpc-ecosystem/go-grpc-prometheus -# github.com/grpc-ecosystem/grpc-opentracing v0.0.0-20180507213350-8e809c8a8645 -## explicit -github.com/grpc-ecosystem/grpc-opentracing/go/otgrpc +# github.com/grpc-ecosystem/grpc-gateway v1.16.0 +## explicit; go 1.14 +github.com/grpc-ecosystem/grpc-gateway/internal +github.com/grpc-ecosystem/grpc-gateway/runtime +github.com/grpc-ecosystem/grpc-gateway/utilities # github.com/hashicorp/errwrap v1.1.0 ## explicit github.com/hashicorp/errwrap -# github.com/hashicorp/go-immutable-radix v1.0.0 => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe +# github.com/hashicorp/go-immutable-radix v1.3.1 ## explicit github.com/hashicorp/go-immutable-radix # github.com/hashicorp/go-memdb v0.0.0-20161216180745-cb9a474f84cc @@ -491,7 +519,7 @@ github.com/ishidawataru/sctp # github.com/jmespath/go-jmespath v0.3.0 ## explicit; go 1.14 github.com/jmespath/go-jmespath -# github.com/klauspost/compress v1.14.3 +# github.com/klauspost/compress v1.15.0 ## explicit; go 1.15 github.com/klauspost/compress github.com/klauspost/compress/fse @@ -508,14 +536,15 @@ github.com/miekg/dns # github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible ## explicit github.com/mistifyio/go-zfs -# github.com/mitchellh/hashstructure v1.0.0 -## explicit -github.com/mitchellh/hashstructure -# github.com/moby/buildkit v0.8.2-0.20210615162540-9f254e18360a -## explicit; go 1.13 +# github.com/mitchellh/hashstructure/v2 v2.0.2 +## explicit; go 1.14 +github.com/mitchellh/hashstructure/v2 +# github.com/moby/buildkit v0.10.0 +## explicit; go 1.17 github.com/moby/buildkit/api/services/control github.com/moby/buildkit/api/types github.com/moby/buildkit/cache +github.com/moby/buildkit/cache/config github.com/moby/buildkit/cache/contenthash github.com/moby/buildkit/cache/metadata github.com/moby/buildkit/cache/remotecache @@ -549,7 +578,6 @@ github.com/moby/buildkit/frontend/dockerfile/parser github.com/moby/buildkit/frontend/dockerfile/shell 
github.com/moby/buildkit/frontend/gateway github.com/moby/buildkit/frontend/gateway/client -github.com/moby/buildkit/frontend/gateway/errdefs github.com/moby/buildkit/frontend/gateway/forwarder github.com/moby/buildkit/frontend/gateway/grpcclient github.com/moby/buildkit/frontend/gateway/pb @@ -580,33 +608,49 @@ github.com/moby/buildkit/source github.com/moby/buildkit/source/git github.com/moby/buildkit/source/http github.com/moby/buildkit/source/local +github.com/moby/buildkit/source/types github.com/moby/buildkit/util/apicaps github.com/moby/buildkit/util/apicaps/pb github.com/moby/buildkit/util/appdefaults github.com/moby/buildkit/util/archutil +github.com/moby/buildkit/util/bklog +github.com/moby/buildkit/util/buildinfo +github.com/moby/buildkit/util/buildinfo/types github.com/moby/buildkit/util/compression github.com/moby/buildkit/util/cond github.com/moby/buildkit/util/contentutil github.com/moby/buildkit/util/entitlements github.com/moby/buildkit/util/entitlements/security +github.com/moby/buildkit/util/estargz github.com/moby/buildkit/util/flightcontrol github.com/moby/buildkit/util/gitutil github.com/moby/buildkit/util/grpcerrors github.com/moby/buildkit/util/imageutil github.com/moby/buildkit/util/leaseutil github.com/moby/buildkit/util/network +github.com/moby/buildkit/util/overlay github.com/moby/buildkit/util/progress +github.com/moby/buildkit/util/progress/controller github.com/moby/buildkit/util/progress/logs github.com/moby/buildkit/util/pull/pullprogress +github.com/moby/buildkit/util/push github.com/moby/buildkit/util/resolver +github.com/moby/buildkit/util/resolver/config +github.com/moby/buildkit/util/resolver/limited github.com/moby/buildkit/util/resolver/retryhandler github.com/moby/buildkit/util/rootless/specconv github.com/moby/buildkit/util/sshutil github.com/moby/buildkit/util/stack +github.com/moby/buildkit/util/suggest github.com/moby/buildkit/util/system github.com/moby/buildkit/util/throttle github.com/moby/buildkit/util/tracing +github.com/moby/buildkit/util/tracing/exec +github.com/moby/buildkit/util/tracing/otlptracegrpc +github.com/moby/buildkit/util/tracing/transform +github.com/moby/buildkit/util/urlutil github.com/moby/buildkit/util/winlayers +github.com/moby/buildkit/version github.com/moby/buildkit/worker # github.com/moby/ipvs v1.0.1 ## explicit; go 1.13 @@ -636,7 +680,7 @@ github.com/morikuni/aec # github.com/opencontainers/go-digest v1.0.0 ## explicit; go 1.13 github.com/opencontainers/go-digest -# github.com/opencontainers/image-spec v1.0.2 +# github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 ## explicit github.com/opencontainers/image-spec/identity github.com/opencontainers/image-spec/specs-go @@ -657,14 +701,6 @@ github.com/opencontainers/selinux/go-selinux github.com/opencontainers/selinux/go-selinux/label github.com/opencontainers/selinux/pkg/pwalk github.com/opencontainers/selinux/pkg/pwalkdir -# github.com/opentracing-contrib/go-stdlib v1.0.0 -## explicit; go 1.14 -github.com/opentracing-contrib/go-stdlib/nethttp -# github.com/opentracing/opentracing-go v1.2.0 -## explicit; go 1.14 -github.com/opentracing/opentracing-go -github.com/opentracing/opentracing-go/ext -github.com/opentracing/opentracing-go/log # github.com/pelletier/go-toml v1.9.4 ## explicit; go 1.12 github.com/pelletier/go-toml @@ -676,7 +712,7 @@ github.com/philhofer/fwd # github.com/pkg/errors v0.9.1 ## explicit github.com/pkg/errors -# github.com/prometheus/client_golang v1.11.0 => github.com/prometheus/client_golang v1.6.0 +# 
github.com/prometheus/client_golang v1.12.1 => github.com/prometheus/client_golang v1.6.0 ## explicit; go 1.11 github.com/prometheus/client_golang/prometheus github.com/prometheus/client_golang/prometheus/internal @@ -684,7 +720,7 @@ github.com/prometheus/client_golang/prometheus/promhttp # github.com/prometheus/client_model v0.2.0 ## explicit; go 1.9 github.com/prometheus/client_model/go -# github.com/prometheus/common v0.30.0 +# github.com/prometheus/common v0.32.1 ## explicit; go 1.13 github.com/prometheus/common/expfmt github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg @@ -724,6 +760,9 @@ github.com/tinylib/msgp/msgp github.com/tonistiigi/fsutil github.com/tonistiigi/fsutil/copy github.com/tonistiigi/fsutil/types +# github.com/tonistiigi/go-archvariant v1.0.0 +## explicit; go 1.17 +github.com/tonistiigi/go-archvariant # github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea ## explicit github.com/tonistiigi/units @@ -783,6 +822,58 @@ go.opencensus.io/trace go.opencensus.io/trace/internal go.opencensus.io/trace/propagation go.opencensus.io/trace/tracestate +# go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.29.0 +## explicit; go 1.16 +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc +go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc/internal +# go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.29.0 +## explicit; go 1.16 +go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace +# go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 +## explicit; go 1.16 +go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp +# go.opentelemetry.io/otel v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel +go.opentelemetry.io/otel/attribute +go.opentelemetry.io/otel/baggage +go.opentelemetry.io/otel/codes +go.opentelemetry.io/otel/internal +go.opentelemetry.io/otel/internal/baggage +go.opentelemetry.io/otel/internal/global +go.opentelemetry.io/otel/propagation +go.opentelemetry.io/otel/semconv/v1.7.0 +# go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel/exporters/otlp/otlptrace +go.opentelemetry.io/otel/exporters/otlp/otlptrace/internal/tracetransform +# go.opentelemetry.io/otel/internal/metric v0.27.0 +## explicit; go 1.16 +go.opentelemetry.io/otel/internal/metric/global +go.opentelemetry.io/otel/internal/metric/registry +# go.opentelemetry.io/otel/metric v0.27.0 +## explicit; go 1.16 +go.opentelemetry.io/otel/metric +go.opentelemetry.io/otel/metric/global +go.opentelemetry.io/otel/metric/number +go.opentelemetry.io/otel/metric/sdkapi +go.opentelemetry.io/otel/metric/unit +# go.opentelemetry.io/otel/sdk v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel/sdk/instrumentation +go.opentelemetry.io/otel/sdk/internal +go.opentelemetry.io/otel/sdk/internal/env +go.opentelemetry.io/otel/sdk/resource +go.opentelemetry.io/otel/sdk/trace +# go.opentelemetry.io/otel/trace v1.4.1 +## explicit; go 1.16 +go.opentelemetry.io/otel/trace +# go.opentelemetry.io/proto/otlp v0.12.0 +## explicit; go 1.14 +go.opentelemetry.io/proto/otlp/collector/trace/v1 +go.opentelemetry.io/proto/otlp/common/v1 +go.opentelemetry.io/proto/otlp/resource/v1 +go.opentelemetry.io/proto/otlp/trace/v1 # go.uber.org/atomic v1.7.0 ## explicit; go 1.13 go.uber.org/atomic @@ -905,13 +996,15 @@ google.golang.org/appengine/urlfetch google.golang.org/genproto/googleapis/api 
google.golang.org/genproto/googleapis/api/annotations google.golang.org/genproto/googleapis/api/distribution +google.golang.org/genproto/googleapis/api/httpbody google.golang.org/genproto/googleapis/api/label google.golang.org/genproto/googleapis/api/metric google.golang.org/genproto/googleapis/api/monitoredres google.golang.org/genproto/googleapis/logging/type google.golang.org/genproto/googleapis/logging/v2 google.golang.org/genproto/googleapis/rpc/status -# google.golang.org/grpc v1.43.0 +google.golang.org/genproto/protobuf/field_mask +# google.golang.org/grpc v1.44.0 ## explicit; go 1.14 google.golang.org/grpc google.golang.org/grpc/attributes @@ -1030,8 +1123,6 @@ gotest.tools/v3/skip # github.com/armon/go-radix => github.com/armon/go-radix v0.0.0-20150105235045-e39d623f12e8 # github.com/coreos/go-systemd => github.com/coreos/go-systemd v0.0.0-20180511133405-39ca1b05acc7 # github.com/coreos/pkg => github.com/coreos/pkg v0.0.0-20180108230652-97fdf19511ea -# github.com/gogo/googleapis => github.com/gogo/googleapis v1.3.2 -# github.com/hashicorp/go-immutable-radix => github.com/tonistiigi/go-immutable-radix v0.0.0-20170803185627-826af9ccf0fe # github.com/hashicorp/go-msgpack => github.com/hashicorp/go-msgpack v0.0.0-20140221154404-71c2886f5a67 # github.com/hashicorp/go-multierror => github.com/hashicorp/go-multierror v1.0.0 # github.com/hashicorp/serf => github.com/hashicorp/serf v0.7.1-0.20160317193612-598c54895cc5