diff --git a/.golangci.yml b/.golangci.yml index e464d9c980..c8785de838 100644 --- a/.golangci.yml +++ b/.golangci.yml @@ -4,12 +4,12 @@ run: linters: disable-all: true enable: - - misspell + - errorlint - gofmt - goimports + - govet - ineffassign + - misspell - revive - unconvert - unused - - govet - diff --git a/agent/agent.go b/agent/agent.go index 60df8a602c..28356a6a75 100644 --- a/agent/agent.go +++ b/agent/agent.go @@ -3,6 +3,8 @@ package agent import ( "bytes" "context" + "errors" + "fmt" "math/rand" "reflect" "sync" @@ -11,7 +13,6 @@ import ( "github.com/moby/swarmkit/v2/agent/exec" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/log" - "github.com/pkg/errors" ) const ( @@ -467,7 +468,7 @@ func (a *Agent) handleSessionMessage(ctx context.Context, message *api.SessionMe if !same { a.keys = message.NetworkBootstrapKeys if err := a.config.Executor.SetNetworkBootstrapKeys(a.keys); err != nil { - return errors.Wrap(err, "configuring network key failed") + return fmt.Errorf("configuring network key failed: %w", err) } } } @@ -517,7 +518,7 @@ func (a *Agent) UpdateTaskStatus(ctx context.Context, taskID string, status *api go func() { err := session.sendTaskStatus(ctx, taskID, status) if err != nil { - if err == errTaskUnknown { + if errors.Is(err, errTaskUnknown) { err = nil // dispatcher no longer cares about this task. 
} else { log.G(ctx).WithError(err).Error("closing session after fatal error") diff --git a/agent/config.go b/agent/config.go index 3afb2383a5..dcdfd32a29 100644 --- a/agent/config.go +++ b/agent/config.go @@ -1,11 +1,12 @@ package agent import ( + "errors" + "github.com/docker/go-events" "github.com/moby/swarmkit/v2/agent/exec" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/connectionbroker" - "github.com/pkg/errors" bolt "go.etcd.io/bbolt" "google.golang.org/grpc/credentials" ) diff --git a/agent/csi/plugin/manager.go b/agent/csi/plugin/manager.go index b4fc047263..66ce5594f2 100644 --- a/agent/csi/plugin/manager.go +++ b/agent/csi/plugin/manager.go @@ -2,6 +2,7 @@ package plugin import ( "context" + "errors" "fmt" "sync" @@ -57,7 +58,7 @@ func (pm *pluginManager) Get(name string) (NodePlugin, error) { plugin, err := pm.getPlugin(name) if err != nil { - return nil, fmt.Errorf("cannot get plugin %v: %v", name, err) + return nil, fmt.Errorf("cannot get plugin %v: %w", name, err) } return plugin, nil @@ -110,7 +111,7 @@ func (pm *pluginManager) getPlugin(name string) (NodePlugin, error) { pa, ok := pc.(plugin.AddrPlugin) if !ok { - return nil, fmt.Errorf("plugin does not implement PluginAddr interface") + return nil, errors.New("plugin does not implement PluginAddr interface") } p := pm.newNodePluginFunc(name, pa, pm.secrets) diff --git a/agent/csi/volumes.go b/agent/csi/volumes.go index 97539286df..bb175310c7 100644 --- a/agent/csi/volumes.go +++ b/agent/csi/volumes.go @@ -2,6 +2,7 @@ package csi import ( "context" + "errors" "fmt" "sync" "time" @@ -131,7 +132,7 @@ func (r *volumes) Get(volumeID string) (string, error) { if vs, ok := r.volumes[volumeID]; ok { if vs.remove { // TODO(dperny): use a structured error - return "", fmt.Errorf("volume being removed") + return "", errors.New("volume being removed") } if p, err := r.plugins.Get(vs.volume.Driver.Name); err == nil { diff --git a/agent/exec/controller.go b/agent/exec/controller.go index 
2837377245..16d026d41f 100644 --- a/agent/exec/controller.go +++ b/agent/exec/controller.go @@ -2,6 +2,7 @@ package exec import ( "context" + "errors" "fmt" "time" @@ -9,7 +10,6 @@ import ( "github.com/moby/swarmkit/v2/api/equality" "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/protobuf/ptypes" - "github.com/pkg/errors" ) // Controller controls execution of a task. @@ -197,7 +197,7 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, exitCode = ec.ExitCode() } - if cause := errors.Cause(err); cause == context.DeadlineExceeded || cause == context.Canceled { + if errors.Is(err, context.DeadlineExceeded) || errors.Is(err, context.Canceled) { return retry() } @@ -308,13 +308,13 @@ func Do(ctx context.Context, task *api.Task, ctlr Controller) (*api.TaskStatus, // the following states may proceed past desired state. switch status.State { case api.TaskStatePreparing: - if err := ctlr.Prepare(ctx); err != nil && err != ErrTaskPrepared { + if err := ctlr.Prepare(ctx); err != nil && !errors.Is(err, ErrTaskPrepared) { return fatal(err) } return transition(api.TaskStateReady, "prepared") case api.TaskStateStarting: - if err := ctlr.Start(ctx); err != nil && err != ErrTaskStarted { + if err := ctlr.Start(ctx); err != nil && !errors.Is(err, ErrTaskStarted) { return fatal(err) } @@ -355,6 +355,5 @@ func logStateChange(ctx context.Context, desired, previous, next api.TaskState) } func contextDoneError(err error) bool { - cause := errors.Cause(err) - return cause == context.Canceled || cause == context.DeadlineExceeded + return errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) } diff --git a/agent/exec/errors.go b/agent/exec/errors.go index f57629161f..fbb17f7f90 100644 --- a/agent/exec/errors.go +++ b/agent/exec/errors.go @@ -1,6 +1,6 @@ package exec -import "github.com/pkg/errors" +import "errors" var ( // ErrRuntimeUnsupported encountered when a task requires a runtime diff --git 
a/agent/exec/errors_test.go b/agent/exec/errors_test.go index 45a237fdd4..1695ea12c9 100644 --- a/agent/exec/errors_test.go +++ b/agent/exec/errors_test.go @@ -3,17 +3,15 @@ package exec import ( "fmt" "testing" - - "github.com/pkg/errors" ) func TestIsTemporary(t *testing.T) { err := fmt.Errorf("err") err1 := MakeTemporary(fmt.Errorf("err1: %w", err)) err2 := fmt.Errorf("err2: %w", err1) - err3 := errors.Wrap(err2, "err3") + err3 := fmt.Errorf("err3: %w", err2) err4 := fmt.Errorf("err4: %w", err3) - err5 := errors.Wrap(err4, "err5") + err5 := fmt.Errorf("err5: %w", err4) if IsTemporary(nil) { t.Error("expected error to not be a temporary error") diff --git a/agent/task.go b/agent/task.go index a000410445..8952f41b5d 100644 --- a/agent/task.go +++ b/agent/task.go @@ -2,6 +2,7 @@ package agent import ( "context" + "errors" "sync" "time" @@ -160,12 +161,12 @@ func (tm *taskManager) run(ctx context.Context) { default: } - switch err { - case exec.ErrTaskNoop: + switch { + case errors.Is(err, exec.ErrTaskNoop): if !updated { continue // wait till getting pumped via update. } - case exec.ErrTaskRetry: + case errors.Is(err, exec.ErrTaskRetry): // TODO(stevvooe): Add exponential backoff with random jitter // here. For now, this backoff is enough to keep the task // manager from running away with the CPU. 
@@ -173,7 +174,7 @@ func (tm *taskManager) run(ctx context.Context) { errs <- nil // repump this branch, with no err }) continue - case nil, context.Canceled, context.DeadlineExceeded: + case err == nil, errors.Is(err, context.Canceled), errors.Is(err, context.DeadlineExceeded): // no log in this case default: log.G(ctx).WithError(err).Error("task operation failed") diff --git a/agent/worker.go b/agent/worker.go index a004a44a13..61f19e93f3 100644 --- a/agent/worker.go +++ b/agent/worker.go @@ -2,6 +2,7 @@ package agent import ( "context" + "errors" "sync" "github.com/moby/swarmkit/v2/agent/exec" @@ -239,7 +240,7 @@ func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.Assig } if mgr, ok := w.taskManagers[task.ID]; ok { - if err := mgr.Update(ctx, task); err != nil && err != ErrClosed { + if err := mgr.Update(ctx, task); err != nil && !errors.Is(err, ErrClosed) { log.G(ctx).WithError(err).Error("failed updating assigned task") } } else { @@ -247,7 +248,7 @@ func reconcileTaskState(ctx context.Context, w *worker, assignments []*api.Assig // storage and replace it with our status, if we have it. status, err := GetTaskStatus(tx, task.ID) if err != nil { - if err != errTaskUnknown { + if !errors.Is(err, errTaskUnknown) { return err } @@ -569,7 +570,7 @@ func (w *worker) updateTaskStatus(ctx context.Context, tx *bolt.Tx, taskID strin // dance of too-tightly-coupled concurrent parts, fixing tht race is // fraught with hazards. instead, we'll recognize that it can occur, // log the error, and then ignore it. - if err == errTaskUnknown { + if errors.Is(err, errTaskUnknown) { // log at info level. debug logging in docker is already really // verbose, so many people disable it. 
the race that causes this // behavior should be very rare, but if it occurs, we should know diff --git a/api/genericresource/resource_management.go b/api/genericresource/resource_management.go index 86a358363f..144658330c 100644 --- a/api/genericresource/resource_management.go +++ b/api/genericresource/resource_management.go @@ -1,6 +1,7 @@ package genericresource import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -15,7 +16,7 @@ func Claim(nodeAvailableResources, taskAssigned *[]*api.GenericResource, for _, res := range taskReservations { tr := res.GetDiscreteResourceSpec() if tr == nil { - return fmt.Errorf("task should only hold Discrete type") + return errors.New("task should only hold Discrete type") } // Select the resources @@ -86,7 +87,7 @@ func Reclaim(nodeAvailableResources *[]*api.GenericResource, taskAssigned, nodeR func reclaimResources(nodeAvailableResources *[]*api.GenericResource, taskAssigned []*api.GenericResource) error { // The node could have been updated if nodeAvailableResources == nil { - return fmt.Errorf("node no longer has any resources") + return errors.New("node no longer has any resources") } for _, res := range taskAssigned { diff --git a/api/genericresource/validate.go b/api/genericresource/validate.go index 909ac3e7ee..7507e8035b 100644 --- a/api/genericresource/validate.go +++ b/api/genericresource/validate.go @@ -1,6 +1,7 @@ package genericresource import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -24,7 +25,7 @@ func ValidateTask(resources *api.Resources) error { func HasEnough(nodeRes []*api.GenericResource, taskRes *api.GenericResource) (bool, error) { t := taskRes.GetDiscreteResourceSpec() if t == nil { - return false, fmt.Errorf("task should only hold Discrete type") + return false, errors.New("task should only hold Discrete type") } if nodeRes == nil { diff --git a/api/storeobject.go b/api/storeobject.go index f7e483d973..4d6dfa1357 100644 --- a/api/storeobject.go +++ b/api/storeobject.go @@ -75,7 
+75,7 @@ func customIndexer(kind string, annotations *Annotations) (bool, [][]byte, error func fromArgs(args ...interface{}) ([]byte, error) { if len(args) != 1 { - return nil, fmt.Errorf("must provide only a single argument") + return nil, errors.New("must provide only a single argument") } arg, ok := args[0].(string) if !ok { diff --git a/ca/certificates.go b/ca/certificates.go index 0e92b2e08a..8e007199a9 100644 --- a/ca/certificates.go +++ b/ca/certificates.go @@ -12,6 +12,7 @@ import ( "crypto/x509" "encoding/asn1" "encoding/pem" + "errors" "fmt" "io" "os" @@ -30,7 +31,6 @@ import ( "github.com/moby/swarmkit/v2/connectionbroker" "github.com/moby/swarmkit/v2/ioutils" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/codes" "google.golang.org/grpc/credentials" @@ -199,13 +199,13 @@ func (rca *RootCA) Signer() (*LocalSigner, error) { func (rca *RootCA) IssueAndSaveNewCertificates(kw KeyWriter, cn, ou, org string) (*tls.Certificate, *IssuerInfo, error) { csr, key, err := GenerateNewCSR() if err != nil { - return nil, nil, errors.Wrap(err, "error when generating new node certs") + return nil, nil, fmt.Errorf("error when generating new node certs: %w", err) } // Obtain a signed Certificate certChain, err := rca.ParseValidateAndSignCSR(csr, cn, ou, org) if err != nil { - return nil, nil, errors.Wrap(err, "failed to sign node certificate") + return nil, nil, fmt.Errorf("failed to sign node certificate: %w", err) } signer, err := rca.Signer() if err != nil { // should never happen, since if ParseValidateAndSignCSR did not fail this root CA must have a signer @@ -235,7 +235,7 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, kw KeyWrit // Create a new key/pair and CSR csr, key, err := GenerateNewCSR() if err != nil { - return nil, nil, errors.Wrap(err, "error when generating new node certs") + return nil, nil, fmt.Errorf("error when generating new node certs: %w", err) } // Get the 
remote manager to issue a CA signed certificate for this node @@ -275,7 +275,8 @@ func (rca *RootCA) RequestAndSaveNewCertificates(ctx context.Context, kw KeyWrit // TODO(cyli): - right now we need the invalid certificate in order to determine whether or not we should // download a new root, because we only want to do that in the case of workers. When we have a single // codepath for updating the root CAs for both managers and workers, this snippet can go. - if _, ok := err.(x509.UnknownAuthorityError); ok { + var unknownAuthorityError x509.UnknownAuthorityError + if errors.As(err, &unknownAuthorityError) { if parsedCerts, parseErr := helpers.ParseCertificatesPEM(signedCert); parseErr == nil && len(parsedCerts) > 0 { return nil, nil, x509UnknownAuthError{ error: err, @@ -397,7 +398,7 @@ func (rca *RootCA) ParseValidateAndSignCSR(csrBytes []byte, cn, ou, org string) } cert, err := signer.Sign(signRequest) if err != nil { - return nil, errors.Wrap(err, "failed to sign node certificate") + return nil, fmt.Errorf("failed to sign node certificate: %w", err) } return append(cert, rca.Intermediates...), nil @@ -423,7 +424,7 @@ func (rca *RootCA) CrossSignCACertificate(otherCAPEM []byte) ([]byte, error) { template.SignatureAlgorithm = signer.parsedCert.SignatureAlgorithm // make sure we can sign with the signer key derBytes, err := x509.CreateCertificate(cryptorand.Reader, template, signer.parsedCert, template.PublicKey, signer.cryptoSigner) if err != nil { - return nil, errors.Wrap(err, "could not cross-sign new CA certificate using old CA material") + return nil, fmt.Errorf("could not cross-sign new CA certificate using old CA material: %w", err) } return pem.EncodeToMemory(&pem.Block{ @@ -448,7 +449,7 @@ func NewRootCA(rootCertBytes, signCertBytes, signKeyBytes []byte, certExpiry tim // Parse all the certificates in the cert bundle parsedCerts, err := helpers.ParseCertificatesPEM(rootCertBytes) if err != nil { - return RootCA{}, errors.Wrap(err, "invalid root 
certificates") + return RootCA{}, fmt.Errorf("invalid root certificates: %w", err) } // Check to see if we have at least one valid cert if len(parsedCerts) < 1 { @@ -465,7 +466,7 @@ func NewRootCA(rootCertBytes, signCertBytes, signKeyBytes []byte, certExpiry tim selfpool := x509.NewCertPool() selfpool.AddCert(cert) if _, err := cert.Verify(x509.VerifyOptions{Roots: selfpool}); err != nil { - return RootCA{}, errors.Wrap(err, "error while validating Root CA Certificate") + return RootCA{}, fmt.Errorf("error while validating Root CA Certificate: %w", err) } pool.AddCert(cert) } @@ -480,7 +481,7 @@ func NewRootCA(rootCertBytes, signCertBytes, signKeyBytes []byte, certExpiry tim if len(intermediates) > 0 { parsedIntermediates, _, err = ValidateCertChain(pool, intermediates, false) if err != nil { - return RootCA{}, errors.Wrap(err, "invalid intermediate chain") + return RootCA{}, fmt.Errorf("invalid intermediate chain: %w", err) } intermediatePool = x509.NewCertPool() for _, cert := range parsedIntermediates { @@ -534,30 +535,30 @@ func ValidateCertChain(rootPool *x509.CertPool, certs []byte, allowExpired bool) // Manual expiry validation because we want more information on which certificate in the chain is expired, and // because this is an easier way to allow expired certs. 
if now.Before(cert.NotBefore) { - return nil, nil, errors.Wrapf( + return nil, nil, fmt.Errorf( + "certificate (%d - %s) not valid before %s, and it is currently %s: %w", + i+1, cert.Subject.CommonName, cert.NotBefore.UTC().Format(time.RFC1123), now.Format(time.RFC1123), x509.CertificateInvalidError{ Cert: cert, Reason: x509.Expired, - }, - "certificate (%d - %s) not valid before %s, and it is currently %s", - i+1, cert.Subject.CommonName, cert.NotBefore.UTC().Format(time.RFC1123), now.Format(time.RFC1123)) + }) } if !allowExpired && now.After(cert.NotAfter) { - return nil, nil, errors.Wrapf( + return nil, nil, fmt.Errorf( + "certificate (%d - %s) not valid after %s, and it is currently %s: %w", + i+1, cert.Subject.CommonName, cert.NotAfter.UTC().Format(time.RFC1123), now.Format(time.RFC1123), x509.CertificateInvalidError{ Cert: cert, Reason: x509.Expired, - }, - "certificate (%d - %s) not valid after %s, and it is currently %s", - i+1, cert.Subject.CommonName, cert.NotAfter.UTC().Format(time.RFC1123), now.Format(time.RFC1123)) + }) } if i > 0 { // check that the previous cert was signed by this cert prevCert := parsedCerts[i-1] if err := prevCert.CheckSignatureFrom(cert); err != nil { - return nil, nil, errors.Wrapf(err, "certificates do not form a chain: (%d - %s) is not signed by (%d - %s)", - i, prevCert.Subject.CommonName, i+1, cert.Subject.CommonName) + return nil, nil, fmt.Errorf("certificates do not form a chain: (%d - %s) is not signed by (%d - %s): %w", + i, prevCert.Subject.CommonName, i+1, cert.Subject.CommonName, err) } if intermediatePool == nil { @@ -595,7 +596,8 @@ func ValidateCertChain(rootPool *x509.CertPool, certs []byte, allowExpired bool) return parsedCerts, chains, nil } } - if invalid, ok := err.(x509.CertificateInvalidError); ok && invalid.Reason == x509.Expired { + var invalid x509.CertificateInvalidError + if errors.As(err, &invalid) && invalid.Reason == x509.Expired { return nil, nil, errors.New("there is no time span for which all of the certificates, including a root,
are valid") } return nil, nil, err @@ -616,7 +618,7 @@ func newLocalSigner(keyBytes, certBytes []byte, certExpiry time.Duration, rootPo parsedCerts, err := helpers.ParseCertificatesPEM(certBytes) if err != nil { - return nil, errors.Wrap(err, "invalid signing CA cert") + return nil, fmt.Errorf("invalid signing CA cert: %w", err) } if len(parsedCerts) == 0 { return nil, errors.New("no valid signing CA certificates found") @@ -629,13 +631,13 @@ func newLocalSigner(keyBytes, certBytes []byte, certExpiry time.Duration, rootPo Intermediates: intermediatePool, } if _, err := parsedCerts[0].Verify(opts); err != nil { - return nil, errors.Wrap(err, "error while validating signing CA certificate against roots and intermediates") + return nil, fmt.Errorf("error while validating signing CA certificate against roots and intermediates: %w", err) } // The key should not be encrypted, but it could be in PKCS8 format rather than PKCS1 priv, err := helpers.ParsePrivateKeyPEM(keyBytes) if err != nil { - return nil, errors.Wrap(err, "malformed private key") + return nil, fmt.Errorf("malformed private key: %w", err) } // We will always use the first certificate inside of the root bundle as the active one @@ -748,13 +750,13 @@ func GetRemoteCA(ctx context.Context, d digest.Digest, connBroker *connectionbro if d != "" { verifier := d.Verifier() if err != nil { - return RootCA{}, errors.Wrap(err, "unexpected error getting digest verifier") + return RootCA{}, fmt.Errorf("unexpected error getting digest verifier: %w", err) } io.Copy(verifier, bytes.NewReader(response.Certificate)) if !verifier.Verified() { - return RootCA{}, errors.Errorf("remote CA does not match fingerprint. Expected: %s", d.Encoded()) + return RootCA{}, fmt.Errorf("remote CA does not match fingerprint. 
Expected: %s", d.Encoded()) } } diff --git a/ca/config.go b/ca/config.go index 6e0c8a041b..702e69f276 100644 --- a/ca/config.go +++ b/ca/config.go @@ -5,6 +5,7 @@ import ( cryptorand "crypto/rand" "crypto/tls" "crypto/x509" + "errors" "fmt" "math/big" "math/rand" @@ -16,7 +17,6 @@ import ( cfconfig "github.com/cloudflare/cfssl/config" events "github.com/docker/go-events" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "google.golang.org/grpc/credentials" "github.com/moby/swarmkit/v2/api" @@ -173,7 +173,7 @@ func validateRootCAAndTLSCert(rootCA *RootCA, tlsKeyPair *tls.Certificate) error for i, derBytes := range tlsKeyPair.Certificate { parsed, err := x509.ParseCertificate(derBytes) if err != nil { - return errors.Wrap(err, "could not validate new root certificates due to parse error") + return fmt.Errorf("could not validate new root certificates due to parse error: %w", err) } if i == 0 { leafCert = parsed @@ -189,7 +189,7 @@ func validateRootCAAndTLSCert(rootCA *RootCA, tlsKeyPair *tls.Certificate) error Intermediates: intermediatePool, } if _, err := leafCert.Verify(opts); err != nil { - return errors.Wrap(err, "new root CA does not match existing TLS credentials") + return fmt.Errorf("new root CA does not match existing TLS credentials: %w", err) } return nil } @@ -273,20 +273,20 @@ func (s *SecurityConfig) updateTLSCredentials(certificate *tls.Certificate, issu certs := []tls.Certificate{*certificate} clientConfig, err := NewClientTLSConfig(certs, s.rootCA.Pool, ManagerRole) if err != nil { - return errors.Wrap(err, "failed to create a new client config using the new root CA") + return fmt.Errorf("failed to create a new client config using the new root CA: %w", err) } serverConfig, err := NewServerTLSConfig(certs, s.rootCA.Pool) if err != nil { - return errors.Wrap(err, "failed to create a new server config using the new root CA") + return fmt.Errorf("failed to create a new server config using the new root CA: %w", err) } if err := 
s.ClientTLSCreds.loadNewTLSConfig(clientConfig); err != nil { - return errors.Wrap(err, "failed to update the client credentials") + return fmt.Errorf("failed to update the client credentials: %w", err) } if err := s.ServerTLSCreds.loadNewTLSConfig(serverConfig); err != nil { - return errors.Wrap(err, "failed to update the server TLS credentials") + return fmt.Errorf("failed to update the server TLS credentials: %w", err) } s.certificate = certificate @@ -364,7 +364,7 @@ func GenerateJoinToken(rootCA *RootCA, fips bool) string { var secretBytes [generatedSecretEntropyBytes]byte if _, err := cryptorand.Read(secretBytes[:]); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) + panic(fmt.Errorf("failed to read random bytes: %w", err)) } var nn, dgst big.Int @@ -513,8 +513,8 @@ func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWrite proposedRole := ManagerRole tlsKeyPair, issuerInfo, err := rootCA.IssueAndSaveNewCertificates(krw, cn, proposedRole, org) - switch errors.Cause(err) { - case ErrNoValidSigner: + switch { + case errors.Is(err, ErrNoValidSigner): config.RetryInterval = GetCertRetryInterval // Request certificate issuance from a remote CA. 
// Last argument is nil because at this point we don't have any valid TLS creds @@ -523,7 +523,7 @@ func (rootCA RootCA) CreateSecurityConfig(ctx context.Context, krw *KeyReadWrite log.G(ctx).WithError(err).Error("failed to request and save new certificate") return nil, nil, err } - case nil: + case err == nil: log.G(ctx).WithFields(log.Fields{ "node.id": cn, "node.role": proposedRole, @@ -592,7 +592,8 @@ func RenewTLSConfigNow(ctx context.Context, s *SecurityConfig, connBroker *conne ConnBroker: connBroker, Credentials: s.ClientTLSCreds, }) - if wrappedError, ok := err.(x509UnknownAuthError); ok { + var wrappedError x509UnknownAuthError + if errors.As(err, &wrappedError) { var newErr error tlsKeyPair, issuerInfo, newErr = updateRootThenUpdateCert(ctx, s, connBroker, rootPaths, wrappedError.failedLeafCert) if newErr != nil { @@ -703,7 +704,7 @@ func ParseRole(apiRole api.NodeRole) (string, error) { case api.NodeRoleWorker: return WorkerRole, nil default: - return "", errors.Errorf("failed to parse api role: %v", apiRole) + return "", fmt.Errorf("failed to parse api role: %v", apiRole) } } @@ -715,6 +716,6 @@ func FormatRole(role string) (api.NodeRole, error) { case strings.ToLower(WorkerRole): return api.NodeRoleWorker, nil default: - return 0, errors.Errorf("failed to parse role: %s", role) + return 0, fmt.Errorf("failed to parse role: %s", role) } } diff --git a/ca/config_test.go b/ca/config_test.go index 728a141f8f..704a61a193 100644 --- a/ca/config_test.go +++ b/ca/config_test.go @@ -5,6 +5,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "net" "os" "path/filepath" @@ -25,7 +26,6 @@ import ( "github.com/moby/swarmkit/v2/manager/state" "github.com/moby/swarmkit/v2/manager/state/store" "github.com/moby/swarmkit/v2/testutils" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" ) @@ -236,11 +236,13 @@ func TestLoadSecurityConfigExpiredCert(t *testing.T) { _, _, err = ca.LoadSecurityConfig(tc.Context, 
tc.RootCA, krw, false) require.Error(t, err) - require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + var cie1 x509.CertificateInvalidError + require.ErrorAs(t, err, &cie1) _, _, err = ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, true) require.Error(t, err) - require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + var cie2 x509.CertificateInvalidError + require.ErrorAs(t, err, &cie2) // a cert that is expired is not valid if expiry is not allowed invalidCert = cautils.ReDateCert(t, certBytes, tc.RootCA.Certs, s.Key, now.Add(-2*time.Minute), now.Add(-1*time.Minute)) @@ -248,7 +250,8 @@ func TestLoadSecurityConfigExpiredCert(t *testing.T) { _, _, err = ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, false) require.Error(t, err) - require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + var cie3 x509.CertificateInvalidError + require.ErrorAs(t, err, &cie3) // but it is valid if expiry is allowed _, cancel, err := ca.LoadSecurityConfig(tc.Context, tc.RootCA, krw, true) @@ -798,7 +801,8 @@ func TestRenewTLSConfigUpdatesRootNonUnknownAuthError(t *testing.T) { err = ca.RenewTLSConfigNow(tc.Context, secConfig, fakeCAServer.getConnBroker(), tc.Paths.RootCA) require.Error(t, err) - require.IsType(t, x509.CertificateInvalidError{}, errors.Cause(err)) + var cie x509.CertificateInvalidError + require.ErrorAs(t, err, &cie) require.NoError(t, <-signErr) } diff --git a/ca/external.go b/ca/external.go index 82c73d4b12..85e18b2978 100644 --- a/ca/external.go +++ b/ca/external.go @@ -9,6 +9,8 @@ import ( "encoding/hex" "encoding/json" "encoding/pem" + "errors" + "fmt" "io" "net/http" "sync" @@ -19,7 +21,6 @@ import ( "github.com/cloudflare/cfssl/csr" "github.com/cloudflare/cfssl/signer" "github.com/moby/swarmkit/v2/log" - "github.com/pkg/errors" "golang.org/x/net/context/ctxhttp" ) @@ -114,7 +115,7 @@ func (eca *ExternalCA) Sign(ctx context.Context, req signer.SignRequest) (cert [ csrJSON, err := json.Marshal(req) if err != nil { - 
return nil, errors.Wrap(err, "unable to JSON-encode CFSSL signing request") + return nil, fmt.Errorf("unable to JSON-encode CFSSL signing request: %w", err) } // Try each configured proxy URL. Return after the first success. If @@ -186,29 +187,29 @@ func (eca *ExternalCA) CrossSignRootCA(ctx context.Context, rca RootCA) ([]byte, func makeExternalSignRequest(ctx context.Context, client *http.Client, url string, csrJSON []byte) (cert []byte, err error) { resp, err := ctxhttp.Post(ctx, client, url, "application/json", bytes.NewReader(csrJSON)) if err != nil { - return nil, recoverableErr{err: errors.Wrap(err, "unable to perform certificate signing request")} + return nil, recoverableErr{err: fmt.Errorf("unable to perform certificate signing request: %w", err)} } defer resp.Body.Close() b := io.LimitReader(resp.Body, CertificateMaxSize) body, err := io.ReadAll(b) if err != nil { - return nil, recoverableErr{err: errors.Wrap(err, "unable to read CSR response body")} + return nil, recoverableErr{err: fmt.Errorf("unable to read CSR response body: %w", err)} } if resp.StatusCode != http.StatusOK { - return nil, recoverableErr{err: errors.Errorf("unexpected status code in CSR response: %d - %s", resp.StatusCode, string(body))} + return nil, recoverableErr{err: fmt.Errorf("unexpected status code in CSR response: %d - %s", resp.StatusCode, string(body))} } var apiResponse api.Response if err := json.Unmarshal(body, &apiResponse); err != nil { log.G(ctx).Debugf("unable to JSON-parse CFSSL API response body: %s", string(body)) - return nil, recoverableErr{err: errors.Wrap(err, "unable to parse JSON response")} + return nil, recoverableErr{err: fmt.Errorf("unable to parse JSON response: %w", err)} } if !apiResponse.Success || apiResponse.Result == nil { if len(apiResponse.Errors) > 0 { - return nil, errors.Errorf("response errors: %v", apiResponse.Errors) + return nil, fmt.Errorf("response errors: %v", apiResponse.Errors) } return nil, errors.New("certificate signing request 
failed") @@ -216,12 +217,12 @@ func makeExternalSignRequest(ctx context.Context, client *http.Client, url strin result, ok := apiResponse.Result.(map[string]interface{}) if !ok { - return nil, errors.Errorf("invalid result type: %T", apiResponse.Result) + return nil, fmt.Errorf("invalid result type: %T", apiResponse.Result) } certPEM, ok := result["certificate"].(string) if !ok { - return nil, errors.Errorf("invalid result certificate field type: %T", result["certificate"]) + return nil, fmt.Errorf("invalid result certificate field type: %T", result["certificate"]) } return []byte(certPEM), nil diff --git a/ca/keyreadwriter.go b/ca/keyreadwriter.go index 55f7d6ba4a..6e563a3c1c 100644 --- a/ca/keyreadwriter.go +++ b/ca/keyreadwriter.go @@ -3,6 +3,8 @@ package ca import ( "crypto/x509" "encoding/pem" + "errors" + "fmt" "os" "path/filepath" "strconv" @@ -14,7 +16,6 @@ import ( "github.com/moby/swarmkit/v2/ca/keyutils" "github.com/moby/swarmkit/v2/ca/pkcs8" "github.com/moby/swarmkit/v2/ioutils" - "github.com/pkg/errors" ) const ( @@ -195,7 +196,7 @@ func (k *KeyReadWriter) Read() ([]byte, []byte, error) { if k.headersObj != nil { newHeaders, err := k.headersObj.UnmarshalHeaders(keyBlock.Headers, k.kekData) if err != nil { - return nil, nil, errors.Wrap(err, "unable to read TLS key headers") + return nil, nil, fmt.Errorf("unable to read TLS key headers: %w", err) } k.headersObj = newHeaders } @@ -385,7 +386,7 @@ func (k *KeyReadWriter) readKey() (*pem.Block, error) { } derBytes, err := k.keyFormatter.DecryptPEMBlock(keyBlock, k.kekData.KEK) - if err == keyutils.ErrFIPSUnsupportedKeyFormat { + if errors.Is(err, keyutils.ErrFIPSUnsupportedKeyFormat) { return nil, err } else if err != nil { return nil, ErrInvalidKEK{Wrapped: err} diff --git a/ca/reconciler.go b/ca/reconciler.go index 01a9558ece..ae1c7f361c 100644 --- a/ca/reconciler.go +++ b/ca/reconciler.go @@ -3,6 +3,7 @@ package ca import ( "bytes" "context" + "errors" "fmt" "reflect" "sync" @@ -13,7 +14,6 @@ import ( 
"github.com/moby/swarmkit/v2/api/equality" "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/state/store" - "github.com/pkg/errors" ) // IssuanceStateRotateMaxBatchSize is the maximum number of nodes we'll tell to rotate their certificates in any given update @@ -54,7 +54,7 @@ func IssuerFromAPIRootCA(rootCA *api.RootCA) (*IssuerInfo, error) { } issuerCerts, err := helpers.ParseCertificatesPEM(wantedIssuer) if err != nil { - return nil, errors.Wrap(err, "invalid certificate in cluster root CA object") + return nil, fmt.Errorf("invalid certificate in cluster root CA object: %w", err) } if len(issuerCerts) == 0 { return nil, errors.New("invalid certificate in cluster root CA object") @@ -166,7 +166,7 @@ func (r *rootRotationReconciler) runReconcilerLoop(ctx context.Context, loopRoot return } log.G(r.ctx).WithError(err).Error("could not complete root rotation") - if err == errRootRotationChanged { + if errors.Is(err, errRootRotationChanged) { // if the root rotation has changed, this loop will be cancelled anyway, so may as well abort early return } @@ -222,7 +222,7 @@ func (r *rootRotationReconciler) finishRootRotation(tx store.Tx, expectedRootCA updatedRootCA, err := NewRootCA(cluster.RootCA.RootRotation.CACert, signerCert, cluster.RootCA.RootRotation.CAKey, DefaultNodeCertExpiration, nil) if err != nil { - return errors.Wrap(err, "invalid cluster root rotation object") + return fmt.Errorf("invalid cluster root rotation object: %w", err) } cluster.RootCA = api.RootCA{ CACert: cluster.RootCA.RootRotation.CACert, @@ -249,7 +249,7 @@ func (r *rootRotationReconciler) batchUpdateNodes(toUpdate []*api.Node) error { for _, n := range toUpdate { if err := batch.Update(func(tx store.Tx) error { return store.UpdateNode(tx, n) - }); err != nil && err != store.ErrSequenceConflict { + }); err != nil && !errors.Is(err, store.ErrSequenceConflict) { log.G(r.ctx).WithError(err).Errorf("unable to update node %s to request a certificate rotation", n.ID) } } diff 
--git a/ca/renewer.go b/ca/renewer.go index 1eacab16df..69ad035204 100644 --- a/ca/renewer.go +++ b/ca/renewer.go @@ -2,13 +2,13 @@ package ca import ( "context" + "errors" "sync" "time" "github.com/docker/go-events" "github.com/moby/swarmkit/v2/connectionbroker" "github.com/moby/swarmkit/v2/log" - "github.com/pkg/errors" ) // RenewTLSExponentialBackoff sets the exponential backoff when trying to renew TLS certificates that have expired diff --git a/ca/server.go b/ca/server.go index cfb035313d..abe5e8cbd3 100644 --- a/ca/server.go +++ b/ca/server.go @@ -5,6 +5,8 @@ import ( "context" "crypto/subtle" "crypto/x509" + "errors" + "fmt" "sync" "time" @@ -14,7 +16,6 @@ import ( "github.com/moby/swarmkit/v2/identity" "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/state/store" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -332,7 +333,7 @@ func (s *Server) IssueNodeCertificate(ctx context.Context, request *api.IssueNod }).Debugf("new certificate entry added") break } - if err != store.ErrExist { + if !errors.Is(err, store.ErrExist) { return nil, err } if i == maxRetries { @@ -695,7 +696,7 @@ func (s *Server) UpdateRootCA(ctx context.Context, cluster *api.Cluster, reconci // Attempt to update our local RootCA with the new parameters updatedRootCA, err := RootCAFromAPI(rCA, expiry) if err != nil { - return errors.Wrap(err, "invalid Root CA object in cluster") + return fmt.Errorf("invalid Root CA object in cluster: %w", err) } s.localRootCA = &updatedRootCA @@ -791,7 +792,7 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { // Try using the external CA first. cert, err := externalCA.Sign(ctx, PrepareCSR(rawCSR, cn, ou, org)) - if err == ErrNoExternalCAURLs { + if errors.Is(err, ErrNoExternalCAURLs) { // No external CA servers configured. Try using the local CA. 
cert, err = rootCA.ParseValidateAndSignCSR(rawCSR, cn, ou, org) } @@ -808,7 +809,8 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { return errors.New("failed to sign CSR") } - if _, ok := err.(recoverableErr); ok { + var recoverableErr recoverableErr + if errors.As(err, &recoverableErr) { // Return without changing the state of the certificate. We may // retry signing it in the future. return errors.New("failed to sign CSR") @@ -818,7 +820,7 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { err = s.store.Update(func(tx store.Tx) error { node := store.GetNode(tx, nodeID) if node == nil { - return errors.Errorf("node %s not found", nodeID) + return fmt.Errorf("node %s not found", nodeID) } node.Certificate.Status = api.IssuanceStatus{ @@ -851,7 +853,7 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { if err != nil { node = store.GetNode(tx, nodeID) if node == nil { - err = errors.Errorf("node %s does not exist", nodeID) + err = fmt.Errorf("node %s does not exist", nodeID) } } return err @@ -865,7 +867,7 @@ func (s *Server) signNodeCert(ctx context.Context, node *api.Node) error { delete(s.pending, node.ID) break } - if err == store.ErrSequenceConflict { + if errors.Is(err, store.ErrSequenceConflict) { continue } diff --git a/ca/server_test.go b/ca/server_test.go index 01a4ed71bd..9881e7833e 100644 --- a/ca/server_test.go +++ b/ca/server_test.go @@ -5,6 +5,7 @@ import ( "context" "crypto/tls" "crypto/x509" + "errors" "fmt" "path/filepath" "reflect" @@ -20,7 +21,6 @@ import ( "github.com/moby/swarmkit/v2/manager/state/store" "github.com/moby/swarmkit/v2/testutils" "github.com/opencontainers/go-digest" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" "google.golang.org/grpc/codes" @@ -478,7 +478,7 @@ func TestServerExternalCAGetsTLSKeypairUpdates(t *testing.T) { } return nil }, 2*time.Second)) - require.Contains(t, 
errors.Cause(err).Error(), "remote error: tls: bad certificate") + require.ErrorContains(t, err, "remote error: tls: bad certificate") } func TestCAServerUpdateRootCA(t *testing.T) { @@ -652,7 +652,7 @@ func (r *rootRotationTester) convergeRootCA(wantRootCA *api.RootCA, descr string require.NoError(r.t, r.tc.MemoryStore.Update(func(tx store.Tx) error { clusters, err := store.FindClusters(tx, store.All) if err != nil || len(clusters) != 1 { - return errors.Wrap(err, "unable to find cluster") + return fmt.Errorf("unable to find cluster: %w", err) } clusters[0].RootCA = *wantRootCA return store.UpdateCluster(tx, clusters[0]) @@ -1264,7 +1264,7 @@ func TestRootRotationReconciliationRace(t *testing.T) { return err } if !bytes.Equal(s.Key, rotationKey) { - return errors.Errorf("server %d's root CAs hasn't been updated yet", i) + return fmt.Errorf("server %d's root CAs hasn't been updated yet", i) } } return nil diff --git a/ca/testutils/externalutils.go b/ca/testutils/externalutils.go index 3d63f8dda7..e93355a18a 100644 --- a/ca/testutils/externalutils.go +++ b/ca/testutils/externalutils.go @@ -17,7 +17,6 @@ import ( cfsslerrors "github.com/cloudflare/cfssl/errors" "github.com/cloudflare/cfssl/signer" "github.com/moby/swarmkit/v2/ca" - "github.com/pkg/errors" ) var crossSignPolicy = config.SigningProfile{ @@ -56,7 +55,7 @@ func NewExternalSigningServer(rootCA ca.RootCA, basedir string) (*ExternalSignin } serverCert, _, err := rootCA.IssueAndSaveNewCertificates(ca.NewKeyReadWriter(serverPaths, nil, nil), serverCN, serverOU, "") if err != nil { - return nil, errors.Wrap(err, "unable to get TLS server certificate") + return nil, fmt.Errorf("unable to get TLS server certificate: %w", err) } serverTLSConfig := &tls.Config{ @@ -67,7 +66,7 @@ func NewExternalSigningServer(rootCA ca.RootCA, basedir string) (*ExternalSignin tlsListener, err := tls.Listen("tcp", "localhost:0", serverTLSConfig) if err != nil { - return nil, errors.Wrap(err, "unable to create TLS connection 
listener") + return nil, fmt.Errorf("unable to create TLS connection listener: %w", err) } assignedPort := tlsListener.Addr().(*net.TCPAddr).Port diff --git a/ca/transport.go b/ca/transport.go index 69c4379b36..dc0a17f587 100644 --- a/ca/transport.go +++ b/ca/transport.go @@ -5,11 +5,11 @@ import ( "crypto/tls" "crypto/x509" "crypto/x509/pkix" + "errors" "net" "strings" "sync" - "github.com/pkg/errors" "google.golang.org/grpc/credentials" ) diff --git a/design/store.md b/design/store.md index 91fffd8e15..d2c121bbb4 100644 --- a/design/store.md +++ b/design/store.md @@ -105,10 +105,10 @@ Here is an example of a batch operation: node.Status.Message = `Node moved to "unknown" state due to leadership change in cluster` if err := d.nodes.AddUnknown(node, expireFunc); err != nil { - return errors.Wrap(err, `adding node in "unknown" state to node store failed`) + return fmt.Errorf(err, `adding node in "unknown" state to node store failed`) } if err := store.UpdateNode(tx, node); err != nil { - return errors.Wrap(err, "update failed") + return fmt.Errorf(err, "update failed") } return nil }) diff --git a/go.mod b/go.mod index 7c069b65c2..33152e3c57 100644 --- a/go.mod +++ b/go.mod @@ -20,7 +20,6 @@ require ( github.com/onsi/gomega v1.27.6 github.com/opencontainers/go-digest v1.0.0 github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee - github.com/pkg/errors v0.9.1 github.com/rcrowley/go-metrics v0.0.0-20181016184325-3113b8401b8a github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 diff --git a/identity/randomid.go b/identity/randomid.go index 0eb13527aa..a26e7aab6d 100644 --- a/identity/randomid.go +++ b/identity/randomid.go @@ -45,7 +45,7 @@ func NewID() string { var p [randomIDEntropyBytes]byte if _, err := io.ReadFull(idReader, p[:]); err != nil { - panic(fmt.Errorf("failed to read random bytes: %v", err)) + panic(fmt.Errorf("failed to read random bytes: %w", err)) } p[0] |= 0x80 // set high bit to avoid the need for padding diff --git 
a/integration/integration_test.go b/integration/integration_test.go index 9ee3943a6d..f04eed311e 100644 --- a/integration/integration_test.go +++ b/integration/integration_test.go @@ -3,6 +3,7 @@ package integration import ( "bytes" "context" + "errors" "flag" "fmt" "os" @@ -23,7 +24,6 @@ import ( "github.com/moby/swarmkit/v2/manager" "github.com/moby/swarmkit/v2/node" "github.com/moby/swarmkit/v2/testutils" - "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -538,7 +538,7 @@ func TestRestartLeader(t *testing.T) { } require.False(t, node.Status.State == api.NodeStatus_DOWN, "nodes shouldn't go to down") if node.Status.State != api.NodeStatus_READY { - return errors.Errorf("node %s is still not ready", node.ID) + return fmt.Errorf("node %s is still not ready", node.ID) } } return nil diff --git a/manager/allocator/network.go b/manager/allocator/network.go index 141e86e720..87090456b6 100644 --- a/manager/allocator/network.go +++ b/manager/allocator/network.go @@ -2,6 +2,7 @@ package allocator import ( "context" + "errors" "fmt" "time" @@ -12,7 +13,6 @@ import ( "github.com/moby/swarmkit/v2/manager/state" "github.com/moby/swarmkit/v2/manager/state/store" "github.com/moby/swarmkit/v2/protobuf/ptypes" - "github.com/pkg/errors" ) const ( @@ -92,8 +92,8 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) { // allocated, before reading all network objects for allocation. // If not found, it means it was removed by user, nothing to do here. ingressNetwork, err := GetIngressNetwork(a.store) - switch err { - case nil: + switch { + case err == nil: // Try to complete ingress network allocation before anything else so // that the we can get the preferred subnet for ingress network. 
nc.ingressNetwork = ingressNetwork @@ -109,11 +109,9 @@ func (a *Allocator) doNetworkInit(ctx context.Context) (err error) { log.G(ctx).WithError(err).Error("failed committing allocation of ingress network during init") } } - case ErrNoIngress: - // Ingress network is not present in store, It means user removed it - // and did not create a new one. + case errors.Is(err, ErrNoIngress): default: - return errors.Wrap(err, "failure while looking for ingress network during init") + return fmt.Errorf("failure while looking for ingress network during init: %w", err) } // First, allocate (read it as restore) objects likes network,nodes,serives @@ -369,7 +367,7 @@ func (a *Allocator) getAllocatedNetworks() ([]*api.Network, error) { }) if err != nil { - return nil, errors.Wrap(err, "error listing all networks in store while trying to allocate during init") + return nil, fmt.Errorf("error listing all networks in store while trying to allocate during init: %w", err) } for _, n := range networks { @@ -457,13 +455,13 @@ func (a *Allocator) allocateNodes(ctx context.Context, existingAddressesOnly boo nodes, err = store.FindNodes(tx, store.All) }) if err != nil { - return errors.Wrap(err, "error listing all nodes in store while trying to allocate network resources") + return fmt.Errorf("error listing all nodes in store while trying to allocate network resources: %w", err) } for _, node := range nodes { networks, err := a.getNodeNetworks(node.ID) if err != nil { - return errors.Wrap(err, "error getting all networks needed by node") + return fmt.Errorf("error getting all networks needed by node: %w", err) } isAllocated := a.allocateNode(ctx, node, existingAddressesOnly, networks) if isAllocated { @@ -497,7 +495,7 @@ func (a *Allocator) deallocateNodes(ctx context.Context) error { nodes, err = store.FindNodes(tx, store.All) }) if err != nil { - return fmt.Errorf("error listing all nodes in store while trying to free network resources") + return errors.New("error listing all nodes in 
store while trying to free network resources") } for _, node := range nodes { @@ -527,7 +525,7 @@ func (a *Allocator) deallocateNodeAttachments(ctx context.Context, nid string) e nodes, err = store.FindNodes(tx, store.All) }) if err != nil { - return fmt.Errorf("error listing all nodes in store while trying to free network resources") + return errors.New("error listing all nodes in store while trying to free network resources") } for _, node := range nodes { @@ -601,7 +599,7 @@ func (a *Allocator) allocateNetworks(ctx context.Context, existingOnly bool) err networks, err = store.FindNetworks(tx, store.All) }) if err != nil { - return errors.Wrap(err, "error listing all networks in store while trying to allocate during init") + return fmt.Errorf("error listing all networks in store while trying to allocate during init: %w", err) } var allocatedNetworks []*api.Network @@ -654,7 +652,7 @@ func (a *Allocator) allocateServices(ctx context.Context, existingAddressesOnly services, err = store.FindServices(tx, store.All) }) if err != nil { - return errors.Wrap(err, "error listing all services in store while trying to allocate during init") + return fmt.Errorf("error listing all services in store while trying to allocate during init: %w", err) } var allocatedServices []*api.Service @@ -720,7 +718,7 @@ func (a *Allocator) allocateTasks(ctx context.Context, existingAddressesOnly boo tasks, err = store.FindTasks(tx, store.All) }) if err != nil { - return errors.Wrap(err, "error listing all tasks in store while trying to allocate during init") + return fmt.Errorf("error listing all tasks in store while trying to allocate during init: %w", err) } logger := log.G(ctx).WithField("method", "(*Allocator).allocateTasks") @@ -777,7 +775,7 @@ func (a *Allocator) allocateTasks(ctx context.Context, existingAddressesOnly boo err := a.allocateTask(ctx, t) if err == nil { allocatedTasks = append(allocatedTasks, t) - } else if err != errNoChanges { + } else if !errors.Is(err, errNoChanges) { 
logger.WithError(err).Errorf("failed allocating task %s during init", t.ID) nc.unallocatedTasks[t.ID] = t } @@ -1084,19 +1082,19 @@ func (a *Allocator) reallocateNode(ctx context.Context, nodeID string) error { node = store.GetNode(tx, nodeID) }) if node == nil { - return errors.Errorf("node %v cannot be found", nodeID) + return fmt.Errorf("node %v cannot be found", nodeID) } networks, err := a.getNodeNetworks(node.ID) if err != nil { - return errors.Wrapf(err, "error getting networks for node %v", nodeID) + return fmt.Errorf("error getting networks for node %v: %w", nodeID, err) } if a.allocateNode(ctx, node, false, networks) { // if something was allocated, commit the node if err := a.store.Batch(func(batch *store.Batch) error { return a.commitAllocatedNode(ctx, batch, node) }); err != nil { - return errors.Wrapf(err, "error committing allocation for node %v", nodeID) + return fmt.Errorf("error committing allocation for node %v: %w", nodeID, err) } } return nil @@ -1106,13 +1104,13 @@ func (a *Allocator) commitAllocatedNode(ctx context.Context, batch *store.Batch, if err := batch.Update(func(tx store.Tx) error { err := store.UpdateNode(tx, node) - if err == store.ErrSequenceConflict { + if errors.Is(err, store.ErrSequenceConflict) { storeNode := store.GetNode(tx, node.ID) storeNode.Attachments = node.Attachments err = store.UpdateNode(tx, storeNode) } - return errors.Wrapf(err, "failed updating state in store transaction for node %s", node.ID) + return fmt.Errorf("failed updating state in store transaction for node %s: %w", node.ID, err) }); err != nil { if err := a.deallocateNode(node); err != nil { log.G(ctx).WithError(err).Errorf("failed rolling back allocation of node %s", node.ID) @@ -1175,7 +1173,7 @@ func (a *Allocator) allocateService(ctx context.Context, s *api.Service, existin // network only if it is not already done. 
if IsIngressNetworkNeeded(s) { if nc.ingressNetwork == nil { - return fmt.Errorf("ingress network is missing") + return errors.New("ingress network is missing") } var found bool for _, vip := range s.Endpoint.VirtualIPs { @@ -1248,13 +1246,13 @@ func (a *Allocator) commitAllocatedService(ctx context.Context, batch *store.Bat if err := batch.Update(func(tx store.Tx) error { err := store.UpdateService(tx, s) - if err == store.ErrSequenceConflict { + if errors.Is(err, store.ErrSequenceConflict) { storeService := store.GetService(tx, s.ID) storeService.Endpoint = s.Endpoint err = store.UpdateService(tx, storeService) } - return errors.Wrapf(err, "failed updating state in store transaction for service %s", s.ID) + return fmt.Errorf("failed updating state in store transaction for service %s: %w", s.ID, err) }); err != nil { if err := a.netCtx.deallocateService(s); err != nil { log.G(ctx).WithError(err).Errorf("failed rolling back allocation of service %s", s.ID) @@ -1280,7 +1278,7 @@ func (a *Allocator) allocateNetwork(ctx context.Context, n *api.Network) error { func (a *Allocator) commitAllocatedNetwork(ctx context.Context, batch *store.Batch, n *api.Network) error { if err := batch.Update(func(tx store.Tx) error { if err := store.UpdateNetwork(tx, n); err != nil { - return errors.Wrapf(err, "failed updating state in store transaction for network %s", n.ID) + return fmt.Errorf("failed updating state in store transaction for network %s: %w", n.ID, err) } return nil }); err != nil { @@ -1375,7 +1373,7 @@ func (a *Allocator) commitAllocatedTask(ctx context.Context, batch *store.Batch, retError := batch.Update(func(tx store.Tx) error { err := store.UpdateTask(tx, t) - if err == store.ErrSequenceConflict { + if errors.Is(err, store.ErrSequenceConflict) { storeTask := store.GetTask(tx, t.ID) taskUpdateNetworks(storeTask, t.Networks) taskUpdateEndpoint(storeTask, t.Endpoint) @@ -1385,7 +1383,7 @@ func (a *Allocator) commitAllocatedTask(ctx context.Context, batch *store.Batch, 
err = store.UpdateTask(tx, storeTask) } - return errors.Wrapf(err, "failed updating state in store transaction for task %s", t.ID) + return fmt.Errorf("failed updating state in store transaction for task %s: %w", t.ID, err) }) if retError == nil { @@ -1487,7 +1485,7 @@ func (a *Allocator) procTasksNetwork(ctx context.Context, onRetry bool) { if err := a.allocateTask(ctx, t); err == nil { allocatedTasks = append(allocatedTasks, t) - } else if err != errNoChanges { + } else if !errors.Is(err, errNoChanges) { if quiet { log.G(ctx).WithError(err).Debug("task allocation failure") } else { diff --git a/manager/controlapi/config.go b/manager/controlapi/config.go index 98ac006577..9c48eebee0 100644 --- a/manager/controlapi/config.go +++ b/manager/controlapi/config.go @@ -3,6 +3,7 @@ package controlapi import ( "bytes" "context" + "errors" "strings" "github.com/moby/swarmkit/v2/api" @@ -161,15 +162,11 @@ func (s *Server) CreateConfig(ctx context.Context, request *api.CreateConfigRequ return store.CreateConfig(tx, config) }) - switch err { - case store.ErrNameConflict: + switch { + case errors.Is(err, store.ErrNameConflict): return nil, status.Errorf(codes.AlreadyExists, "config %s already exists", request.Spec.Annotations.Name) - case nil: - log.G(ctx).WithFields(log.Fields{ - "config.Name": request.Spec.Annotations.Name, - "method": "CreateConfig", - }).Debugf("config created") - + case err == nil: + log.G(ctx).WithFields(log.Fields{"config.Name": request.Spec.Annotations.Name, "method": "CreateConfig"}).Debugf("config created") return &api.CreateConfigResponse{Config: config}, nil default: return nil, err @@ -217,15 +214,11 @@ func (s *Server) RemoveConfig(ctx context.Context, request *api.RemoveConfigRequ return store.DeleteConfig(tx, request.ConfigID) }) - switch err { - case store.ErrNotExist: + switch { + case errors.Is(err, store.ErrNotExist): return nil, status.Errorf(codes.NotFound, "config %s not found", request.ConfigID) - case nil: - 
log.G(ctx).WithFields(log.Fields{ - "config.ID": request.ConfigID, - "method": "RemoveConfig", - }).Debugf("config removed") - + case err == nil: + log.G(ctx).WithFields(log.Fields{"config.ID": request.ConfigID, "method": "RemoveConfig"}).Debugf("config removed") return &api.RemoveConfigResponse{}, nil default: return nil, err diff --git a/manager/controlapi/extension.go b/manager/controlapi/extension.go index b2f2ef8d03..2495a69731 100644 --- a/manager/controlapi/extension.go +++ b/manager/controlapi/extension.go @@ -2,6 +2,7 @@ package controlapi import ( "context" + "errors" "strings" "github.com/moby/swarmkit/v2/api" @@ -32,15 +33,11 @@ func (s *Server) CreateExtension(ctx context.Context, request *api.CreateExtensi return store.CreateExtension(tx, extension) }) - switch err { - case store.ErrNameConflict: + switch { + case errors.Is(err, store.ErrNameConflict): return nil, status.Errorf(codes.AlreadyExists, "extension %s already exists", request.Annotations.Name) - case nil: - log.G(ctx).WithFields(log.Fields{ - "extension.Name": request.Annotations.Name, - "method": "CreateExtension", - }).Debugf("extension created") - + case err == nil: + log.G(ctx).WithFields(log.Fields{"extension.Name": request.Annotations.Name, "method": "CreateExtension"}).Debugf("extension created") return &api.CreateExtensionResponse{Extension: extension}, nil default: return nil, status.Errorf(codes.Internal, "could not create extension: %v", err.Error()) @@ -116,15 +113,11 @@ func (s *Server) RemoveExtension(ctx context.Context, request *api.RemoveExtensi return store.DeleteExtension(tx, request.ExtensionID) }) - switch err { - case store.ErrNotExist: + switch { + case errors.Is(err, store.ErrNotExist): return nil, status.Errorf(codes.NotFound, "extension %s not found", request.ExtensionID) - case nil: - log.G(ctx).WithFields(log.Fields{ - "extension.ID": request.ExtensionID, - "method": "RemoveExtension", - }).Debugf("extension removed") - + case err == nil: + 
log.G(ctx).WithFields(log.Fields{"extension.ID": request.ExtensionID, "method": "RemoveExtension"}).Debugf("extension removed") return &api.RemoveExtensionResponse{}, nil default: return nil, err diff --git a/manager/controlapi/network.go b/manager/controlapi/network.go index 1c7fb7520c..0e465b7f2c 100644 --- a/manager/controlapi/network.go +++ b/manager/controlapi/network.go @@ -2,6 +2,7 @@ package controlapi import ( "context" + "errors" "net" "github.com/moby/swarmkit/v2/api" @@ -118,7 +119,7 @@ func (s *Server) CreateNetwork(ctx context.Context, request *api.CreateNetworkRe if request.Spec.Ingress { if n, err := allocator.GetIngressNetwork(s.store); err == nil { return status.Errorf(codes.AlreadyExists, "ingress network (%s) is already present", n.ID) - } else if err != allocator.ErrNoIngress { + } else if !errors.Is(err, allocator.ErrNoIngress) { return status.Errorf(codes.Internal, "failed ingress network presence check: %v", err) } } @@ -184,7 +185,7 @@ func (s *Server) RemoveNetwork(ctx context.Context, request *api.RemoveNetworkRe } if err := rm(n.ID); err != nil { - if err == store.ErrNotExist { + if errors.Is(err, store.ErrNotExist) { return nil, status.Errorf(codes.NotFound, "network %s not found", request.NetworkID) } return nil, err diff --git a/manager/controlapi/resource.go b/manager/controlapi/resource.go index fa1113210d..d56424fd82 100644 --- a/manager/controlapi/resource.go +++ b/manager/controlapi/resource.go @@ -2,6 +2,7 @@ package controlapi import ( "context" + "errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" @@ -39,20 +40,13 @@ func (s *Server) CreateResource(ctx context.Context, request *api.CreateResource return store.CreateResource(tx, r) }) - switch err { - case store.ErrNoKind: + switch { + case errors.Is(err, store.ErrNoKind): return nil, status.Errorf(codes.InvalidArgument, "Kind %v is not registered", r.Kind) - case store.ErrNameConflict: - return nil, status.Errorf( - codes.AlreadyExists, - "A resource with 
name %v already exists", - r.Annotations.Name, - ) - case nil: - log.G(ctx).WithFields(log.Fields{ - "resource.Name": r.Annotations.Name, - "method": "CreateResource", - }).Debugf("resource created") + case errors.Is(err, store.ErrNameConflict): + return nil, status.Errorf(codes.AlreadyExists, "A resource with name %v already exists", r.Annotations.Name) + case err == nil: + log.G(ctx).WithFields(log.Fields{"resource.Name": r.Annotations.Name, "method": "CreateResource"}).Debugf("resource created") return &api.CreateResourceResponse{Resource: r}, nil default: return nil, err @@ -91,10 +85,10 @@ func (s *Server) RemoveResource(ctx context.Context, request *api.RemoveResource err := s.store.Update(func(tx store.Tx) error { return store.DeleteResource(tx, request.ResourceID) }) - switch err { - case store.ErrNotExist: + switch { + case errors.Is(err, store.ErrNotExist): return nil, status.Errorf(codes.NotFound, "resource %s not found", request.ResourceID) - case nil: + case err == nil: return &api.RemoveResourceResponse{}, nil default: return nil, err @@ -212,13 +206,11 @@ func (s *Server) UpdateResource(ctx context.Context, request *api.UpdateResource return store.UpdateResource(tx, r) }) - switch err { - case store.ErrSequenceConflict: + switch { + case errors.Is(err, store.ErrSequenceConflict): return nil, status.Errorf(codes.InvalidArgument, "update out of sequence") - case nil: - return &api.UpdateResourceResponse{ - Resource: r, - }, nil + case err == nil: + return &api.UpdateResourceResponse{Resource: r}, nil default: return nil, err } diff --git a/manager/controlapi/secret.go b/manager/controlapi/secret.go index 835947a6b8..25ce69fe92 100644 --- a/manager/controlapi/secret.go +++ b/manager/controlapi/secret.go @@ -3,6 +3,7 @@ package controlapi import ( "context" "crypto/subtle" + "errors" "strings" "github.com/moby/swarmkit/v2/api" @@ -168,16 +169,12 @@ func (s *Server) CreateSecret(ctx context.Context, request *api.CreateSecretRequ return 
store.CreateSecret(tx, secret) }) - switch err { - case store.ErrNameConflict: + switch { + case errors.Is(err, store.ErrNameConflict): return nil, status.Errorf(codes.AlreadyExists, "secret %s already exists", request.Spec.Annotations.Name) - case nil: + case err == nil: secret.Spec.Data = nil // clean the actual secret data so it's never returned - log.G(ctx).WithFields(log.Fields{ - "secret.Name": request.Spec.Annotations.Name, - "method": "CreateSecret", - }).Debugf("secret created") - + log.G(ctx).WithFields(log.Fields{"secret.Name": request.Spec.Annotations.Name, "method": "CreateSecret"}).Debugf("secret created") return &api.CreateSecretResponse{Secret: secret}, nil default: return nil, err @@ -225,15 +222,11 @@ func (s *Server) RemoveSecret(ctx context.Context, request *api.RemoveSecretRequ return store.DeleteSecret(tx, request.SecretID) }) - switch err { - case store.ErrNotExist: + switch { + case errors.Is(err, store.ErrNotExist): return nil, status.Errorf(codes.NotFound, "secret %s not found", request.SecretID) - case nil: - log.G(ctx).WithFields(log.Fields{ - "secret.ID": request.SecretID, - "method": "RemoveSecret", - }).Debugf("secret removed") - + case err == nil: + log.G(ctx).WithFields(log.Fields{"secret.ID": request.SecretID, "method": "RemoveSecret"}).Debugf("secret removed") return &api.RemoveSecretResponse{}, nil default: return nil, err diff --git a/manager/controlapi/service.go b/manager/controlapi/service.go index 3c9ce212d0..9c86261dc5 100644 --- a/manager/controlapi/service.go +++ b/manager/controlapi/service.go @@ -752,7 +752,7 @@ func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRe } if allocator.IsIngressNetworkNeeded(service) { - if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress { + if _, err := allocator.GetIngressNetwork(s.store); errors.Is(err, allocator.ErrNoIngress) { return nil, status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network 
is present") } } @@ -771,13 +771,10 @@ func (s *Server) CreateService(ctx context.Context, request *api.CreateServiceRe return store.CreateService(tx, service) }) - switch err { - case store.ErrNameConflict: - // Enhance the name-confict error to include the service name. The original - // `ErrNameConflict` error-message is included for backward-compatibility - // with older consumers of the API performing string-matching. + switch { + case errors.Is(err, store.ErrNameConflict): return nil, status.Errorf(codes.AlreadyExists, "%s: service %s already exists", err.Error(), request.Spec.Annotations.Name) - case nil: + case err == nil: return &api.CreateServiceResponse{Service: service}, nil default: return nil, err @@ -920,7 +917,7 @@ func (s *Server) UpdateService(ctx context.Context, request *api.UpdateServiceRe } if allocator.IsIngressNetworkNeeded(service) { - if _, err := allocator.GetIngressNetwork(s.store); err == allocator.ErrNoIngress { + if _, err := allocator.GetIngressNetwork(s.store); errors.Is(err, allocator.ErrNoIngress) { return status.Errorf(codes.FailedPrecondition, "service needs ingress network, but no ingress network is present") } } @@ -949,7 +946,7 @@ func (s *Server) RemoveService(ctx context.Context, request *api.RemoveServiceRe return store.DeleteService(tx, request.ServiceID) }) if err != nil { - if err == store.ErrNotExist { + if errors.Is(err, store.ErrNotExist) { return nil, status.Errorf(codes.NotFound, "service %s not found", request.ServiceID) } return nil, err @@ -998,8 +995,8 @@ func (s *Server) ListServices(ctx context.Context, request *api.ListServicesRequ } }) if err != nil { - switch err { - case store.ErrInvalidFindBy: + switch { + case errors.Is(err, store.ErrInvalidFindBy): return nil, status.Errorf(codes.InvalidArgument, err.Error()) default: return nil, err diff --git a/manager/controlapi/task.go b/manager/controlapi/task.go index 00dcc3dcb0..b925687ccb 100644 --- a/manager/controlapi/task.go +++ b/manager/controlapi/task.go 
@@ -2,6 +2,7 @@ package controlapi import ( "context" + "errors" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/api/naming" @@ -44,7 +45,7 @@ func (s *Server) RemoveTask(ctx context.Context, request *api.RemoveTaskRequest) return store.DeleteTask(tx, request.TaskID) }) if err != nil { - if err == store.ErrNotExist { + if errors.Is(err, store.ErrNotExist) { return nil, status.Errorf(codes.NotFound, "task %s not found", request.TaskID) } return nil, err diff --git a/manager/deks.go b/manager/deks.go index 3249f61b55..0a54d64e43 100644 --- a/manager/deks.go +++ b/manager/deks.go @@ -3,7 +3,7 @@ package manager import ( "crypto/subtle" "encoding/base64" - "fmt" + "errors" "github.com/moby/swarmkit/v2/ca" "github.com/moby/swarmkit/v2/manager/encryption" @@ -62,7 +62,7 @@ func (r RaftDEKData) UnmarshalHeaders(headers map[string]string, kekData ca.KEKD } if pendingDEK != nil && currentDEK == nil { - return nil, fmt.Errorf("there is a pending DEK, but no current DEK") + return nil, errors.New("there is a pending DEK, but no current DEK") } _, ok := headers[pemHeaderRaftDEKNeedsRotation] @@ -120,7 +120,7 @@ func compareKEKs(oldKEK, candidateKEK ca.KEKData) (bool, bool, error) { keksEqual := subtle.ConstantTimeCompare(oldKEK.KEK, candidateKEK.KEK) == 1 switch { case oldKEK.Version == candidateKEK.Version && !keksEqual: - return false, false, fmt.Errorf("candidate KEK has the same version as the current KEK, but a different KEK value") + return false, false, errors.New("candidate KEK has the same version as the current KEK, but a different KEK value") case oldKEK.Version >= candidateKEK.Version || keksEqual: return false, false, nil default: @@ -136,11 +136,11 @@ type RaftDEKManager struct { FIPS bool } -var errNoUpdateNeeded = fmt.Errorf("don't need to rotate or update") +var errNoUpdateNeeded = errors.New("don't need to rotate or update") // this error is returned if the KeyReadWriter's PEMKeyHeaders object is no longer a RaftDEKData object - // this can 
happen if the node is no longer a manager, for example -var errNotUsingRaftDEKData = fmt.Errorf("RaftDEKManager can no longer store and manage TLS key headers") +var errNotUsingRaftDEKData = errors.New("RaftDEKManager can no longer store and manage TLS key headers") // NewRaftDEKManager returns a RaftDEKManager that uses the current key writer // and header manager @@ -159,7 +159,7 @@ func NewRaftDEKManager(kw ca.KeyWriter, fips bool) (*RaftDEKManager, error) { } return nil, errNoUpdateNeeded }) - if err != nil && err != errNoUpdateNeeded { + if err != nil && !errors.Is(err, errNoUpdateNeeded) { return nil, err } return &RaftDEKManager{ @@ -259,7 +259,7 @@ func (r *RaftDEKManager) MaybeUpdateKEK(candidateKEK ca.KEKData) (bool, bool, er } return candidateKEK, data, nil }) - if err == errNoUpdateNeeded { + if errors.Is(err, errNoUpdateNeeded) { err = nil } diff --git a/manager/deks_test.go b/manager/deks_test.go index 7f25010bfa..9247e07cf1 100644 --- a/manager/deks_test.go +++ b/manager/deks_test.go @@ -10,7 +10,6 @@ import ( "github.com/moby/swarmkit/v2/ca" cautils "github.com/moby/swarmkit/v2/ca/testutils" "github.com/moby/swarmkit/v2/manager/state/raft" - "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -512,12 +511,14 @@ O0T3aXuZGYNyh//KqAoA3erCmh6HauMz84Y= krw := ca.NewKeyReadWriter(path.Node, wrongKEK, RaftDEKData{}) _, _, err = krw.Read() - require.IsType(t, ca.ErrInvalidKEK{}, errors.Cause(err)) + var eik1 ca.ErrInvalidKEK + require.ErrorAs(t, err, &eik1) krw = ca.NewKeyReadWriter(path.Node, falsePositiveKEK, RaftDEKData{}) _, _, err = krw.Read() require.Error(t, err) - require.IsType(t, ca.ErrInvalidKEK{}, errors.Cause(err)) + var eik2 ca.ErrInvalidKEK + require.ErrorAs(t, err, &eik2) krw = ca.NewKeyReadWriter(path.Node, realKEK, RaftDEKData{}) _, _, err = krw.Read() diff --git a/manager/dispatcher/assignments.go b/manager/dispatcher/assignments.go index 1fdca36f92..3bd483fbc7 100644 --- a/manager/dispatcher/assignments.go +++ 
b/manager/dispatcher/assignments.go @@ -1,7 +1,7 @@ package dispatcher import ( - "fmt" + "errors" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/api/equality" @@ -446,7 +446,7 @@ func (a *assignmentSet) message() api.AssignmentsMessage { func (a *assignmentSet) secret(readTx store.ReadTx, task *api.Task, secretID string) (*api.Secret, bool, error) { secret := store.GetSecret(readTx, secretID) if secret == nil { - return nil, false, fmt.Errorf("secret not found") + return nil, false, errors.New("secret not found") } if secret.Spec.Driver == nil { return secret, false, nil diff --git a/manager/dispatcher/dispatcher.go b/manager/dispatcher/dispatcher.go index 150a03c3b6..6bd5612300 100644 --- a/manager/dispatcher/dispatcher.go +++ b/manager/dispatcher/dispatcher.go @@ -2,6 +2,7 @@ package dispatcher import ( "context" + "errors" "fmt" "net" "strconv" @@ -20,7 +21,6 @@ import ( "github.com/moby/swarmkit/v2/protobuf/ptypes" "github.com/moby/swarmkit/v2/remotes" "github.com/moby/swarmkit/v2/watch" - "github.com/pkg/errors" "google.golang.org/grpc/codes" "google.golang.org/grpc/status" ) @@ -426,7 +426,7 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error { nodes, err = store.FindNodes(tx, store.All) }) if err != nil { - return errors.Wrap(err, "failed to get list of nodes") + return fmt.Errorf("failed to get list of nodes: %w", err) } err = d.store.Batch(func(batch *store.Batch) error { for _, n := range nodes { @@ -466,10 +466,10 @@ func (d *Dispatcher) markNodesUnknown(ctx context.Context) error { } } if err := d.nodes.AddUnknown(node, expireFunc); err != nil { - return errors.Wrapf(err, `adding node %s in "unknown" state to node store failed`, nodeID) + return fmt.Errorf(`adding node %s in "unknown" state to node store failed: %w`, nodeID, err) } if err := store.UpdateNode(tx, node); err != nil { - return errors.Wrapf(err, "update for node %s failed", nodeID) + return fmt.Errorf("update for node %s failed: %w", nodeID, err) } return 
nil }) @@ -544,7 +544,7 @@ func nodeIPFromContext(ctx context.Context) (string, error) { } addr, _, err := net.SplitHostPort(nodeInfo.RemoteAddr) if err != nil { - return "", errors.Wrap(err, "unable to get ip from addr:port") + return "", fmt.Errorf("unable to get ip from addr:port: %w", err) } return addr, nil } @@ -1304,7 +1304,7 @@ func (d *Dispatcher) markNodeNotReady(id string, state api.NodeStatus_State, mes } if rn := d.nodes.Delete(id); rn == nil { - return errors.Errorf("node %s is not found in local storage", id) + return fmt.Errorf("node %s is not found in local storage", id) } logLocal.Debugf("deleted node %s from node store", node.ID) diff --git a/manager/drivers/provider.go b/manager/drivers/provider.go index 1dd3fa620b..ea80a7075f 100644 --- a/manager/drivers/provider.go +++ b/manager/drivers/provider.go @@ -1,7 +1,7 @@ package drivers import ( - "fmt" + "errors" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/node/plugin" @@ -20,10 +20,10 @@ func New(pluginGetter plugin.Getter) *DriverProvider { // NewSecretDriver creates a new driver for fetching secrets func (m *DriverProvider) NewSecretDriver(driver *api.Driver) (*SecretDriver, error) { if m.pluginGetter == nil { - return nil, fmt.Errorf("plugin getter is nil") + return nil, errors.New("plugin getter is nil") } if driver == nil || driver.Name == "" { - return nil, fmt.Errorf("driver specification is nil") + return nil, errors.New("driver specification is nil") } // Search for the specified plugin plugin, err := m.pluginGetter.Get(driver.Name, SecretsProviderCapability) diff --git a/manager/drivers/secrets.go b/manager/drivers/secrets.go index 63d6e47c23..84057bf9e6 100644 --- a/manager/drivers/secrets.go +++ b/manager/drivers/secrets.go @@ -1,7 +1,7 @@ package drivers import ( - "fmt" + "errors" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/api/naming" @@ -32,10 +32,10 @@ func NewSecretDriver(plugin plugin.Plugin) *SecretDriver { // the driver returns an error in 
the payload. func (d *SecretDriver) Get(spec *api.SecretSpec, task *api.Task) ([]byte, bool, error) { if spec == nil { - return nil, false, fmt.Errorf("secret spec is nil") + return nil, false, errors.New("secret spec is nil") } if task == nil { - return nil, false, fmt.Errorf("task is nil") + return nil, false, errors.New("task is nil") } var secretResp SecretsProviderResponse @@ -80,7 +80,7 @@ func (d *SecretDriver) Get(spec *api.SecretSpec, task *api.Task) ([]byte, bool, return nil, false, err } if secretResp.Err != "" { - return nil, secretResp.DoNotReuse, fmt.Errorf(secretResp.Err) + return nil, secretResp.DoNotReuse, errors.New(secretResp.Err) } // Assign the secret value return secretResp.Value, secretResp.DoNotReuse, nil diff --git a/manager/encryption/encryption.go b/manager/encryption/encryption.go index 45d98cf6ef..186a423029 100644 --- a/manager/encryption/encryption.go +++ b/manager/encryption/encryption.go @@ -3,13 +3,13 @@ package encryption import ( cryptorand "crypto/rand" "encoding/base64" + "errors" "fmt" "io" "strings" "github.com/gogo/protobuf/proto" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" ) // This package defines the interfaces and encryption package @@ -39,7 +39,7 @@ type noopCrypter struct{} func (n noopCrypter) Decrypt(e api.MaybeEncryptedRecord) ([]byte, error) { if e.Algorithm != n.Algorithm() { - return nil, fmt.Errorf("record is encrypted") + return nil, errors.New("record is encrypted") } return e.Data, nil } @@ -135,17 +135,17 @@ func Decrypt(encryptd []byte, decrypter Decrypter) ([]byte, error) { // Encrypt turns a slice of bytes into a serialized MaybeEncryptedRecord slice of bytes func Encrypt(plaintext []byte, encrypter Encrypter) ([]byte, error) { if encrypter == nil { - return nil, fmt.Errorf("no encrypter specified") + return nil, errors.New("no encrypter specified") } encryptedRecord, err := encrypter.Encrypt(plaintext) if err != nil { - return nil, errors.Wrap(err, "unable to encrypt data") + return nil, 
fmt.Errorf("unable to encrypt data: %w", err) } data, err := proto.Marshal(encryptedRecord) if err != nil { - return nil, errors.Wrap(err, "unable to marshal as MaybeEncryptedRecord") + return nil, fmt.Errorf("unable to marshal as MaybeEncryptedRecord: %w", err) } return data, nil @@ -168,7 +168,7 @@ func GenerateSecretKey() []byte { secretData := make([]byte, naclSecretboxKeySize) if _, err := io.ReadFull(cryptorand.Reader, secretData); err != nil { // panic if we can't read random data - panic(errors.Wrap(err, "failed to read random bytes")) + panic(fmt.Errorf("failed to read random bytes: %w", err)) } return secretData } @@ -183,11 +183,11 @@ func HumanReadableKey(key []byte) string { // said keys func ParseHumanReadableKey(key string) ([]byte, error) { if !strings.HasPrefix(key, humanReadablePrefix) { - return nil, fmt.Errorf("invalid key string") + return nil, errors.New("invalid key string") } keyBytes, err := base64.RawStdEncoding.DecodeString(strings.TrimPrefix(key, humanReadablePrefix)) if err != nil { - return nil, fmt.Errorf("invalid key string") + return nil, errors.New("invalid key string") } return keyBytes, nil } diff --git a/manager/encryption/fernet.go b/manager/encryption/fernet.go index fea08d2939..2c69e42cb4 100644 --- a/manager/encryption/fernet.go +++ b/manager/encryption/fernet.go @@ -1,6 +1,7 @@ package encryption import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -41,7 +42,7 @@ func (f Fernet) Encrypt(data []byte) (*api.MaybeEncryptedRecord, error) { // Decrypt decrypts a MaybeEncryptedRecord and returns some bytes func (f Fernet) Decrypt(record api.MaybeEncryptedRecord) ([]byte, error) { if record.Algorithm != f.Algorithm() { - return nil, fmt.Errorf("record is not a Fernet message") + return nil, errors.New("record is not a Fernet message") } // -1 skips the TTL check, since we don't care about message expiry diff --git a/manager/encryption/nacl.go b/manager/encryption/nacl.go index 2928d1294a..fe65d5b82a 100644 --- 
a/manager/encryption/nacl.go +++ b/manager/encryption/nacl.go @@ -2,6 +2,7 @@ package encryption import ( cryptorand "crypto/rand" + "errors" "fmt" "io" @@ -54,7 +55,7 @@ func (n NACLSecretbox) Encrypt(data []byte) (*api.MaybeEncryptedRecord, error) { // Decrypt decrypts a MaybeEncryptedRecord and returns some bytes func (n NACLSecretbox) Decrypt(record api.MaybeEncryptedRecord) ([]byte, error) { if record.Algorithm != n.Algorithm() { - return nil, fmt.Errorf("not a NACL secretbox record") + return nil, errors.New("not a NACL secretbox record") } if len(record.Nonce) != naclSecretboxNonceSize { return nil, fmt.Errorf("invalid nonce size for NACL secretbox: require 24, got %d", len(record.Nonce)) diff --git a/manager/keymanager/keymanager.go b/manager/keymanager/keymanager.go index cbbea32533..014a0bfebd 100644 --- a/manager/keymanager/keymanager.go +++ b/manager/keymanager/keymanager.go @@ -9,13 +9,14 @@ import ( "context" cryptorand "crypto/rand" "encoding/binary" + "errors" + "fmt" "sync" "time" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/state/store" - "github.com/pkg/errors" ) const ( @@ -97,7 +98,7 @@ func (k *KeyManager) allocateKey(ctx context.Context, subsys string) *api.Encryp _, err := cryptorand.Read(key) if err != nil { - panic(errors.Wrap(err, "key generated failed")) + panic(fmt.Errorf("key generated failed: %w", err)) } k.keyRing.lClock++ diff --git a/manager/logbroker/broker.go b/manager/logbroker/broker.go index 9683fd28bb..e6a06d32bd 100644 --- a/manager/logbroker/broker.go +++ b/manager/logbroker/broker.go @@ -391,7 +391,7 @@ func (lb *LogBroker) PublishLogs(stream api.LogBroker_PublishLogsServer) (err er for { logMsg, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { return stream.SendAndClose(&api.PublishLogsResponse{}) } if err != nil { diff --git a/manager/manager.go b/manager/manager.go index 8e00c57477..3a0089530d 100644 --- a/manager/manager.go +++ 
b/manager/manager.go @@ -3,6 +3,7 @@ package manager import ( "context" "crypto/tls" + "errors" "fmt" "math" "net" @@ -47,7 +48,6 @@ import ( "github.com/moby/swarmkit/v2/node/plugin" "github.com/moby/swarmkit/v2/remotes" "github.com/moby/swarmkit/v2/xnet" - "github.com/pkg/errors" "google.golang.org/grpc" "google.golang.org/grpc/credentials" ) @@ -213,13 +213,13 @@ func (l *closeOnceListener) Close() error { func New(config *Config) (*Manager, error) { err := os.MkdirAll(config.StateDir, 0o700) if err != nil { - return nil, errors.Wrap(err, "failed to create state directory") + return nil, fmt.Errorf("failed to create state directory: %w", err) } raftStateDir := filepath.Join(config.StateDir, "raft") err = os.MkdirAll(raftStateDir, 0o700) if err != nil { - return nil, errors.Wrap(err, "failed to create raft state directory") + return nil, fmt.Errorf("failed to create raft state directory: %w", err) } raftCfg := raft.DefaultNodeConfig() @@ -341,7 +341,7 @@ func (m *Manager) BindControl(addr string) error { if runtime.GOOS != "windows" { err := os.MkdirAll(filepath.Dir(addr), 0o700) if err != nil { - return errors.Wrap(err, "failed to create socket directory") + return fmt.Errorf("failed to create socket directory: %w", err) } } @@ -351,19 +351,21 @@ func (m *Manager) BindControl(addr string) error { // exists. Try replacing the file. 
if runtime.GOOS != "windows" { unwrappedErr := err - if op, ok := unwrappedErr.(*net.OpError); ok { + op := &net.OpError{} + if errors.As(unwrappedErr, &op) { unwrappedErr = op.Err } - if sys, ok := unwrappedErr.(*os.SyscallError); ok { + sys := &os.SyscallError{} + if errors.As(unwrappedErr, &sys) { unwrappedErr = sys.Err } - if unwrappedErr == syscall.EADDRINUSE { + if errors.Is(unwrappedErr, syscall.EADDRINUSE) { os.Remove(addr) l, err = xnet.ListenLocal(addr) } } if err != nil { - return errors.Wrap(err, "failed to listen on control API address") + return fmt.Errorf("failed to listen on control API address: %w", err) } m.config.ControlAPI = addr @@ -403,7 +405,7 @@ func (m *Manager) BindRemote(ctx context.Context, addrs RemoteAddrs) error { l, err := net.Listen("tcp", addrs.ListenAddr) if err != nil { - return errors.Wrap(err, "failed to listen on remote API address") + return fmt.Errorf("failed to listen on remote API address: %w", err) } if advertiseAddrPort == "0" { advertiseAddr = l.Addr().String() @@ -582,7 +584,7 @@ func (m *Manager) Run(parent context.Context) error { if err := m.raftNode.JoinAndStart(ctx); err != nil { // Don't block future calls to Stop. 
close(m.started) - return errors.Wrap(err, "can't initialize raft node") + return fmt.Errorf("can't initialize raft node: %w", err) } localHealthServer.SetServingStatus("ControlAPI", api.HealthCheckResponse_SERVING) @@ -804,7 +806,7 @@ func (m *Manager) watchForClusterChanges(ctx context.Context) error { func(tx store.ReadTx) error { cluster = store.GetCluster(tx, clusterID) if cluster == nil { - return fmt.Errorf("unable to get current cluster") + return errors.New("unable to get current cluster") } return nil }, @@ -844,18 +846,17 @@ func (m *Manager) getLeaderNodeID() string { // the purposes of logging leadership changes, and should not be relied on // for other purposes leader, leaderErr := m.raftNode.Leader() - switch leaderErr { - case raft.ErrNoRaftMember: + switch { + case errors.Is(leaderErr, raft.ErrNoRaftMember): // this is an unlikely case, but we have to handle it. this means this // node is not a member of the raft quorum. this won't look very pretty // in logs ("leadership changed from aslkdjfa to ErrNoRaftMember") but // it also won't be very common return "not yet part of a raft cluster" - case raft.ErrNoClusterLeader: + case errors.Is(leaderErr, raft.ErrNoClusterLeader): return "no cluster leader" default: id, err := m.raftNode.GetNodeIDByRaftID(leader) - // the only possible error here is "ErrMemberUnknown" if err != nil { return "an unknown node" } @@ -979,7 +980,7 @@ func (m *Manager) becomeLeader(ctx context.Context) { } err := store.CreateCluster(tx, clusterObj) - if err != nil && (err != store.ErrExist || err != store.ErrNameConflict) { + if err != nil && (!errors.Is(err, store.ErrExist) || !errors.Is(err, store.ErrNameConflict)) { log.G(ctx).WithError(err).Errorf("error creating cluster object") } @@ -1002,7 +1003,7 @@ func (m *Manager) becomeLeader(ctx context.Context) { // in order to allow running services on the predefined docker // networks like `bridge` and `host`. 
for _, p := range m.config.networkProvider().PredefinedNetworks() { - if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != nil && err != store.ErrNameConflict { + if err := store.CreateNetwork(tx, newPredefinedNetwork(p.Name, p.Driver)); err != nil && !errors.Is(err, store.ErrNameConflict) { log.G(ctx).WithError(err).Error("failed to create predefined network " + p.Name) } } diff --git a/manager/state/raft/raft.go b/manager/state/raft/raft.go index f375c14c2c..7d7da6d4ef 100644 --- a/manager/state/raft/raft.go +++ b/manager/state/raft/raft.go @@ -2,6 +2,7 @@ package raft import ( "context" + "errors" "fmt" "io" "math" @@ -27,7 +28,6 @@ import ( "github.com/moby/swarmkit/v2/manager/state/raft/transport" "github.com/moby/swarmkit/v2/manager/state/store" "github.com/moby/swarmkit/v2/watch" - "github.com/pkg/errors" "github.com/sirupsen/logrus" "go.etcd.io/etcd/pkg/v3/idutil" "go.etcd.io/etcd/raft/v3" @@ -388,7 +388,7 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) { }() loadAndStartErr := n.loadAndStart(ctx, n.opts.ForceNewCluster) - if loadAndStartErr != nil && loadAndStartErr != storage.ErrNoWAL { + if loadAndStartErr != nil && !errors.Is(loadAndStartErr, storage.ErrNoWAL) { return loadAndStartErr } @@ -413,7 +413,7 @@ func (n *Node) JoinAndStart(ctx context.Context) (err error) { if loadAndStartErr == nil { if n.opts.JoinAddr != "" && n.opts.ForceJoin { if err := n.joinCluster(ctx); err != nil { - return errors.Wrap(err, "failed to rejoin cluster") + return fmt.Errorf("failed to rejoin cluster: %w", err) } } n.campaignWhenAble = true @@ -583,7 +583,7 @@ func (n *Node) Run(ctx context.Context) error { // Save entries to storage if err := n.saveToStorage(ctx, &raftConfig, rd.HardState, rd.Entries, rd.Snapshot); err != nil { - return errors.Wrap(err, "failed to save entries to storage") + return fmt.Errorf("failed to save entries to storage: %w", err) } // If the memory store lock has been held for too long, @@ -1060,7 
+1060,7 @@ func (n *Node) checkHealth(ctx context.Context, addr string, timeout time.Durati healthClient := api.NewHealthClient(conn) resp, err := healthClient.Check(ctx, &api.HealthCheckRequest{Service: "Raft"}) if err != nil { - return errors.Wrap(err, "could not connect to prospective new cluster member using its advertised address") + return fmt.Errorf("could not connect to prospective new cluster member using its advertised address: %w", err) } if resp.Status != api.HealthCheckResponse_SERVING { return fmt.Errorf("health check returned status %s", resp.Status.String()) @@ -1096,7 +1096,7 @@ func (n *Node) addMember(ctx context.Context, addr string, raftID uint64, nodeID func (n *Node) updateNodeBlocking(ctx context.Context, id uint64, addr string) error { m := n.cluster.GetMember(id) if m == nil { - return errors.Errorf("member %x is not found for update", id) + return fmt.Errorf("member %x is not found for update", id) } node := api.RaftMember{ RaftID: m.RaftID, @@ -1239,7 +1239,7 @@ func (n *Node) TransferLeadership(ctx context.Context) error { transferee, err := n.transport.LongestActive() if err != nil { - return errors.Wrap(err, "failed to get longest-active member") + return fmt.Errorf("failed to get longest-active member: %w", err) } start := time.Now() n.raftNode.TransferLeadership(ctx, n.Config.ID, transferee) @@ -1342,7 +1342,7 @@ func (n *Node) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error for { recvdMsg, err = stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { break } else if err != nil { log.G(stream.Context()).WithError(err).Error("error while reading from stream") @@ -1383,7 +1383,7 @@ func (n *Node) StreamRaftMessage(stream api.Raft_StreamRaftMessageServer) error } // We should have the complete snapshot. Verify and process. 
- if err == io.EOF { + if errors.Is(err, io.EOF) { _, err = n.ProcessRaftMessage(stream.Context(), &api.ProcessRaftMessageRequest{Message: assembledMessage.Message}) if err == nil { // Translate the response of ProcessRaftMessage() from @@ -1502,7 +1502,7 @@ func (n *Node) getLeaderConn() (*grpc.ClientConn, error) { } conn, err := n.transport.PeerConn(leader) if err != nil { - return nil, errors.Wrap(err, "failed to get connection to leader") + return nil, fmt.Errorf("failed to get connection to leader: %w", err) } return conn, nil } @@ -1514,7 +1514,7 @@ func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) { if err == nil { return cc, nil } - if err == raftselector.ErrIsLeader { + if errors.Is(err, raftselector.ErrIsLeader) { return nil, err } if atomic.LoadUint32(&n.ticksWithNoLeader) > lostQuorumTimeout { @@ -1530,7 +1530,7 @@ func (n *Node) LeaderConn(ctx context.Context) (*grpc.ClientConn, error) { if err == nil { return cc, nil } - if err == raftselector.ErrIsLeader { + if errors.Is(err, raftselector.ErrIsLeader) { return nil, err } case <-ctx.Done(): @@ -1579,7 +1579,7 @@ func (n *Node) registerNode(node *api.RaftMember) error { err := n.cluster.AddMember(member) if err != nil { if rerr := n.transport.RemovePeer(node.RaftID); rerr != nil { - return errors.Wrapf(rerr, "failed to remove peer after error %v", err) + return fmt.Errorf("failed to remove peer after error %w: %w", err, rerr) } return err } @@ -1646,7 +1646,7 @@ func (n *Node) ChangesBetween(from, to api.Version) ([]state.Change, error) { r := &api.InternalRaftRequest{} err := proto.Unmarshal(pb.Data, r) if err != nil { - return nil, errors.Wrap(err, "error umarshalling internal raft request") + return nil, fmt.Errorf("error umarshalling internal raft request: %w", err) } if r.Action != nil { @@ -1746,18 +1746,18 @@ func (n *Node) saveToStorage( if !raft.IsEmptySnap(snapshot) { if err := n.raftLogger.SaveSnapshot(snapshot); err != nil { - return errors.Wrap(err, "failed to save 
snapshot") + return fmt.Errorf("failed to save snapshot: %w", err) } if err := n.raftLogger.GC(snapshot.Metadata.Index, snapshot.Metadata.Term, raftConfig.KeepOldSnapshots); err != nil { log.G(ctx).WithError(err).Error("unable to clean old snapshots and WALs") } if err = n.raftStore.ApplySnapshot(snapshot); err != nil { - return errors.Wrap(err, "failed to apply snapshot on raft node") + return fmt.Errorf("failed to apply snapshot on raft node: %w", err) } } if err := n.raftLogger.SaveEntries(hardState, entries); err != nil { - return errors.Wrap(err, "failed to save raft log entries") + return fmt.Errorf("failed to save raft log entries: %w", err) } if len(entries) > 0 { @@ -1768,7 +1768,7 @@ func (n *Node) saveToStorage( } if err = n.raftStore.Append(entries); err != nil { - return errors.Wrap(err, "failed to append raft log entries") + return fmt.Errorf("failed to append raft log entries: %w", err) } return nil diff --git a/manager/state/raft/storage.go b/manager/state/raft/storage.go index 28f4102ac2..854cc732dd 100644 --- a/manager/state/raft/storage.go +++ b/manager/state/raft/storage.go @@ -2,6 +2,7 @@ package raft import ( "context" + "errors" "fmt" "github.com/docker/go-metrics" @@ -11,7 +12,6 @@ import ( "github.com/moby/swarmkit/v2/manager/state/raft/membership" "github.com/moby/swarmkit/v2/manager/state/raft/storage" "github.com/moby/swarmkit/v2/manager/state/store" - "github.com/pkg/errors" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/raftpb" ) @@ -43,12 +43,13 @@ func (n *Node) readFromDisk(ctx context.Context) (*raftpb.Snapshot, storage.WALD snap, walData, err := n.raftLogger.BootstrapFromDisk(ctx) if keys.PendingDEK != nil { - switch errors.Cause(err).(type) { - case nil: + var ecd encryption.ErrCannotDecrypt + switch { + case err == nil: if err = n.keyRotator.UpdateKeys(EncryptionKeys{CurrentDEK: keys.PendingDEK}); err != nil { - err = errors.Wrap(err, "previous key rotation was successful, but unable mark rotation as complete") + err = 
fmt.Errorf("previous key rotation was successful, but unable mark rotation as complete: %w", err) } - case encryption.ErrCannotDecrypt: + case errors.As(err, &ecd): snap, walData, err = n.raftLogger.BootstrapFromDisk(ctx, keys.CurrentDEK) } } @@ -69,7 +70,7 @@ func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error { // Read logs to fully catch up store var raftNode api.RaftMember if err := raftNode.Unmarshal(waldata.Metadata); err != nil { - return errors.Wrap(err, "failed to unmarshal WAL metadata") + return fmt.Errorf("failed to unmarshal WAL metadata: %w", err) } n.Config.ID = raftNode.RaftID @@ -106,7 +107,7 @@ func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error { if ent.Index <= st.Commit && ent.Type == raftpb.EntryConfChange { var cc raftpb.ConfChange if err := cc.Unmarshal(ent.Data); err != nil { - return errors.Wrap(err, "failed to unmarshal config change") + return fmt.Errorf("failed to unmarshal config change: %w", err) } if cc.Type == raftpb.ConfChangeRemoveNode { n.cluster.RemoveMember(cc.NodeID) @@ -136,7 +137,7 @@ func (n *Node) loadAndStart(ctx context.Context, forceNewCluster bool) error { if ccEnt.Type == raftpb.EntryConfChange { var cc raftpb.ConfChange if err := cc.Unmarshal(ccEnt.Data); err != nil { - return errors.Wrap(err, "error unmarshalling force-new-cluster config change") + return fmt.Errorf("error unmarshalling force-new-cluster config change: %w", err) } if cc.Type == raftpb.ConfChangeRemoveNode { n.cluster.RemoveMember(cc.NodeID) @@ -174,7 +175,7 @@ func (n *Node) newRaftLogs(nodeID string) (raft.Peer, error) { } metadata, err := raftNode.Marshal() if err != nil { - return raft.Peer{}, errors.Wrap(err, "error marshalling raft node") + return raft.Peer{}, fmt.Errorf("error marshalling raft node: %w", err) } if err := n.raftLogger.BootstrapNew(metadata); err != nil { return raft.Peer{}, err @@ -234,11 +235,11 @@ func (n *Node) triggerSnapshot(ctx context.Context, raftConfig api.RaftConfig) { 
if appliedIndex > raftConfig.LogEntriesForSlowFollowers { err := n.raftStore.Compact(appliedIndex - raftConfig.LogEntriesForSlowFollowers) - if err != nil && err != raft.ErrCompacted { + if err != nil && !errors.Is(err, raft.ErrCompacted) { log.G(ctx).WithError(err).Error("failed to compact snapshot") } } - } else if err != raft.ErrSnapOutOfDate { + } else if !errors.Is(err, raft.ErrSnapOutOfDate) { log.G(ctx).WithError(err).Error("failed to create snapshot") } }(n.appliedIndex, n.snapshotMeta) diff --git a/manager/state/raft/storage/snapwrap.go b/manager/state/raft/storage/snapwrap.go index 7ae6d595ca..e6eff14103 100644 --- a/manager/state/raft/storage/snapwrap.go +++ b/manager/state/raft/storage/snapwrap.go @@ -1,13 +1,14 @@ package storage import ( + "errors" + "fmt" "os" "path/filepath" "sort" "strings" "github.com/moby/swarmkit/v2/manager/encryption" - "github.com/pkg/errors" "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" ) @@ -107,10 +108,10 @@ func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory) // use temporary snapshot directory so initialization appears atomic oldSnapshotter := oldFactory.New(oldDir) snapshot, err := oldSnapshotter.Load() - switch err { - case snap.ErrNoSnapshot: // if there's no snapshot, the migration succeeded + switch { + case errors.Is(err, snap.ErrNoSnapshot): // if there's no snapshot, the migration succeeded return nil - case nil: + case err == nil: break default: return err @@ -118,10 +119,10 @@ func MigrateSnapshot(oldDir, newDir string, oldFactory, newFactory SnapFactory) tmpdirpath := filepath.Clean(newDir) + ".tmp" if err := os.RemoveAll(tmpdirpath); err != nil { - return errors.Wrap(err, "could not remove temporary snapshot directory") + return fmt.Errorf("could not remove temporary snapshot directory: %w", err) } if err := os.MkdirAll(tmpdirpath, 0o700); err != nil { - return errors.Wrap(err, "could not create temporary snapshot directory") + return 
fmt.Errorf("could not create temporary snapshot directory: %w", err) } tmpSnapshotter := newFactory.New(tmpdirpath) diff --git a/manager/state/raft/storage/storage.go b/manager/state/raft/storage/storage.go index 8e7476156f..5548e56396 100644 --- a/manager/state/raft/storage/storage.go +++ b/manager/state/raft/storage/storage.go @@ -2,6 +2,7 @@ package storage import ( "context" + "errors" "fmt" "os" "path/filepath" @@ -9,7 +10,6 @@ import ( "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/encryption" - "github.com/pkg/errors" "go.etcd.io/etcd/client/pkg/v3/fileutil" "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/etcdserver/api/snap" @@ -84,7 +84,7 @@ func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncrypti } // ensure the new directory exists if err := os.MkdirAll(snapDir, 0o700); err != nil { - return nil, WALData{}, errors.Wrap(err, "failed to create snapshot directory") + return nil, WALData{}, fmt.Errorf("failed to create snapshot directory: %w", err) } var ( @@ -96,7 +96,7 @@ func (e *EncryptedRaftLogger) BootstrapFromDisk(ctx context.Context, oldEncrypti // Create a snapshotter and load snapshot data snapshotter = snapFactory.New(snapDir) snapshot, err := snapshotter.Load() - if err != nil && err != snap.ErrNoSnapshot { + if err != nil && !errors.Is(err, snap.ErrNoSnapshot) { return nil, WALData{}, err } @@ -149,14 +149,14 @@ func (e *EncryptedRaftLogger) BootstrapNew(metadata []byte) error { for _, dirpath := range []string{filepath.Dir(e.walDir()), e.snapDir()} { if err := os.MkdirAll(dirpath, 0o700); err != nil { - return errors.Wrapf(err, "failed to create %s", dirpath) + return fmt.Errorf("failed to create %s: %w", dirpath, err) } } var err error // the wal directory must not already exist upon creation e.wal, err = walFactory.Create(e.walDir(), metadata) if err != nil { - return errors.Wrap(err, "failed to create WAL") + return fmt.Errorf("failed to create WAL: %w", err) } e.snapshotter = 
NewSnapFactory(encrypter, decrypter).New(e.snapDir()) @@ -184,7 +184,7 @@ func (e *EncryptedRaftLogger) RotateEncryptionKey(newKey []byte) { // have a lock on writing to snapshots and WALs. wrapped, ok := e.wal.(*wrappedWAL) if !ok { - panic(fmt.Errorf("EncryptedRaftLogger's WAL is not a wrappedWAL")) + panic(errors.New("EncryptedRaftLogger's WAL is not a wrappedWAL")) } wrapped.encrypter, wrapped.decrypter = encryption.Defaults(newKey, e.FIPS) @@ -270,7 +270,7 @@ func (e *EncryptedRaftLogger) GC(index uint64, term uint64, keepOldSnapshots uin var snapTerm, snapIndex uint64 _, err = fmt.Sscanf(oldestSnapshot, "%016x-%016x.snap", &snapTerm, &snapIndex) if err != nil { - return errors.Wrapf(err, "malformed snapshot filename %s", oldestSnapshot) + return fmt.Errorf("malformed snapshot filename %s: %w", oldestSnapshot, err) } wals, err := ListWALs(e.walDir()) @@ -285,7 +285,7 @@ func (e *EncryptedRaftLogger) GC(index uint64, term uint64, keepOldSnapshots uin var walSeq, walIndex uint64 _, err = fmt.Sscanf(walName, "%016x-%016x.wal", &walSeq, &walIndex) if err != nil { - return errors.Wrapf(err, "could not parse WAL name %s", walName) + return fmt.Errorf("could not parse WAL name %s: %w", walName, err) } if walIndex >= snapIndex { @@ -305,12 +305,12 @@ func (e *EncryptedRaftLogger) GC(index uint64, term uint64, keepOldSnapshots uin walPath := filepath.Join(e.walDir(), wals[i]) l, err := fileutil.TryLockFile(walPath, os.O_WRONLY, fileutil.PrivateFileMode) if err != nil { - return errors.Wrapf(err, "could not lock old WAL file %s for removal", wals[i]) + return fmt.Errorf("could not lock old WAL file %s for removal: %w", wals[i], err) } err = os.Remove(walPath) l.Close() if err != nil { - return errors.Wrapf(err, "error removing old WAL file %s", wals[i]) + return fmt.Errorf("error removing old WAL file %s: %w", wals[i], err) } } @@ -323,7 +323,7 @@ func (e *EncryptedRaftLogger) SaveEntries(st raftpb.HardState, entries []raftpb. 
defer e.encoderMu.RUnlock() if e.wal == nil { - return fmt.Errorf("raft WAL has either been closed or has never been created") + return errors.New("raft WAL has either been closed or has never been created") } return e.wal.Save(st, entries) } diff --git a/manager/state/raft/storage/storage_test.go b/manager/state/raft/storage/storage_test.go index 9d0d2af751..13e3d460b2 100644 --- a/manager/state/raft/storage/storage_test.go +++ b/manager/state/raft/storage/storage_test.go @@ -7,7 +7,6 @@ import ( "testing" "github.com/moby/swarmkit/v2/manager/encryption" - "github.com/pkg/errors" "github.com/stretchr/testify/require" "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/wal/walpb" @@ -74,7 +73,8 @@ func TestBootstrapFromDisk(t *testing.T) { EncryptionKey: []byte(key), } _, _, err := logger.BootstrapFromDisk(context.Background()) - require.IsType(t, encryption.ErrCannotDecrypt{}, errors.Cause(err)) + var ecd encryption.ErrCannotDecrypt + require.ErrorAs(t, err, &ecd) } // but we can if we combine the two keys, we can bootstrap just fine diff --git a/manager/state/raft/storage/walwrap.go b/manager/state/raft/storage/walwrap.go index 48252059eb..7d64574c9e 100644 --- a/manager/state/raft/storage/walwrap.go +++ b/manager/state/raft/storage/walwrap.go @@ -2,6 +2,8 @@ package storage import ( "context" + "errors" + "fmt" "io" "os" "path/filepath" @@ -10,7 +12,6 @@ import ( "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/encryption" - "github.com/pkg/errors" "go.etcd.io/etcd/raft/v3/raftpb" "go.etcd.io/etcd/server/v3/wal" "go.etcd.io/etcd/server/v3/wal/walpb" @@ -164,14 +165,15 @@ func ReadRepairWAL( repaired := false for { if reader, err = factory.Open(walDir, walsnap); err != nil { - return nil, WALData{}, errors.Wrap(err, "failed to open WAL") + return nil, WALData{}, fmt.Errorf("failed to open WAL: %w", err) } if metadata, st, ents, err = reader.ReadAll(); err != nil { if closeErr := reader.Close(); closeErr != nil { return nil, 
WALData{}, closeErr } - if _, ok := err.(encryption.ErrCannotDecrypt); ok { - return nil, WALData{}, errors.Wrap(err, "failed to decrypt WAL") + var errCannotDecrypt encryption.ErrCannotDecrypt + if errors.As(err, &errCannotDecrypt) { + return nil, WALData{}, fmt.Errorf("failed to decrypt WAL: %w", err) } // we can only repair ErrUnexpectedEOF and we never repair twice. if repaired || !errors.Is(err, io.ErrUnexpectedEOF) { @@ -179,10 +181,10 @@ func ReadRepairWAL( // some (last) of the files cannot be recovered? ("best effort" recovery?) // Or should an informative error be produced to help the user (which could // mean: remove the last file?). See TestReadRepairWAL for more details. - return nil, WALData{}, errors.Wrap(err, "irreparable WAL error") + return nil, WALData{}, fmt.Errorf("irreparable WAL error: %w", err) } if !wal.Repair(nil, walDir) { - return nil, WALData{}, errors.Wrap(err, "WAL error cannot be repaired") + return nil, WALData{}, fmt.Errorf("WAL error cannot be repaired: %w", err) } log.G(ctx).WithError(err).Info("repaired WAL error") repaired = true @@ -207,28 +209,28 @@ func MigrateWALs(ctx context.Context, oldDir, newDir string, oldFactory, newFact oldReader.Close() if err := os.MkdirAll(filepath.Dir(newDir), 0o700); err != nil { - return errors.Wrap(err, "could not create parent directory") + return fmt.Errorf("could not create parent directory: %w", err) } // keep temporary wal directory so WAL initialization appears atomic tmpdirpath := filepath.Clean(newDir) + ".tmp" if err := os.RemoveAll(tmpdirpath); err != nil { - return errors.Wrap(err, "could not remove temporary WAL directory") + return fmt.Errorf("could not remove temporary WAL directory: %w", err) } defer os.RemoveAll(tmpdirpath) tmpWAL, err := newFactory.Create(tmpdirpath, waldata.Metadata) if err != nil { - return errors.Wrap(err, "could not create new WAL in temporary WAL directory") + return fmt.Errorf("could not create new WAL in temporary WAL directory: %w", err) } defer 
tmpWAL.Close() if err := tmpWAL.SaveSnapshot(snapshot); err != nil { - return errors.Wrap(err, "could not write WAL snapshot in temporary directory") + return fmt.Errorf("could not write WAL snapshot in temporary directory: %w", err) } if err := tmpWAL.Save(waldata.HardState, waldata.Entries); err != nil { - return errors.Wrap(err, "could not migrate WALs to temporary directory") + return fmt.Errorf("could not migrate WALs to temporary directory: %w", err) } if err := tmpWAL.Close(); err != nil { return err diff --git a/manager/state/raft/testutils/testutils.go b/manager/state/raft/testutils/testutils.go index 613f7f3128..daa2e79fde 100644 --- a/manager/state/raft/testutils/testutils.go +++ b/manager/state/raft/testutils/testutils.go @@ -2,6 +2,8 @@ package testutils import ( "context" + "errors" + "fmt" "net" "os" "reflect" @@ -20,7 +22,6 @@ import ( "github.com/moby/swarmkit/v2/manager/state/raft" "github.com/moby/swarmkit/v2/manager/state/store" "github.com/moby/swarmkit/v2/testutils" - "github.com/pkg/errors" "github.com/stretchr/testify/assert" "github.com/stretchr/testify/require" etcdraft "go.etcd.io/etcd/raft/v3" @@ -552,7 +553,7 @@ func CheckValue(t *testing.T, clockSource *fakeclock.FakeClock, raftNode *TestNo return } if len(allNodes) != 1 { - err = errors.Errorf("expected 1 node, got %d nodes", len(allNodes)) + err = fmt.Errorf("expected 1 node, got %d nodes", len(allNodes)) return } if !reflect.DeepEqual(allNodes[0], createdNode) { @@ -575,7 +576,7 @@ func CheckNoValue(t *testing.T, clockSource *fakeclock.FakeClock, raftNode *Test return } if len(allNodes) != 0 { - err = errors.Errorf("expected no nodes, got %d", len(allNodes)) + err = fmt.Errorf("expected no nodes, got %d", len(allNodes)) } }) return err @@ -599,16 +600,16 @@ func CheckValuesOnNodes(t *testing.T, clockSource *fakeclock.FakeClock, checkNod for i, id := range ids { n := store.GetNode(tx, id) if n == nil { - err = errors.Errorf("node %s not found on %d (iteration %d)", id, checkNodeID, 
iteration) + err = fmt.Errorf("node %s not found on %d (iteration %d)", id, checkNodeID, iteration) return } if !reflect.DeepEqual(values[i], n) { - err = errors.Errorf("node %s did not match expected value on %d (iteration %d)", id, checkNodeID, iteration) + err = fmt.Errorf("node %s did not match expected value on %d (iteration %d)", id, checkNodeID, iteration) return } } if len(allNodes) != len(ids) { - err = errors.Errorf("expected %d nodes, got %d (iteration %d)", len(ids), len(allNodes), iteration) + err = fmt.Errorf("expected %d nodes, got %d (iteration %d)", len(ids), len(allNodes), iteration) return } }) diff --git a/manager/state/raft/transport/peer.go b/manager/state/raft/transport/peer.go index 071f6dc76f..4a08529d9e 100644 --- a/manager/state/raft/transport/peer.go +++ b/manager/state/raft/transport/peer.go @@ -2,6 +2,7 @@ package transport import ( "context" + "errors" "fmt" "sync" "time" @@ -12,7 +13,6 @@ import ( "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/state/raft/membership" - "github.com/pkg/errors" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/raftpb" "google.golang.org/grpc/status" @@ -46,7 +46,7 @@ type peer struct { func newPeer(id uint64, addr string, tr *Transport) (*peer, error) { cc, err := tr.dial(addr) if err != nil { - return nil, errors.Wrapf(err, "failed to create conn for %x with addr %s", id, addr) + return nil, fmt.Errorf("failed to create conn for %x with addr %s: %w", id, addr, err) } ctx, cancel := context.WithCancel(tr.ctx) ctx = log.WithField(ctx, "peer_id", fmt.Sprintf("%x", id)) @@ -84,7 +84,7 @@ func (p *peer) send(m raftpb.Message) (err error) { return p.ctx.Err() default: p.tr.config.ReportUnreachable(p.id) - return errors.Errorf("peer is unreachable") + return errors.New("peer is unreachable") } return nil } @@ -132,7 +132,7 @@ func (p *peer) address() string { func (p *peer) resolveAddr(ctx context.Context, id uint64) (string, error) { resp, err := 
api.NewRaftClient(p.conn()).ResolveAddress(ctx, &api.ResolveAddressRequest{RaftID: id}) if err != nil { - return "", errors.Wrap(err, "failed to resolve address") + return "", fmt.Errorf("failed to resolve address: %w", err) } return resp.Addr, nil } @@ -304,10 +304,10 @@ func (p *peer) sendProcessMessage(ctx context.Context, m raftpb.Message) error { func healthCheckConn(ctx context.Context, cc *grpc.ClientConn) error { resp, err := api.NewHealthClient(cc).Check(ctx, &api.HealthCheckRequest{Service: "Raft"}) if err != nil { - return errors.Wrap(err, "failed to check health") + return fmt.Errorf("failed to check health: %w", err) } if resp.Status != api.HealthCheckResponse_SERVING { - return errors.Errorf("health check returned status %s", resp.Status) + return fmt.Errorf("health check returned status %s", resp.Status) } return nil } @@ -351,7 +351,7 @@ func (p *peer) drain() error { return nil } if err := p.sendProcessMessage(ctx, m); err != nil { - return errors.Wrap(err, "send drain message") + return fmt.Errorf("send drain message: %w", err) } case <-ctx.Done(): return ctx.Err() diff --git a/manager/state/raft/transport/transport.go b/manager/state/raft/transport/transport.go index a2bc5a7e2f..565fbf07a7 100644 --- a/manager/state/raft/transport/transport.go +++ b/manager/state/raft/transport/transport.go @@ -4,6 +4,8 @@ package transport import ( "context" + "errors" + "fmt" "math" "net" "sync" @@ -14,7 +16,6 @@ import ( "google.golang.org/grpc/credentials" "github.com/moby/swarmkit/v2/log" - "github.com/pkg/errors" "go.etcd.io/etcd/raft/v3" "go.etcd.io/etcd/raft/v3/raftpb" ) @@ -129,7 +130,7 @@ func (t *Transport) Send(m raftpb.Message) error { return errors.New("transport stopped") } if t.config.IsIDRemoved(m.To) { - return errors.Errorf("refusing to send message %s to removed member %x", m.Type, m.To) + return fmt.Errorf("refusing to send message %s to removed member %x", m.Type, m.To) } p, ok := t.peers[m.To] if !ok { @@ -146,7 +147,7 @@ func (t 
*Transport) Send(m raftpb.Message) error { return nil } if err := p.send(m); err != nil { - return errors.Wrapf(err, "failed to send message %x to %x", m.Type, m.To) + return fmt.Errorf("failed to send message %x to %x: %w", m.Type, m.To, err) } return nil } @@ -164,12 +165,12 @@ func (t *Transport) AddPeer(id uint64, addr string) error { if ep.address() == addr { return nil } - return errors.Errorf("peer %x already added with addr %s", id, ep.addr) + return fmt.Errorf("peer %x already added with addr %s", id, ep.addr) } log.G(t.ctx).Debugf("transport: add peer %x with address %s", id, addr) p, err := newPeer(id, addr, t) if err != nil { - return errors.Wrapf(err, "failed to create peer %x with addr %s", id, addr) + return fmt.Errorf("failed to create peer %x with addr %s: %w", id, addr, err) } t.peers[id] = p return nil @@ -402,11 +403,11 @@ func (t *Transport) resolvePeer(ctx context.Context, id uint64) (*peer, error) { func (t *Transport) sendUnknownMessage(ctx context.Context, m raftpb.Message) error { p, err := t.resolvePeer(ctx, m.To) if err != nil { - return errors.Wrapf(err, "failed to resolve peer") + return fmt.Errorf("failed to resolve peer: %w", err) } defer p.cancel() if err := p.sendProcessMessage(ctx, m); err != nil { - return errors.Wrapf(err, "failed to send message") + return fmt.Errorf("failed to send message: %w", err) } return nil } diff --git a/manager/state/store/memory.go b/manager/state/store/memory.go index 4814e04551..fe2e247fcd 100644 --- a/manager/state/store/memory.go +++ b/manager/state/store/memory.go @@ -181,7 +181,7 @@ func (s *MemoryStore) Close() error { func fromArgs(args ...interface{}) ([]byte, error) { if len(args) != 1 { - return nil, fmt.Errorf("must provide only a single argument") + return nil, errors.New("must provide only a single argument") } arg, ok := args[0].(string) if !ok { @@ -310,7 +310,7 @@ func (s *MemoryStore) ApplyStoreActions(actions []api.StoreAction) error { func applyStoreAction(tx Tx, sa 
api.StoreAction) error { for _, os := range objectStorers { err := os.ApplyStoreAction(tx, sa) - if err != errUnknownStoreAction { + if !errors.Is(err, errUnknownStoreAction) { return err } } diff --git a/manager/state/store/resources.go b/manager/state/store/resources.go index 5ed12225e8..f4b510539e 100644 --- a/manager/state/store/resources.go +++ b/manager/state/store/resources.go @@ -1,11 +1,12 @@ package store import ( + "errors" + "fmt" "strings" memdb "github.com/hashicorp/go-memdb" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" ) const tableResource = "resource" @@ -92,7 +93,7 @@ func confirmExtension(tx Tx, r *api.Resource) error { // There must be an extension corresponding to the Kind field. extensions, err := FindExtensions(tx, ByName(r.Kind)) if err != nil { - return errors.Wrap(err, "failed to query extensions") + return fmt.Errorf("failed to query extensions: %w", err) } if len(extensions) == 0 { return ErrNoKind diff --git a/node/node.go b/node/node.go index eda5266a73..ca90101e67 100644 --- a/node/node.go +++ b/node/node.go @@ -5,6 +5,8 @@ import ( "context" "crypto/tls" "encoding/json" + "errors" + "fmt" "math" "net" "os" @@ -32,7 +34,6 @@ import ( "github.com/moby/swarmkit/v2/node/plugin" "github.com/moby/swarmkit/v2/remotes" "github.com/moby/swarmkit/v2/xnet" - "github.com/pkg/errors" "github.com/sirupsen/logrus" bolt "go.etcd.io/bbolt" "google.golang.org/grpc" @@ -519,10 +520,10 @@ func (n *Node) run(ctx context.Context) (err error) { // And, finally, we park and wait for the node to close up. If we get any // error other than context canceled, we return it. 
wg.Wait() - if managerErr != nil && errors.Cause(managerErr) != context.Canceled { + if managerErr != nil && !errors.Is(managerErr, context.Canceled) { return managerErr } - if agentErr != nil && errors.Cause(agentErr) != context.Canceled { + if agentErr != nil && !errors.Is(agentErr, context.Canceled) { return agentErr } // NOTE(dperny): we return err here, but the last time I can see err being @@ -813,18 +814,18 @@ func (n *Node) loadSecurityConfig(ctx context.Context, paths *ca.SecurityConfigP // Check if we already have a valid certificates on disk. rootCA, err := ca.GetLocalRootCA(paths.RootCA) - if err != nil && err != ca.ErrNoLocalRootCA { + if err != nil && !errors.Is(err, ca.ErrNoLocalRootCA) { return nil, nil, err } if err == nil { // if forcing a new cluster, we allow the certificates to be expired - a new set will be generated securityConfig, cancel, err = ca.LoadSecurityConfig(ctx, rootCA, krw, n.config.ForceNewCluster) if err != nil { - _, isInvalidKEK := errors.Cause(err).(ca.ErrInvalidKEK) - if isInvalidKEK { + var eie ca.ErrInvalidKEK + if errors.As(err, &eie) { return nil, nil, ErrInvalidUnlockKey } else if !os.IsNotExist(err) { - return nil, nil, errors.Wrapf(err, "error while loading TLS certificate in %s", paths.Node.Cert) + return nil, nil, fmt.Errorf("error while loading TLS certificate in %s: %w", paths.Node.Cert, err) } } } @@ -845,7 +846,7 @@ func (n *Node) loadSecurityConfig(ctx context.Context, paths *ca.SecurityConfigP return nil, nil, err } log.G(ctx).Debug("generated CA key and certificate") - } else if err == ca.ErrNoLocalRootCA { // from previous error loading the root CA from disk + } else if errors.Is(err, ca.ErrNoLocalRootCA) { // from previous error loading the root CA from disk // if we are attempting to join another cluster, which has a FIPS join token, and we are not FIPS, error if n.config.JoinAddr != "" && isMandatoryFIPSClusterJoinToken(n.config.JoinToken) && !n.config.FIPS { return nil, nil, ErrMandatoryFIPS @@ -869,7 
+870,8 @@ func (n *Node) loadSecurityConfig(ctx context.Context, paths *ca.SecurityConfigP "node.id": securityConfig.ClientTLSCreds.NodeID(), }).Debugf("loaded TLS certificate") } else { - if _, ok := errors.Cause(err).(ca.ErrInvalidKEK); ok { + var eie ca.ErrInvalidKEK + if errors.As(err, &eie) { return nil, nil, ErrInvalidUnlockKey } log.G(ctx).WithError(err).Debugf("no node credentials found in: %s", krw.Target()) @@ -1124,7 +1126,7 @@ func (n *Node) superviseManager(ctx context.Context, securityConfig *ca.Security wasRemoved, err := n.runManager(ctx, securityConfig, rootPaths, ready, workerRole) if err != nil { waitRoleCancel() - return errors.Wrap(err, "manager stopped") + return fmt.Errorf("manager stopped: %w", err) } // If the manager stopped running and our role is still diff --git a/node/node_test.go b/node/node_test.go index cdf9e8d680..5d99159fe7 100644 --- a/node/node_test.go +++ b/node/node_test.go @@ -5,6 +5,7 @@ import ( "context" "crypto/x509" "encoding/pem" + "errors" "fmt" "os" "path/filepath" @@ -24,7 +25,6 @@ import ( "github.com/moby/swarmkit/v2/log" "github.com/moby/swarmkit/v2/manager/state/store" "github.com/moby/swarmkit/v2/testutils" - "github.com/pkg/errors" "github.com/stretchr/testify/require" ) @@ -153,7 +153,8 @@ func TestLoadSecurityConfigLoadFromDisk(t *testing.T) { }) require.NoError(t, err) _, _, err = node.loadSecurityConfig(context.Background(), paths) - require.IsType(t, x509.UnknownAuthorityError{}, errors.Cause(err)) + var uae x509.UnknownAuthorityError + require.ErrorAs(t, err, &uae) // Convert to PKCS1 and require FIPS require.NoError(t, krw.DowngradeKey()) @@ -168,7 +169,7 @@ func TestLoadSecurityConfigLoadFromDisk(t *testing.T) { }) require.NoError(t, err) _, _, err = node.loadSecurityConfig(context.Background(), paths) - require.Equal(t, keyutils.ErrFIPSUnsupportedKeyFormat, errors.Cause(err)) + require.ErrorIs(t, err, keyutils.ErrFIPSUnsupportedKeyFormat) } // If there is no CA, and a join addr is provided, one is 
downloaded from the diff --git a/remotes/remotes.go b/remotes/remotes.go index c31e1abb49..b680ca4207 100644 --- a/remotes/remotes.go +++ b/remotes/remotes.go @@ -1,7 +1,7 @@ package remotes import ( - "fmt" + "errors" "math" "math/rand" "sort" @@ -10,7 +10,7 @@ import ( "github.com/moby/swarmkit/v2/api" ) -var errRemotesUnavailable = fmt.Errorf("no remote hosts provided") +var errRemotesUnavailable = errors.New("no remote hosts provided") // DefaultObservationWeight provides a weight to use for positive observations // that will balance well under repeated observations. diff --git a/swarmd/cmd/swarm-rafttool/dump.go b/swarmd/cmd/swarm-rafttool/dump.go index b58f5f74fa..9889f53665 100644 --- a/swarmd/cmd/swarm-rafttool/dump.go +++ b/swarmd/cmd/swarm-rafttool/dump.go @@ -59,7 +59,7 @@ func loadData(swarmdir, unlockKey string) (*storage.WALData, *raftpb.Snapshot, e var walsnap walpb.Snapshot snapshot, err := snapFactory.New(snapDir).Load() - if err != nil && err != snap.ErrNoSnapshot { + if err != nil && !errors.Is(err, snap.ErrNoSnapshot) { return nil, nil, err } if snapshot != nil { @@ -449,7 +449,7 @@ func dumpObject(swarmdir, unlockKey, objType string, selector objSelector) error } if len(objects) == 0 { - return fmt.Errorf("no matching objects found") + return errors.New("no matching objects found") } for _, object := range objects { diff --git a/swarmd/cmd/swarm-rafttool/renewcert.go b/swarmd/cmd/swarm-rafttool/renewcert.go index 696370961a..decd6f08a9 100644 --- a/swarmd/cmd/swarm-rafttool/renewcert.go +++ b/swarmd/cmd/swarm-rafttool/renewcert.go @@ -1,12 +1,12 @@ package main import ( + "errors" "fmt" "github.com/cloudflare/cfssl/helpers" "github.com/gogo/protobuf/proto" "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "go.etcd.io/etcd/raft/v3/raftpb" "github.com/moby/swarmkit/v2/api" @@ -18,15 +18,15 @@ func renewCerts(swarmdir, unlockKey string) error { // it's expired - this will just obtain a new cert anyway. 
krw, err := getKRW(swarmdir, unlockKey) if err != nil { - return errors.Wrap(err, "could not load swarm certificate") + return fmt.Errorf("could not load swarm certificate: %w", err) } cert, _, err := krw.Read() if err != nil { - return errors.Wrap(err, "could not read swarm certificate") + return fmt.Errorf("could not read swarm certificate: %w", err) } certificates, err := helpers.ParseCertificatesPEM(cert) if err != nil { - return errors.Wrap(err, "could not parse node certificate") + return fmt.Errorf("could not parse node certificate: %w", err) } // We need to make sure when renewing that we provide the same CN (node ID), // OU (role), and org (swarm cluster ID) when getting a new certificate @@ -39,7 +39,7 @@ func renewCerts(swarmdir, unlockKey string) error { // Load up the raft data on disk walData, snapshot, err := loadData(swarmdir, unlockKey) if err != nil { - return errors.Wrap(err, "could not load swarm data") + return fmt.Errorf("could not load swarm data: %w", err) } var cluster *api.Cluster @@ -65,7 +65,7 @@ func renewCerts(swarmdir, unlockKey string) error { r := &api.InternalRaftRequest{} err := proto.Unmarshal(ent.Data, r) if err != nil { - return errors.Wrap(err, "could not read WAL") + return fmt.Errorf("could not read WAL: %w", err) } for _, act := range r.Action { @@ -93,7 +93,7 @@ func renewCerts(swarmdir, unlockKey string) error { } rootCA, err := ca.RootCAFromAPI(&cluster.RootCA, expiry) if err != nil { - return errors.Wrap(err, "invalid CA info in raft logs; cannot renew certs") + return fmt.Errorf("invalid CA info in raft logs; cannot renew certs: %w", err) } _, _, err = rootCA.IssueAndSaveNewCertificates(krw, cn, ou, org) diff --git a/swarmd/cmd/swarmctl/node/activate.go b/swarmd/cmd/swarmctl/node/activate.go index d1efda53b3..46a1c228d2 100644 --- a/swarmd/cmd/swarmctl/node/activate.go +++ b/swarmd/cmd/swarmctl/node/activate.go @@ -1,6 +1,7 @@ package node import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -13,7 +14,7 @@ 
var ( Short: "Activate a node", RunE: func(cmd *cobra.Command, args []string) error { if err := changeNodeAvailability(cmd, args, api.NodeAvailabilityActive); err != nil { - if err == errNoChange { + if errors.Is(err, errNoChange) { return fmt.Errorf("Node %s is already active", args[0]) } return err diff --git a/swarmd/cmd/swarmctl/node/demote.go b/swarmd/cmd/swarmctl/node/demote.go index b0b32511d5..1184847950 100644 --- a/swarmd/cmd/swarmctl/node/demote.go +++ b/swarmd/cmd/swarmctl/node/demote.go @@ -1,6 +1,7 @@ package node import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -13,7 +14,7 @@ var ( Short: "Demote a node from a manager to a worker", RunE: func(cmd *cobra.Command, args []string) error { if err := changeNodeRole(cmd, args, api.NodeRoleWorker); err != nil { - if err == errNoChange { + if errors.Is(err, errNoChange) { return fmt.Errorf("Node %s is already a worker", args[0]) } return err diff --git a/swarmd/cmd/swarmctl/node/drain.go b/swarmd/cmd/swarmctl/node/drain.go index 1f6fb28a21..31cb98e34f 100644 --- a/swarmd/cmd/swarmctl/node/drain.go +++ b/swarmd/cmd/swarmctl/node/drain.go @@ -1,6 +1,7 @@ package node import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -13,7 +14,7 @@ var ( Short: "Drain a node", RunE: func(cmd *cobra.Command, args []string) error { if err := changeNodeAvailability(cmd, args, api.NodeAvailabilityDrain); err != nil { - if err == errNoChange { + if errors.Is(err, errNoChange) { return fmt.Errorf("Node %s was already drained", args[0]) } return err diff --git a/swarmd/cmd/swarmctl/node/pause.go b/swarmd/cmd/swarmctl/node/pause.go index 039521978c..ea4258562c 100644 --- a/swarmd/cmd/swarmctl/node/pause.go +++ b/swarmd/cmd/swarmctl/node/pause.go @@ -1,6 +1,7 @@ package node import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -13,7 +14,7 @@ var ( Short: "Pause a node", RunE: func(cmd *cobra.Command, args []string) error { if err := changeNodeAvailability(cmd, args, api.NodeAvailabilityPause); err != 
nil { - if err == errNoChange { + if errors.Is(err, errNoChange) { return fmt.Errorf("Node %s was already paused", args[0]) } return err diff --git a/swarmd/cmd/swarmctl/node/promote.go b/swarmd/cmd/swarmctl/node/promote.go index d3e892faa6..10895beba0 100644 --- a/swarmd/cmd/swarmctl/node/promote.go +++ b/swarmd/cmd/swarmctl/node/promote.go @@ -1,6 +1,7 @@ package node import ( + "errors" "fmt" "github.com/moby/swarmkit/v2/api" @@ -13,7 +14,7 @@ var ( Short: "Promote a node to a manager", RunE: func(cmd *cobra.Command, args []string) error { if err := changeNodeRole(cmd, args, api.NodeRoleManager); err != nil { - if err == errNoChange { + if errors.Is(err, errNoChange) { return fmt.Errorf("Node %s is already a manager", args[0]) } return err diff --git a/swarmd/cmd/swarmctl/node/update.go b/swarmd/cmd/swarmctl/node/update.go index 4e06f8d4e9..1739f89f91 100644 --- a/swarmd/cmd/swarmctl/node/update.go +++ b/swarmd/cmd/swarmctl/node/update.go @@ -1,6 +1,7 @@ package node import ( + "errors" "fmt" "github.com/spf13/cobra" @@ -12,7 +13,7 @@ var ( Short: "Update a node", RunE: func(cmd *cobra.Command, args []string) error { if err := updateNode(cmd, args); err != nil { - if err == errNoChange { + if errors.Is(err, errNoChange) { return fmt.Errorf("No change for node %s", args[0]) } return err diff --git a/swarmd/cmd/swarmctl/service/flagparser/config.go b/swarmd/cmd/swarmctl/service/flagparser/config.go index 6235c0c427..a76303d322 100644 --- a/swarmd/cmd/swarmctl/service/flagparser/config.go +++ b/swarmd/cmd/swarmctl/service/flagparser/config.go @@ -1,6 +1,7 @@ package flagparser import ( + "errors" "fmt" "strings" @@ -16,14 +17,14 @@ func parseConfigString(configString string) (configName, presentName string, err configName = strings.TrimSpace(tokens[0]) if configName == "" { - err = fmt.Errorf("invalid config name provided") + err = errors.New("invalid config name provided") return } if len(tokens) > 1 { presentName = strings.TrimSpace(tokens[1]) if presentName == 
"" { - err = fmt.Errorf("invalid presentation name provided") + err = errors.New("invalid presentation name provided") return } } else { diff --git a/swarmd/cmd/swarmctl/service/flagparser/mode.go b/swarmd/cmd/swarmctl/service/flagparser/mode.go index 353c7a183a..b1ff9f2e0b 100644 --- a/swarmd/cmd/swarmctl/service/flagparser/mode.go +++ b/swarmd/cmd/swarmctl/service/flagparser/mode.go @@ -1,7 +1,7 @@ package flagparser import ( - "fmt" + "errors" "github.com/moby/swarmkit/v2/api" "github.com/spf13/pflag" @@ -32,7 +32,7 @@ func parseMode(flags *pflag.FlagSet, spec *api.ServiceSpec) error { if flags.Changed("replicas") { if spec.GetReplicated() == nil { - return fmt.Errorf("--replicas can only be specified in --mode replicated") + return errors.New("--replicas can only be specified in --mode replicated") } replicas, err := flags.GetUint64("replicas") if err != nil { diff --git a/swarmd/cmd/swarmctl/service/flagparser/placement.go b/swarmd/cmd/swarmctl/service/flagparser/placement.go index 279a509873..1b0406d08a 100644 --- a/swarmd/cmd/swarmctl/service/flagparser/placement.go +++ b/swarmd/cmd/swarmctl/service/flagparser/placement.go @@ -1,7 +1,7 @@ package flagparser import ( - "fmt" + "errors" "github.com/moby/swarmkit/v2/api" "github.com/spf13/pflag" @@ -21,7 +21,7 @@ func parsePlacement(flags *pflag.FlagSet, spec *api.ServiceSpec) error { if flags.Changed("replicas-max-per-node") { if spec.GetReplicated() == nil { - return fmt.Errorf("--replicas-max-per-node can only be specified in --mode replicated") + return errors.New("--replicas-max-per-node can only be specified in --mode replicated") } maxReplicas, err := flags.GetUint64("replicas-max-per-node") if err != nil { diff --git a/swarmd/cmd/swarmctl/service/flagparser/port.go b/swarmd/cmd/swarmctl/service/flagparser/port.go index ac95892b31..de354992d5 100644 --- a/swarmd/cmd/swarmctl/service/flagparser/port.go +++ b/swarmd/cmd/swarmctl/service/flagparser/port.go @@ -1,11 +1,12 @@ package flagparser import ( + 
"errors" + "fmt" "strconv" "strings" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" "github.com/spf13/pflag" ) @@ -55,7 +56,7 @@ func parsePortConfig(portConfig string) (string, api.PortConfig_Protocol, uint32 portSpec := parts[1] protocol, port, err := parsePortSpec(portSpec) if err != nil { - return "", protocol, 0, 0, errors.Wrap(err, "failed to parse port") + return "", protocol, 0, 0, fmt.Errorf("failed to parse port: %w", err) } if len(parts) > 2 { @@ -64,7 +65,7 @@ func parsePortConfig(portConfig string) (string, api.PortConfig_Protocol, uint32 portSpec := parts[2] nodeProtocol, swarmPort, err := parsePortSpec(portSpec) if err != nil { - return "", protocol, 0, 0, errors.Wrap(err, "failed to parse node port") + return "", protocol, 0, 0, fmt.Errorf("failed to parse node port: %w", err) } if nodeProtocol != protocol { @@ -89,7 +90,7 @@ func parsePortSpec(portSpec string) (api.PortConfig_Protocol, uint32, error) { proto := parts[1] protocol, ok := api.PortConfig_Protocol_value[strings.ToUpper(proto)] if !ok { - return 0, 0, errors.Errorf("invalid protocol string: %s", proto) + return 0, 0, fmt.Errorf("invalid protocol string: %s", proto) } return api.PortConfig_Protocol(protocol), uint32(port), nil diff --git a/swarmd/cmd/swarmctl/service/flagparser/secret.go b/swarmd/cmd/swarmctl/service/flagparser/secret.go index 4e38638e10..8d455c7174 100644 --- a/swarmd/cmd/swarmctl/service/flagparser/secret.go +++ b/swarmd/cmd/swarmctl/service/flagparser/secret.go @@ -1,6 +1,7 @@ package flagparser import ( + "errors" "fmt" "strings" @@ -16,14 +17,14 @@ func parseSecretString(secretString string) (secretName, presentName string, err secretName = strings.TrimSpace(tokens[0]) if secretName == "" { - err = fmt.Errorf("invalid secret name provided") + err = errors.New("invalid secret name provided") return } if len(tokens) > 1 { presentName = strings.TrimSpace(tokens[1]) if presentName == "" { - err = fmt.Errorf("invalid presentation name provided") + err = 
errors.New("invalid presentation name provided") return } } else { diff --git a/swarmd/cmd/swarmctl/service/flagparser/tmpfs.go b/swarmd/cmd/swarmctl/service/flagparser/tmpfs.go index e0f90188e4..f5992ae28c 100644 --- a/swarmd/cmd/swarmctl/service/flagparser/tmpfs.go +++ b/swarmd/cmd/swarmctl/service/flagparser/tmpfs.go @@ -1,13 +1,14 @@ package flagparser import ( + "errors" + "fmt" "os" "path" "strconv" "strings" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" "github.com/spf13/pflag" ) @@ -27,11 +28,11 @@ func parseTmpfs(flags *pflag.FlagSet, spec *api.ServiceSpec) error { parts := strings.SplitN(tmpfs, ":", 2) if len(parts) < 1 { - return errors.Errorf("invalid mount spec: %v", tmpfs) + return fmt.Errorf("invalid mount spec: %v", tmpfs) } if len(parts[0]) == 0 || !path.IsAbs(parts[0]) { - return errors.Errorf("invalid mount spec: %v", tmpfs) + return fmt.Errorf("invalid mount spec: %v", tmpfs) } m := api.Mount{ @@ -42,7 +43,7 @@ func parseTmpfs(flags *pflag.FlagSet, spec *api.ServiceSpec) error { if len(parts) == 2 { if strings.Contains(parts[1], ":") { // repeated colon is illegal - return errors.Errorf("invalid mount spec: %v", tmpfs) + return fmt.Errorf("invalid mount spec: %v", tmpfs) } // BUG(stevvooe): Cobra stringslice actually doesn't correctly @@ -73,7 +74,7 @@ func parseTmpfs(flags *pflag.FlagSet, spec *api.ServiceSpec) error { case 'k': multiplier = 1 << 10 default: - return errors.Errorf("invalid size format: %v", flag) + return fmt.Errorf("invalid size format: %v", flag) } // reparse the meat diff --git a/swarmd/cmd/swarmctl/service/logs.go b/swarmd/cmd/swarmctl/service/logs.go index 333119020f..3fc735c61b 100644 --- a/swarmd/cmd/swarmctl/service/logs.go +++ b/swarmd/cmd/swarmctl/service/logs.go @@ -2,13 +2,13 @@ package service import ( "context" + "errors" "fmt" "io" "os" "github.com/moby/swarmkit/swarmd/cmd/swarmctl/common" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" "github.com/spf13/cobra" ) @@ -55,16 +55,16 @@ var 
( }, }) if err != nil { - return errors.Wrap(err, "failed to subscribe to logs") + return fmt.Errorf("failed to subscribe to logs: %w", err) } for { log, err := stream.Recv() - if err == io.EOF { + if errors.Is(err, io.EOF) { return nil } if err != nil { - return errors.Wrap(err, "failed receiving stream message") + return fmt.Errorf("failed receiving stream message: %w", err) } for _, msg := range log.Messages { diff --git a/swarmd/dockerexec/adapter.go b/swarmd/dockerexec/adapter.go index 94008fbacb..07892f4e4f 100644 --- a/swarmd/dockerexec/adapter.go +++ b/swarmd/dockerexec/adapter.go @@ -3,6 +3,7 @@ package dockerexec import ( "context" "encoding/json" + "errors" "fmt" "io" "strings" @@ -16,7 +17,6 @@ import ( "github.com/moby/swarmkit/v2/agent/exec" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/log" - "github.com/pkg/errors" "golang.org/x/time/rate" ) @@ -104,7 +104,7 @@ func (c *containerAdapter) pullImage(ctx context.Context) error { } // if the final stream object contained an error, return it if errMsg, ok := m["error"]; ok { - return errors.Errorf("%v", errMsg) + return fmt.Errorf("%v", errMsg) } return nil } @@ -286,7 +286,7 @@ func (c *containerAdapter) logs(ctx context.Context, options api.LogSubscription // See protobuf documentation for details of how this works. 
apiOptions.Tail = fmt.Sprint(-options.Tail - 1) } else if options.Tail > 0 { - return nil, fmt.Errorf("tail relative to start of logs not supported via docker API") + return nil, errors.New("tail relative to start of logs not supported via docker API") } if len(options.Streams) == 0 { diff --git a/swarmd/dockerexec/controller.go b/swarmd/dockerexec/controller.go index 0f608b8229..a1b3a54abb 100644 --- a/swarmd/dockerexec/controller.go +++ b/swarmd/dockerexec/controller.go @@ -5,6 +5,7 @@ import ( "bytes" "context" "encoding/binary" + "errors" "fmt" "io" "strconv" @@ -16,7 +17,6 @@ import ( engineapi "github.com/docker/docker/client" "github.com/docker/go-connections/nat" gogotypes "github.com/gogo/protobuf/types" - "github.com/pkg/errors" "golang.org/x/time/rate" "github.com/moby/swarmkit/v2/agent/exec" @@ -181,7 +181,7 @@ func (r *controller) Start(ctx context.Context) error { } if err := r.adapter.start(ctx); err != nil { - return errors.Wrap(err, "starting container failed") + return fmt.Errorf("starting container failed: %w", err) } // no health check @@ -219,7 +219,7 @@ func (r *controller) Start(ctx context.Context) error { case "die": // exit on terminal events ctnr, err := r.adapter.inspect(ctx) if err != nil { - return errors.Wrap(err, "die event received") + return fmt.Errorf("die event received: %w", err) } return makeExitError(ctnr) @@ -232,7 +232,7 @@ func (r *controller) Start(ctx context.Context) error { // in this case, we stop the container and report unhealthy status // TODO(runshenzhu): double check if it can cause a dead lock issue here if err := r.Shutdown(ctx); err != nil { - return errors.Wrap(err, "unhealthy container shutdown failed") + return fmt.Errorf("unhealthy container shutdown failed: %w", err) } return ErrContainerUnhealthy @@ -262,7 +262,7 @@ func (r *controller) Wait(ctx context.Context) error { // check the initial state and report that. 
ctnr, err := r.adapter.inspect(ctx) if err != nil { - return errors.Wrap(err, "inspecting container failed") + return fmt.Errorf("inspecting container failed: %w", err) } switch ctnr.State.Status { @@ -291,7 +291,7 @@ func (r *controller) Wait(ctx context.Context) error { case "die": // exit on terminal events ctnr, err := r.adapter.inspect(ctx) if err != nil { - return errors.Wrap(err, "die event received") + return fmt.Errorf("die event received: %w", err) } return makeExitError(ctnr) @@ -304,7 +304,7 @@ func (r *controller) Wait(ctx context.Context) error { // in this case, we stop the container and report unhealthy status // TODO(runshenzhu): double check if it can cause a dead lock issue here if err := r.Shutdown(ctx); err != nil { - return errors.Wrap(err, "unhealthy container shutdown failed") + return fmt.Errorf("unhealthy container shutdown failed: %w", err) } return ErrContainerUnhealthy } @@ -423,7 +423,7 @@ func (r *controller) waitReady(pctx context.Context) error { ctnr, err := r.adapter.inspect(ctx) if err != nil { if !isUnknownContainer(err) { - return errors.Wrap(err, "inspect container failed") + return fmt.Errorf("inspect container failed: %w", err) } } else { switch ctnr.State.Status { @@ -463,12 +463,12 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti } if err := r.waitReady(ctx); err != nil { - return errors.Wrap(err, "container not ready for logs") + return fmt.Errorf("container not ready for logs: %w", err) } rc, err := r.adapter.logs(ctx, options) if err != nil { - return errors.Wrap(err, "failed getting container logs") + return fmt.Errorf("failed getting container logs: %w", err) } defer rc.Close() @@ -488,24 +488,24 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti // so, message header is 8 bytes, treat as uint64, pull stream off MSB var header uint64 if err := binary.Read(brd, binary.BigEndian, &header); err != nil { - if err == io.EOF { + if errors.Is(err, io.EOF) { 
return nil } - return errors.Wrap(err, "failed reading log header") + return fmt.Errorf("failed reading log header: %w", err) } stream, size := (header>>(7<<3))&0xFF, header & ^(uint64(0xFF)<<(7<<3)) // limit here to decrease allocation back pressure. if err := limiter.WaitN(ctx, int(size)); err != nil { - return errors.Wrap(err, "failed rate limiter") + return fmt.Errorf("failed rate limiter: %w", err) } buf := make([]byte, size) _, err := io.ReadFull(brd, buf) if err != nil { - return errors.Wrap(err, "failed reading buffer") + return fmt.Errorf("failed reading buffer: %w", err) } // Timestamp is RFC3339Nano with 1 space after. Lop, parse, publish @@ -516,12 +516,12 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti ts, err := time.Parse(time.RFC3339Nano, string(parts[0])) if err != nil { - return errors.Wrap(err, "failed to parse timestamp") + return fmt.Errorf("failed to parse timestamp: %w", err) } tsp, err := gogotypes.TimestampProto(ts) if err != nil { - return errors.Wrap(err, "failed to convert timestamp") + return fmt.Errorf("failed to convert timestamp: %w", err) } if err := publisher.Publish(ctx, api.LogMessage{ @@ -531,7 +531,7 @@ func (r *controller) Logs(ctx context.Context, publisher exec.LogPublisher, opti Data: parts[1], }); err != nil { - return errors.Wrap(err, "failed to publish log message") + return fmt.Errorf("failed to publish log message: %w", err) } } } diff --git a/swarmd/go.mod b/swarmd/go.mod index cbd612527b..35b41f885a 100644 --- a/swarmd/go.mod +++ b/swarmd/go.mod @@ -3,37 +3,23 @@ module github.com/moby/swarmkit/swarmd go 1.18 require ( - code.cloudfoundry.org/clock v1.1.0 // indirect - github.com/Microsoft/go-winio v0.6.1 // indirect github.com/cloudflare/cfssl v1.6.4 - github.com/container-storage-interface/spec v1.2.0 // indirect - github.com/distribution/reference v0.5.0 // indirect github.com/docker/docker v24.0.0-rc.2.0.20230908212318-6ce5aa1cd5a4+incompatible // master (v25.0.0-dev) 
github.com/docker/go-connections v0.4.1-0.20231110212414-fa09c952e3ea - github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect - github.com/docker/go-metrics v0.0.1 // indirect github.com/docker/go-units v0.5.0 github.com/dustin/go-humanize v1.0.0 - github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect github.com/gogo/protobuf v1.3.2 - github.com/golang/protobuf v1.5.3 // indirect github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 - github.com/hashicorp/go-memdb v1.3.2 // indirect - github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 github.com/opencontainers/image-spec v1.1.0-rc5 - github.com/pkg/errors v0.9.1 github.com/prometheus/client_golang v1.14.0 github.com/sirupsen/logrus v1.9.3 github.com/spf13/cobra v1.7.0 github.com/spf13/pflag v1.0.5 github.com/stretchr/testify v1.8.4 - go.etcd.io/bbolt v1.3.7 // indirect go.etcd.io/etcd/client/pkg/v3 v3.5.6 - go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect go.etcd.io/etcd/raft/v3 v3.5.6 go.etcd.io/etcd/server/v3 v3.5.6 - golang.org/x/crypto v0.14.0 // indirect - golang.org/x/net v0.17.0 // indirect golang.org/x/time v0.3.0 // NOTE(dperny,cyli): there is some error handling, found in the @@ -52,24 +38,33 @@ require ( google.golang.org/grpc v1.53.0 ) -require github.com/moby/swarmkit/v2 v2.0.0-20240125134710-dcda100a8261 - require ( + code.cloudfoundry.org/clock v1.1.0 // indirect github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect + github.com/Microsoft/go-winio v0.6.1 // indirect github.com/beorn7/perks v1.0.1 // indirect github.com/cespare/xxhash/v2 v2.2.0 // indirect + github.com/container-storage-interface/spec v1.2.0 // indirect github.com/containerd/containerd v1.6.22 // indirect github.com/davecgh/go-spew v1.1.1 // indirect + github.com/distribution/reference v0.5.0 // indirect + github.com/docker/go-events v0.0.0-20190806004212-e31b211e4f1c // indirect + github.com/docker/go-metrics 
v0.0.1 // indirect github.com/felixge/httpsnoop v1.0.3 // indirect + github.com/fernet/fernet-go v0.0.0-20211208181803-9f70042a33ee // indirect github.com/go-logr/logr v1.2.4 // indirect github.com/go-logr/stdr v1.2.2 // indirect + github.com/golang/protobuf v1.5.3 // indirect github.com/google/certificate-transparency-go v1.1.4 // indirect github.com/hashicorp/go-immutable-radix v1.3.1 // indirect + github.com/hashicorp/go-memdb v1.3.2 // indirect github.com/hashicorp/golang-lru v0.5.4 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/ishidawataru/sctp v0.0.0-20230406120618-7ff4192f6ff2 // indirect github.com/jmoiron/sqlx v1.3.3 // indirect github.com/matttproud/golang_protobuf_extensions v1.0.4 // indirect + github.com/opencontainers/go-digest v1.0.0 // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/pmezard/go-difflib v1.0.0 // indirect github.com/prometheus/client_model v0.3.0 // indirect github.com/prometheus/common v0.42.0 // indirect @@ -77,6 +72,8 @@ require ( github.com/weppos/publicsuffix-go v0.15.1-0.20210511084619-b1f36a2d6c0b // indirect github.com/zmap/zcrypto v0.0.0-20210511125630-18f1e0152cfc // indirect github.com/zmap/zlint/v3 v3.1.0 // indirect + go.etcd.io/bbolt v1.3.7 // indirect + go.etcd.io/etcd/pkg/v3 v3.5.6 // indirect go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.29.0 // indirect go.opentelemetry.io/otel v1.4.1 // indirect go.opentelemetry.io/otel/internal/metric v0.27.0 // indirect @@ -85,7 +82,9 @@ require ( go.uber.org/atomic v1.9.0 // indirect go.uber.org/multierr v1.8.0 // indirect go.uber.org/zap v1.21.0 // indirect + golang.org/x/crypto v0.14.0 // indirect golang.org/x/mod v0.11.0 // indirect + golang.org/x/net v0.17.0 // indirect golang.org/x/sys v0.13.0 // indirect golang.org/x/text v0.13.0 // indirect golang.org/x/tools v0.10.0 // indirect diff --git a/template/context.go b/template/context.go index b1d995e994..31a0ccbe96 100644 --- a/template/context.go +++ 
b/template/context.go @@ -2,6 +2,7 @@ package template import ( "bytes" + "errors" "fmt" "strings" "text/template" @@ -11,7 +12,6 @@ import ( "github.com/moby/swarmkit/v2/agent/secrets" "github.com/moby/swarmkit/v2/api" "github.com/moby/swarmkit/v2/api/naming" - "github.com/pkg/errors" ) // Platform holds information about the underlying platform of the node @@ -132,7 +132,7 @@ func (ctx *PayloadContext) secretGetter(target string) (string, error) { } } - return "", errors.Errorf("secret target %s not found", target) + return "", fmt.Errorf("secret target %s not found", target) } func (ctx *PayloadContext) configGetter(target string) (string, error) { @@ -156,7 +156,7 @@ func (ctx *PayloadContext) configGetter(target string) (string, error) { } } - return "", errors.Errorf("config target %s not found", target) + return "", fmt.Errorf("config target %s not found", target) } func (ctx *PayloadContext) envGetter(variable string) (string, error) { diff --git a/template/expand.go b/template/expand.go index 13a7add036..d1b8c7434d 100644 --- a/template/expand.go +++ b/template/expand.go @@ -1,12 +1,12 @@ package template import ( + "errors" "fmt" "strings" "github.com/moby/swarmkit/v2/agent/exec" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" ) // ExpandContainerSpec expands templated fields in the runtime using the task @@ -18,7 +18,7 @@ import ( func ExpandContainerSpec(n *api.NodeDescription, t *api.Task) (*api.ContainerSpec, error) { container := t.Spec.GetContainer() if container == nil { - return nil, errors.Errorf("task missing ContainerSpec to expand") + return nil, errors.New("task missing ContainerSpec to expand") } container = container.Copy() @@ -27,17 +27,17 @@ func ExpandContainerSpec(n *api.NodeDescription, t *api.Task) (*api.ContainerSpe var err error container.Env, err = expandEnv(ctx, container.Env) if err != nil { - return container, errors.Wrap(err, "expanding env failed") + return container, fmt.Errorf("expanding env failed: %w", err) } 
// For now, we only allow templating of string-based mount fields container.Mounts, err = expandMounts(ctx, container.Mounts) if err != nil { - return container, errors.Wrap(err, "expanding mounts failed") + return container, fmt.Errorf("expanding mounts failed: %w", err) } container.Hostname, err = ctx.Expand(container.Hostname) - return container, errors.Wrap(err, "expanding hostname failed") + return container, fmt.Errorf("expanding hostname failed: %w", err) } func expandMounts(ctx Context, mounts []api.Mount) ([]api.Mount, error) { @@ -50,24 +50,24 @@ func expandMounts(ctx Context, mounts []api.Mount) ([]api.Mount, error) { var err error mount.Source, err = ctx.Expand(mount.Source) if err != nil { - return mounts, errors.Wrapf(err, "expanding mount source %q", mount.Source) + return mounts, fmt.Errorf("expanding mount source %q: %w", mount.Source, err) } mount.Target, err = ctx.Expand(mount.Target) if err != nil { - return mounts, errors.Wrapf(err, "expanding mount target %q", mount.Target) + return mounts, fmt.Errorf("expanding mount target %q: %w", mount.Target, err) } if mount.VolumeOptions != nil { mount.VolumeOptions.Labels, err = expandMap(ctx, mount.VolumeOptions.Labels) if err != nil { - return mounts, errors.Wrap(err, "expanding volume labels") + return mounts, fmt.Errorf("expanding volume labels: %w", err) } if mount.VolumeOptions.DriverConfig != nil { mount.VolumeOptions.DriverConfig.Options, err = expandMap(ctx, mount.VolumeOptions.DriverConfig.Options) if err != nil { - return mounts, errors.Wrap(err, "expanding volume driver config") + return mounts, fmt.Errorf("expanding volume driver config: %w", err) } } } @@ -87,7 +87,7 @@ func expandMap(ctx Context, m map[string]string) (map[string]string, error) { for k, v := range m { v, err = ctx.Expand(v) if err != nil { - return m, errors.Wrapf(err, "expanding map entry %q=%q", k, v) + return m, fmt.Errorf("expanding map entry %q=%q: %w", k, v, err) } n[k] = v @@ -107,7 +107,7 @@ func expandEnv(ctx 
Context, values []string) ([]string, error) { if len(parts) > 1 { expanded, err := ctx.Expand(parts[1]) if err != nil { - return values, errors.Wrapf(err, "expanding env %q", value) + return values, fmt.Errorf("expanding env %q: %w", value, err) } entry = fmt.Sprintf("%s=%s", entry, expanded) diff --git a/template/getter.go b/template/getter.go index e8344735db..0211059177 100644 --- a/template/getter.go +++ b/template/getter.go @@ -1,9 +1,11 @@ package template import ( + "errors" + "fmt" + "github.com/moby/swarmkit/v2/agent/exec" "github.com/moby/swarmkit/v2/api" - "github.com/pkg/errors" ) type templatedSecretGetter struct { @@ -34,7 +36,7 @@ func (t templatedSecretGetter) Get(secretID string) (*api.Secret, error) { newSpec, err := ExpandSecretSpec(secret, t.node, t.t, t.dependencies) if err != nil { - return secret, errors.Wrapf(err, "failed to expand templated secret %s", secretID) + return secret, fmt.Errorf("failed to expand templated secret %s: %w", secretID, err) } secretCopy := *secret @@ -87,7 +89,7 @@ func (t templatedConfigGetter) GetAndFlagSecretData(configID string) (*api.Confi newSpec, sensitive, err := ExpandConfigSpec(config, t.node, t.t, t.dependencies) if err != nil { - return config, false, errors.Wrapf(err, "failed to expand templated config %s", configID) + return config, false, fmt.Errorf("failed to expand templated config %s: %w", configID, err) } configCopy := *config diff --git a/testutils/poll.go b/testutils/poll.go index a2cbeb88d7..d220c69552 100644 --- a/testutils/poll.go +++ b/testutils/poll.go @@ -1,10 +1,10 @@ package testutils import ( + "fmt" "time" "code.cloudfoundry.org/clock/fakeclock" - "github.com/pkg/errors" ) // PollFuncWithTimeout is used to periodically execute a check function, it @@ -25,7 +25,7 @@ func PollFuncWithTimeout(clockSource *fakeclock.FakeClock, f func() error, timeo } select { case <-timer.C: - return errors.Wrap(err, "polling failed") + return fmt.Errorf("polling failed: %w", err) case <-time.After(50 * 
time.Millisecond): } } diff --git a/vendor/github.com/gogo/protobuf/protobuf/Makefile b/vendor/github.com/gogo/protobuf/protobuf/Makefile deleted file mode 100644 index e3e107663e..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/Makefile +++ /dev/null @@ -1,65 +0,0 @@ -VERSION=3.9.1 -URL="https://raw.githubusercontent.com/protocolbuffers/protobuf/v${VERSION}/src/google/protobuf" - -regenerate: - go install github.com/gogo/protobuf/protoc-gen-gogotypes - go install github.com/gogo/protobuf/protoc-min-version - - protoc-min-version \ - --version="3.0.0" \ - --gogotypes_out=../types/ \ - -I=. \ - google/protobuf/any.proto \ - google/protobuf/type.proto \ - google/protobuf/empty.proto \ - google/protobuf/api.proto \ - google/protobuf/timestamp.proto \ - google/protobuf/duration.proto \ - google/protobuf/struct.proto \ - google/protobuf/wrappers.proto \ - google/protobuf/field_mask.proto \ - google/protobuf/source_context.proto - - mv ../types/google/protobuf/*.pb.go ../types/ || true - rmdir ../types/google/protobuf || true - rmdir ../types/google || true - -update: - go install github.com/gogo/protobuf/gogoreplace - - (cd ./google/protobuf && rm descriptor.proto; wget ${URL}/descriptor.proto) - # gogoprotobuf requires users to import gogo.proto which imports descriptor.proto - # The descriptor.proto is only compatible with proto3 just because of the reserved keyword. 
- # We remove it to stay compatible with previous versions of protoc before proto3 - gogoreplace 'reserved 38;' '//reserved 38;' ./google/protobuf/descriptor.proto - gogoreplace 'reserved 8;' '//reserved 8;' ./google/protobuf/descriptor.proto - gogoreplace 'reserved 9;' '//reserved 9;' ./google/protobuf/descriptor.proto - gogoreplace 'reserved 4;' '//reserved 4;' ./google/protobuf/descriptor.proto - gogoreplace 'reserved 5;' '//reserved 5;' ./google/protobuf/descriptor.proto - gogoreplace 'option go_package = "github.com/golang/protobuf/protoc-gen-go/descriptor;descriptor";' 'option go_package = "descriptor";' ./google/protobuf/descriptor.proto - - (cd ./google/protobuf/compiler && rm plugin.proto; wget ${URL}/compiler/plugin.proto) - gogoreplace 'option go_package = "github.com/golang/protobuf/protoc-gen-go/plugin;plugin_go";' 'option go_package = "plugin_go";' ./google/protobuf/compiler/plugin.proto - - (cd ./google/protobuf && rm any.proto; wget ${URL}/any.proto) - gogoreplace 'go_package = "github.com/golang/protobuf/ptypes/any";' 'go_package = "types";' ./google/protobuf/any.proto - (cd ./google/protobuf && rm empty.proto; wget ${URL}/empty.proto) - gogoreplace 'go_package = "github.com/golang/protobuf/ptypes/empty";' 'go_package = "types";' ./google/protobuf/empty.proto - (cd ./google/protobuf && rm timestamp.proto; wget ${URL}/timestamp.proto) - gogoreplace 'go_package = "github.com/golang/protobuf/ptypes/timestamp";' 'go_package = "types";' ./google/protobuf/timestamp.proto - (cd ./google/protobuf && rm duration.proto; wget ${URL}/duration.proto) - gogoreplace 'go_package = "github.com/golang/protobuf/ptypes/duration";' 'go_package = "types";' ./google/protobuf/duration.proto - (cd ./google/protobuf && rm struct.proto; wget ${URL}/struct.proto) - gogoreplace 'go_package = "github.com/golang/protobuf/ptypes/struct;structpb";' 'go_package = "types";' ./google/protobuf/struct.proto - (cd ./google/protobuf && rm wrappers.proto; wget ${URL}/wrappers.proto) - 
gogoreplace 'go_package = "github.com/golang/protobuf/ptypes/wrappers";' 'go_package = "types";' ./google/protobuf/wrappers.proto - (cd ./google/protobuf && rm field_mask.proto; wget ${URL}/field_mask.proto) - gogoreplace 'option go_package = "google.golang.org/genproto/protobuf/field_mask;field_mask";' 'option go_package = "types";' ./google/protobuf/field_mask.proto - (cd ./google/protobuf && rm api.proto; wget ${URL}/api.proto) - gogoreplace 'option go_package = "google.golang.org/genproto/protobuf/api;api";' 'option go_package = "types";' ./google/protobuf/api.proto - (cd ./google/protobuf && rm type.proto; wget ${URL}/type.proto) - gogoreplace 'option go_package = "google.golang.org/genproto/protobuf/ptype;ptype";' 'option go_package = "types";' ./google/protobuf/type.proto - (cd ./google/protobuf && rm source_context.proto; wget ${URL}/source_context.proto) - gogoreplace 'option go_package = "google.golang.org/genproto/protobuf/source_context;source_context";' 'option go_package = "types";' ./google/protobuf/source_context.proto - - diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto deleted file mode 100644 index 4cf3843bd7..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/any.proto +++ /dev/null @@ -1,155 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "types"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "AnyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Any` contains an arbitrary serialized protocol buffer message along with a -// URL that describes the type of the serialized message. -// -// Protobuf library provides support to pack/unpack Any values in the form -// of utility functions or additional generated methods of the Any type. -// -// Example 1: Pack and unpack a message in C++. -// -// Foo foo = ...; -// Any any; -// any.PackFrom(foo); -// ... -// if (any.UnpackTo(&foo)) { -// ... 
-// } -// -// Example 2: Pack and unpack a message in Java. -// -// Foo foo = ...; -// Any any = Any.pack(foo); -// ... -// if (any.is(Foo.class)) { -// foo = any.unpack(Foo.class); -// } -// -// Example 3: Pack and unpack a message in Python. -// -// foo = Foo(...) -// any = Any() -// any.Pack(foo) -// ... -// if any.Is(Foo.DESCRIPTOR): -// any.Unpack(foo) -// ... -// -// Example 4: Pack and unpack a message in Go -// -// foo := &pb.Foo{...} -// any, err := ptypes.MarshalAny(foo) -// ... -// foo := &pb.Foo{} -// if err := ptypes.UnmarshalAny(any, foo); err != nil { -// ... -// } -// -// The pack methods provided by protobuf library will by default use -// 'type.googleapis.com/full.type.name' as the type URL and the unpack -// methods only use the fully qualified type name after the last '/' -// in the type URL, for example "foo.bar.com/x/y.z" will yield type -// name "y.z". -// -// -// JSON -// ==== -// The JSON representation of an `Any` value uses the regular -// representation of the deserialized, embedded message, with an -// additional field `@type` which contains the type URL. Example: -// -// package google.profile; -// message Person { -// string first_name = 1; -// string last_name = 2; -// } -// -// { -// "@type": "type.googleapis.com/google.profile.Person", -// "firstName": , -// "lastName": -// } -// -// If the embedded message type is well-known and has a custom JSON -// representation, that representation will be embedded adding a field -// `value` which holds the custom JSON in addition to the `@type` -// field. Example (for message [google.protobuf.Duration][]): -// -// { -// "@type": "type.googleapis.com/google.protobuf.Duration", -// "value": "1.212s" -// } -// -message Any { - // A URL/resource name that uniquely identifies the type of the serialized - // protocol buffer message. This string must contain at least - // one "/" character. 
The last segment of the URL's path must represent - // the fully qualified name of the type (as in - // `path/google.protobuf.Duration`). The name should be in a canonical form - // (e.g., leading "." is not accepted). - // - // In practice, teams usually precompile into the binary all types that they - // expect it to use in the context of Any. However, for URLs which use the - // scheme `http`, `https`, or no scheme, one can optionally set up a type - // server that maps type URLs to message definitions as follows: - // - // * If no scheme is provided, `https` is assumed. - // * An HTTP GET on the URL must yield a [google.protobuf.Type][] - // value in binary format, or produce an error. - // * Applications are allowed to cache lookup results based on the - // URL, or have them precompiled into a binary to avoid any - // lookup. Therefore, binary compatibility needs to be preserved - // on changes to types. (Use versioned type names to manage - // breaking changes.) - // - // Note: this functionality is not currently available in the official - // protobuf release, and it is not used for type URLs beginning with - // type.googleapis.com. - // - // Schemes other than `http`, `https` (or the empty scheme) might be - // used with implementation specific semantics. - // - string type_url = 1; - - // Must be a valid serialized protocol buffer of the above specified type. - bytes value = 2; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto deleted file mode 100644 index 67c1ddbd9d..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/api.proto +++ /dev/null @@ -1,210 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -import "google/protobuf/source_context.proto"; -import "google/protobuf/type.proto"; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "ApiProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option go_package = "types"; - -// Api is a light-weight descriptor for an API Interface. -// -// Interfaces are also described as "protocol buffer services" in some contexts, -// such as by the "service" keyword in a .proto file, but they are different -// from API Services, which represent a concrete implementation of an interface -// as opposed to simply a description of methods and bindings. They are also -// sometimes simply referred to as "APIs" in other contexts, such as the name of -// this message itself. See https://cloud.google.com/apis/design/glossary for -// detailed terminology. -message Api { - - // The fully qualified name of this interface, including package name - // followed by the interface's simple name. - string name = 1; - - // The methods of this interface, in unspecified order. - repeated Method methods = 2; - - // Any metadata attached to the interface. - repeated Option options = 3; - - // A version string for this interface. If specified, must have the form - // `major-version.minor-version`, as in `1.10`. If the minor version is - // omitted, it defaults to zero. If the entire version field is empty, the - // major version is derived from the package name, as outlined below. If the - // field is not empty, the version in the package name will be verified to be - // consistent with what is provided here. - // - // The versioning schema uses [semantic - // versioning](http://semver.org) where the major version number - // indicates a breaking change and the minor version an additive, - // non-breaking change. 
Both version numbers are signals to users - // what to expect from different versions, and should be carefully - // chosen based on the product plan. - // - // The major version is also reflected in the package name of the - // interface, which must end in `v`, as in - // `google.feature.v1`. For major versions 0 and 1, the suffix can - // be omitted. Zero major versions must only be used for - // experimental, non-GA interfaces. - // - // - string version = 4; - - // Source context for the protocol buffer service represented by this - // message. - SourceContext source_context = 5; - - // Included interfaces. See [Mixin][]. - repeated Mixin mixins = 6; - - // The source syntax of the service. - Syntax syntax = 7; -} - -// Method represents a method of an API interface. -message Method { - - // The simple name of this method. - string name = 1; - - // A URL of the input message type. - string request_type_url = 2; - - // If true, the request is streamed. - bool request_streaming = 3; - - // The URL of the output message type. - string response_type_url = 4; - - // If true, the response is streamed. - bool response_streaming = 5; - - // Any metadata attached to the method. - repeated Option options = 6; - - // The source syntax of this method. - Syntax syntax = 7; -} - -// Declares an API Interface to be included in this interface. The including -// interface must redeclare all the methods from the included interface, but -// documentation and options are inherited as follows: -// -// - If after comment and whitespace stripping, the documentation -// string of the redeclared method is empty, it will be inherited -// from the original method. -// -// - Each annotation belonging to the service config (http, -// visibility) which is not set in the redeclared method will be -// inherited. -// -// - If an http annotation is inherited, the path pattern will be -// modified as follows. 
Any version prefix will be replaced by the -// version of the including interface plus the [root][] path if -// specified. -// -// Example of a simple mixin: -// -// package google.acl.v1; -// service AccessControl { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v1/{resource=**}:getAcl"; -// } -// } -// -// package google.storage.v2; -// service Storage { -// rpc GetAcl(GetAclRequest) returns (Acl); -// -// // Get a data record. -// rpc GetData(GetDataRequest) returns (Data) { -// option (google.api.http).get = "/v2/{resource=**}"; -// } -// } -// -// Example of a mixin configuration: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// -// The mixin construct implies that all methods in `AccessControl` are -// also declared with same name and request/response types in -// `Storage`. A documentation generator or annotation processor will -// see the effective `Storage.GetAcl` method after inherting -// documentation and annotations as follows: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v2/{resource=**}:getAcl"; -// } -// ... -// } -// -// Note how the version in the path pattern changed from `v1` to `v2`. -// -// If the `root` field in the mixin is specified, it should be a -// relative path under which inherited HTTP paths are placed. Example: -// -// apis: -// - name: google.storage.v2.Storage -// mixins: -// - name: google.acl.v1.AccessControl -// root: acls -// -// This implies the following inherited HTTP annotation: -// -// service Storage { -// // Get the underlying ACL object. -// rpc GetAcl(GetAclRequest) returns (Acl) { -// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; -// } -// ... -// } -message Mixin { - // The fully qualified name of the interface which is included. 
- string name = 1; - - // If non-empty specifies a path under which inherited HTTP paths - // are rooted. - string root = 2; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto deleted file mode 100644 index 4a88adf148..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/compiler/plugin.proto +++ /dev/null @@ -1,168 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// -// WARNING: The plugin interface is currently EXPERIMENTAL and is subject to -// change. -// -// protoc (aka the Protocol Compiler) can be extended via plugins. A plugin is -// just a program that reads a CodeGeneratorRequest from stdin and writes a -// CodeGeneratorResponse to stdout. -// -// Plugins written using C++ can use google/protobuf/compiler/plugin.h instead -// of dealing with the raw protocol defined here. -// -// A plugin executable needs only to be placed somewhere in the path. The -// plugin should be named "protoc-gen-$NAME", and will then be used when the -// flag "--${NAME}_out" is passed to protoc. - -syntax = "proto2"; - -package google.protobuf.compiler; -option java_package = "com.google.protobuf.compiler"; -option java_outer_classname = "PluginProtos"; - -option go_package = "plugin_go"; - -import "google/protobuf/descriptor.proto"; - -// The version number of protocol compiler. -message Version { - optional int32 major = 1; - optional int32 minor = 2; - optional int32 patch = 3; - // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should - // be empty for mainline stable releases. - optional string suffix = 4; -} - -// An encoded CodeGeneratorRequest is written to the plugin's stdin. -message CodeGeneratorRequest { - // The .proto files that were explicitly listed on the command-line. 
The - // code generator should generate code only for these files. Each file's - // descriptor will be included in proto_file, below. - repeated string file_to_generate = 1; - - // The generator parameter passed on the command-line. - optional string parameter = 2; - - // FileDescriptorProtos for all files in files_to_generate and everything - // they import. The files will appear in topological order, so each file - // appears before any file that imports it. - // - // protoc guarantees that all proto_files will be written after - // the fields above, even though this is not technically guaranteed by the - // protobuf wire format. This theoretically could allow a plugin to stream - // in the FileDescriptorProtos and handle them one by one rather than read - // the entire set into memory at once. However, as of this writing, this - // is not similarly optimized on protoc's end -- it will store all fields in - // memory at once before sending them to the plugin. - // - // Type names of fields and extensions in the FileDescriptorProto are always - // fully qualified. - repeated FileDescriptorProto proto_file = 15; - - // The version number of protocol compiler. - optional Version compiler_version = 3; - -} - -// The plugin writes an encoded CodeGeneratorResponse to stdout. -message CodeGeneratorResponse { - // Error message. If non-empty, code generation failed. The plugin process - // should exit with status code zero even if it reports an error in this way. - // - // This should be used to indicate errors in .proto files which prevent the - // code generator from generating correct code. Errors which indicate a - // problem in protoc itself -- such as the input CodeGeneratorRequest being - // unparseable -- should be reported by writing a message to stderr and - // exiting with a non-zero status code. - optional string error = 1; - - // Represents a single generated file. - message File { - // The file name, relative to the output directory. 
The name must not - // contain "." or ".." components and must be relative, not be absolute (so, - // the file cannot lie outside the output directory). "/" must be used as - // the path separator, not "\". - // - // If the name is omitted, the content will be appended to the previous - // file. This allows the generator to break large files into small chunks, - // and allows the generated text to be streamed back to protoc so that large - // files need not reside completely in memory at one time. Note that as of - // this writing protoc does not optimize for this -- it will read the entire - // CodeGeneratorResponse before writing files to disk. - optional string name = 1; - - // If non-empty, indicates that the named file should already exist, and the - // content here is to be inserted into that file at a defined insertion - // point. This feature allows a code generator to extend the output - // produced by another code generator. The original generator may provide - // insertion points by placing special annotations in the file that look - // like: - // @@protoc_insertion_point(NAME) - // The annotation can have arbitrary text before and after it on the line, - // which allows it to be placed in a comment. NAME should be replaced with - // an identifier naming the point -- this is what other generators will use - // as the insertion_point. Code inserted at this point will be placed - // immediately above the line containing the insertion point (thus multiple - // insertions to the same point will come out in the order they were added). - // The double-@ is intended to make it unlikely that the generated code - // could contain things that look like insertion points by accident. - // - // For example, the C++ code generator places the following line in the - // .pb.h files that it generates: - // // @@protoc_insertion_point(namespace_scope) - // This line appears within the scope of the file's package namespace, but - // outside of any particular class. 
Another plugin can then specify the - // insertion_point "namespace_scope" to generate additional classes or - // other declarations that should be placed in this scope. - // - // Note that if the line containing the insertion point begins with - // whitespace, the same whitespace will be added to every line of the - // inserted text. This is useful for languages like Python, where - // indentation matters. In these languages, the insertion point comment - // should be indented the same amount as any inserted code will need to be - // in order to work correctly in that context. - // - // The code generator that generates the initial file and the one which - // inserts into it must both run as part of a single invocation of protoc. - // Code generators are executed in the order in which they appear on the - // command line. - // - // If |insertion_point| is present, |name| must also be present. - optional string insertion_point = 2; - - // The file contents. - optional string content = 15; - } - repeated File file = 15; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto deleted file mode 100644 index 4a08905a56..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/descriptor.proto +++ /dev/null @@ -1,885 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. 
-// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Author: kenton@google.com (Kenton Varda) -// Based on original Protocol Buffers design by -// Sanjay Ghemawat, Jeff Dean, and others. -// -// The messages in this file describe the definitions found in .proto files. -// A valid .proto file can be translated directly to a FileDescriptorProto -// without any other information (e.g. without reading its imports). 
- - -syntax = "proto2"; - -package google.protobuf; - -option go_package = "descriptor"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DescriptorProtos"; -option csharp_namespace = "Google.Protobuf.Reflection"; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// descriptor.proto must be optimized for speed because reflection-based -// algorithms don't work during bootstrapping. -option optimize_for = SPEED; - -// The protocol compiler can output a FileDescriptorSet containing the .proto -// files it parses. -message FileDescriptorSet { - repeated FileDescriptorProto file = 1; -} - -// Describes a complete .proto file. -message FileDescriptorProto { - optional string name = 1; // file name, relative to root of source tree - optional string package = 2; // e.g. "foo", "foo.bar", etc. - - // Names of files imported by this file. - repeated string dependency = 3; - // Indexes of the public imported files in the dependency list above. - repeated int32 public_dependency = 10; - // Indexes of the weak imported files in the dependency list. - // For Google-internal migration only. Do not use. - repeated int32 weak_dependency = 11; - - // All top-level definitions in this file. - repeated DescriptorProto message_type = 4; - repeated EnumDescriptorProto enum_type = 5; - repeated ServiceDescriptorProto service = 6; - repeated FieldDescriptorProto extension = 7; - - optional FileOptions options = 8; - - // This field contains optional information about the original source code. - // You may safely remove this entire field without harming runtime - // functionality of the descriptors -- the information is needed only by - // development tools. - optional SourceCodeInfo source_code_info = 9; - - // The syntax of the proto file. - // The supported values are "proto2" and "proto3". - optional string syntax = 12; -} - -// Describes a message type. 
-message DescriptorProto { - optional string name = 1; - - repeated FieldDescriptorProto field = 2; - repeated FieldDescriptorProto extension = 6; - - repeated DescriptorProto nested_type = 3; - repeated EnumDescriptorProto enum_type = 4; - - message ExtensionRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - - optional ExtensionRangeOptions options = 3; - } - repeated ExtensionRange extension_range = 5; - - repeated OneofDescriptorProto oneof_decl = 8; - - optional MessageOptions options = 7; - - // Range of reserved tag numbers. Reserved tag numbers may not be used by - // fields or extension ranges in the same message. Reserved ranges may - // not overlap. - message ReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Exclusive. - } - repeated ReservedRange reserved_range = 9; - // Reserved field names, which may not be used by fields in the same message. - // A given name may only be reserved once. - repeated string reserved_name = 10; -} - -message ExtensionRangeOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -// Describes a field within a message. -message FieldDescriptorProto { - enum Type { - // 0 is reserved for errors. - // Order is weird for historical reasons. - TYPE_DOUBLE = 1; - TYPE_FLOAT = 2; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT64 if - // negative values are likely. - TYPE_INT64 = 3; - TYPE_UINT64 = 4; - // Not ZigZag encoded. Negative numbers take 10 bytes. Use TYPE_SINT32 if - // negative values are likely. - TYPE_INT32 = 5; - TYPE_FIXED64 = 6; - TYPE_FIXED32 = 7; - TYPE_BOOL = 8; - TYPE_STRING = 9; - // Tag-delimited aggregate. - // Group type is deprecated and not supported in proto3. 
However, Proto3 - // implementations should still be able to parse the group wire format and - // treat group fields as unknown fields. - TYPE_GROUP = 10; - TYPE_MESSAGE = 11; // Length-delimited aggregate. - - // New in version 2. - TYPE_BYTES = 12; - TYPE_UINT32 = 13; - TYPE_ENUM = 14; - TYPE_SFIXED32 = 15; - TYPE_SFIXED64 = 16; - TYPE_SINT32 = 17; // Uses ZigZag encoding. - TYPE_SINT64 = 18; // Uses ZigZag encoding. - } - - enum Label { - // 0 is reserved for errors - LABEL_OPTIONAL = 1; - LABEL_REQUIRED = 2; - LABEL_REPEATED = 3; - } - - optional string name = 1; - optional int32 number = 3; - optional Label label = 4; - - // If type_name is set, this need not be set. If both this and type_name - // are set, this must be one of TYPE_ENUM, TYPE_MESSAGE or TYPE_GROUP. - optional Type type = 5; - - // For message and enum types, this is the name of the type. If the name - // starts with a '.', it is fully-qualified. Otherwise, C++-like scoping - // rules are used to find the type (i.e. first the nested types within this - // message are searched, then within the parent, on up to the root - // namespace). - optional string type_name = 6; - - // For extensions, this is the name of the type being extended. It is - // resolved in the same manner as type_name. - optional string extendee = 2; - - // For numeric types, contains the original text representation of the value. - // For booleans, "true" or "false". - // For strings, contains the default text contents (not escaped in any way). - // For bytes, contains the C escaped value. All bytes >= 128 are escaped. - // TODO(kenton): Base-64 encode? - optional string default_value = 7; - - // If set, gives the index of a oneof in the containing type's oneof_decl - // list. This field is a member of that oneof. - optional int32 oneof_index = 9; - - // JSON name of this field. The value is set by protocol compiler. If the - // user has set a "json_name" option on this field, that option's value - // will be used. 
Otherwise, it's deduced from the field's name by converting - // it to camelCase. - optional string json_name = 10; - - optional FieldOptions options = 8; -} - -// Describes a oneof. -message OneofDescriptorProto { - optional string name = 1; - optional OneofOptions options = 2; -} - -// Describes an enum type. -message EnumDescriptorProto { - optional string name = 1; - - repeated EnumValueDescriptorProto value = 2; - - optional EnumOptions options = 3; - - // Range of reserved numeric values. Reserved values may not be used by - // entries in the same enum. Reserved ranges may not overlap. - // - // Note that this is distinct from DescriptorProto.ReservedRange in that it - // is inclusive such that it can appropriately represent the entire int32 - // domain. - message EnumReservedRange { - optional int32 start = 1; // Inclusive. - optional int32 end = 2; // Inclusive. - } - - // Range of reserved numeric values. Reserved numeric values may not be used - // by enum values in the same enum declaration. Reserved ranges may not - // overlap. - repeated EnumReservedRange reserved_range = 4; - - // Reserved enum value names, which may not be reused. A given name may only - // be reserved once. - repeated string reserved_name = 5; -} - -// Describes a value within an enum. -message EnumValueDescriptorProto { - optional string name = 1; - optional int32 number = 2; - - optional EnumValueOptions options = 3; -} - -// Describes a service. -message ServiceDescriptorProto { - optional string name = 1; - repeated MethodDescriptorProto method = 2; - - optional ServiceOptions options = 3; -} - -// Describes a method of a service. -message MethodDescriptorProto { - optional string name = 1; - - // Input and output type names. These are resolved in the same way as - // FieldDescriptorProto.type_name, but must refer to a message type. 
- optional string input_type = 2; - optional string output_type = 3; - - optional MethodOptions options = 4; - - // Identifies if client streams multiple client messages - optional bool client_streaming = 5 [default = false]; - // Identifies if server streams multiple server messages - optional bool server_streaming = 6 [default = false]; -} - - -// =================================================================== -// Options - -// Each of the definitions above may have "options" attached. These are -// just annotations which may cause code to be generated slightly differently -// or may contain hints for code that manipulates protocol messages. -// -// Clients may define custom options as extensions of the *Options messages. -// These extensions may not yet be known at parsing time, so the parser cannot -// store the values in them. Instead it stores them in a field in the *Options -// message called uninterpreted_option. This field must have the same name -// across all *Options messages. We then use this field to populate the -// extensions when we build a descriptor, at which point all protos have been -// parsed and so all extensions are known. -// -// Extension numbers for custom options may be chosen as follows: -// * For options which will only be used within a single application or -// organization, or for experimental options, use field numbers 50000 -// through 99999. It is up to you to ensure that you do not use the -// same number for multiple options. -// * For options which will be published and used publicly by multiple -// independent entities, e-mail protobuf-global-extension-registry@google.com -// to reserve extension numbers. Simply provide your project name (e.g. -// Objective-C plugin) and your project website (if available) -- there's no -// need to explain how you intend to use them. Usually you only need one -// extension number. You can declare multiple options with only one extension -// number by putting them in a sub-message. 
See the Custom Options section of -// the docs for examples: -// https://developers.google.com/protocol-buffers/docs/proto#options -// If this turns out to be popular, a web service will be set up -// to automatically assign option numbers. - -message FileOptions { - - // Sets the Java package where classes generated from this .proto will be - // placed. By default, the proto package is used, but this is often - // inappropriate because proto packages do not normally start with backwards - // domain names. - optional string java_package = 1; - - - // If set, all the classes from the .proto file are wrapped in a single - // outer class with the given name. This applies to both Proto1 - // (equivalent to the old "--one_java_file" option) and Proto2 (where - // a .proto always translates to a single class, but you may want to - // explicitly choose the class name). - optional string java_outer_classname = 8; - - // If set true, then the Java code generator will generate a separate .java - // file for each top-level message, enum, and service defined in the .proto - // file. Thus, these types will *not* be nested inside the outer class - // named by java_outer_classname. However, the outer class will still be - // generated to contain the file's getDescriptor() method as well as any - // top-level extensions defined in the file. - optional bool java_multiple_files = 10 [default = false]; - - // This option does nothing. - optional bool java_generate_equals_and_hash = 20 [deprecated=true]; - - // If set true, then the Java2 code generator will generate code that - // throws an exception whenever an attempt is made to assign a non-UTF-8 - // byte sequence to a string field. - // Message reflection will do the same. - // However, an extension field still accepts non-UTF-8 byte sequences. - // This option has no effect on when used with the lite runtime. 
- optional bool java_string_check_utf8 = 27 [default = false]; - - - // Generated classes can be optimized for speed or code size. - enum OptimizeMode { - SPEED = 1; // Generate complete code for parsing, serialization, - // etc. - CODE_SIZE = 2; // Use ReflectionOps to implement these methods. - LITE_RUNTIME = 3; // Generate code using MessageLite and the lite runtime. - } - optional OptimizeMode optimize_for = 9 [default = SPEED]; - - // Sets the Go package where structs generated from this .proto will be - // placed. If omitted, the Go package will be derived from the following: - // - The basename of the package import path, if provided. - // - Otherwise, the package statement in the .proto file, if present. - // - Otherwise, the basename of the .proto file, without extension. - optional string go_package = 11; - - - - - // Should generic services be generated in each language? "Generic" services - // are not specific to any particular RPC system. They are generated by the - // main code generators in each language (without additional plugins). - // Generic services were the only kind of service generation supported by - // early versions of google.protobuf. - // - // Generic services are now considered deprecated in favor of using plugins - // that generate code specific to your particular RPC system. Therefore, - // these default to false. Old code which depends on generic services should - // explicitly set them to true. - optional bool cc_generic_services = 16 [default = false]; - optional bool java_generic_services = 17 [default = false]; - optional bool py_generic_services = 18 [default = false]; - optional bool php_generic_services = 42 [default = false]; - - // Is this file deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for everything in the file, or it will be completely ignored; in the very - // least, this is a formalization for deprecating files. 
- optional bool deprecated = 23 [default = false]; - - // Enables the use of arenas for the proto messages in this file. This applies - // only to generated classes for C++. - optional bool cc_enable_arenas = 31 [default = false]; - - - // Sets the objective c class prefix which is prepended to all objective c - // generated classes from this .proto. There is no default. - optional string objc_class_prefix = 36; - - // Namespace for generated classes; defaults to the package. - optional string csharp_namespace = 37; - - // By default Swift generators will take the proto package and CamelCase it - // replacing '.' with underscore and use that to prefix the types/symbols - // defined. When this options is provided, they will use this value instead - // to prefix the types/symbols defined. - optional string swift_prefix = 39; - - // Sets the php class prefix which is prepended to all php generated classes - // from this .proto. Default is empty. - optional string php_class_prefix = 40; - - // Use this option to change the namespace of php generated classes. Default - // is empty. When this option is empty, the package name will be used for - // determining the namespace. - optional string php_namespace = 41; - - // Use this option to change the namespace of php generated metadata classes. - // Default is empty. When this option is empty, the proto file name will be - // used for determining the namespace. - optional string php_metadata_namespace = 44; - - // Use this option to change the package of ruby generated classes. Default - // is empty. When this option is not set, the package name will be used for - // determining the ruby package. - optional string ruby_package = 45; - - - // The parser stores options it doesn't recognize here. - // See the documentation for the "Options" section above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. 
- // See the documentation for the "Options" section above. - extensions 1000 to max; - - //reserved 38; -} - -message MessageOptions { - // Set true to use the old proto1 MessageSet wire format for extensions. - // This is provided for backwards-compatibility with the MessageSet wire - // format. You should not use this for any other reason: It's less - // efficient, has fewer features, and is more complicated. - // - // The message must be defined exactly as follows: - // message Foo { - // option message_set_wire_format = true; - // extensions 4 to max; - // } - // Note that the message cannot have any defined fields; MessageSets only - // have extensions. - // - // All extensions of your type must be singular messages; e.g. they cannot - // be int32s, enums, or repeated messages. - // - // Because this is an option, the above two restrictions are not enforced by - // the protocol compiler. - optional bool message_set_wire_format = 1 [default = false]; - - // Disables the generation of the standard "descriptor()" accessor, which can - // conflict with a field of the same name. This is meant to make migration - // from proto1 easier; new code should avoid fields named "descriptor". - optional bool no_standard_descriptor_accessor = 2 [default = false]; - - // Is this message deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the message, or it will be completely ignored; in the very least, - // this is a formalization for deprecating messages. - optional bool deprecated = 3 [default = false]; - - // Whether the message is an automatically generated map entry type for the - // maps field. 
- // - // For maps fields: - // map map_field = 1; - // The parsed descriptor looks like: - // message MapFieldEntry { - // option map_entry = true; - // optional KeyType key = 1; - // optional ValueType value = 2; - // } - // repeated MapFieldEntry map_field = 1; - // - // Implementations may choose not to generate the map_entry=true message, but - // use a native map in the target language to hold the keys and values. - // The reflection APIs in such implementations still need to work as - // if the field is a repeated message field. - // - // NOTE: Do not set the option in .proto files. Always use the maps syntax - // instead. The option should only be implicitly set by the proto compiler - // parser. - optional bool map_entry = 7; - - //reserved 8; // javalite_serializable - //reserved 9; // javanano_as_lite - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message FieldOptions { - // The ctype option instructs the C++ code generator to use a different - // representation of the field than it normally would. See the specific - // options below. This option is not yet implemented in the open source - // release -- sorry, we'll try to include it in a future version! - optional CType ctype = 1 [default = STRING]; - enum CType { - // Default mode. - STRING = 0; - - CORD = 1; - - STRING_PIECE = 2; - } - // The packed option can be enabled for repeated primitive fields to enable - // a more efficient representation on the wire. Rather than repeatedly - // writing the tag and type for each element, the entire array is encoded as - // a single length-delimited blob. In proto3, only explicit setting it to - // false will avoid using packed encoding. - optional bool packed = 2; - - // The jstype option determines the JavaScript type used for values of the - // field. 
The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING - // is represented as JavaScript string, which avoids loss of precision that - // can happen when a large value is converted to a floating point JavaScript. - // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to - // use the JavaScript "number" type. The behavior of the default option - // JS_NORMAL is implementation dependent. - // - // This option is an enum to permit additional types to be added, e.g. - // goog.math.Integer. - optional JSType jstype = 6 [default = JS_NORMAL]; - enum JSType { - // Use the default type. - JS_NORMAL = 0; - - // Use JavaScript strings. - JS_STRING = 1; - - // Use JavaScript numbers. - JS_NUMBER = 2; - } - - // Should this field be parsed lazily? Lazy applies only to message-type - // fields. It means that when the outer message is initially parsed, the - // inner message's contents will not be parsed but instead stored in encoded - // form. The inner message will actually be parsed when it is first accessed. - // - // This is only a hint. Implementations are free to choose whether to use - // eager or lazy parsing regardless of the value of this option. However, - // setting this option true suggests that the protocol author believes that - // using lazy parsing on this field is worth the additional bookkeeping - // overhead typically needed to implement it. - // - // This option does not affect the public interface of any generated code; - // all method signatures remain the same. Furthermore, thread-safety of the - // interface is not affected by this option; const methods remain safe to - // call from multiple threads concurrently, while non-const methods continue - // to require exclusive access. - // - // - // Note that implementations may choose not to check required fields within - // a lazy sub-message. 
That is, calling IsInitialized() on the outer message - // may return true even if the inner message has missing required fields. - // This is necessary because otherwise the inner message would have to be - // parsed in order to perform the check, defeating the purpose of lazy - // parsing. An implementation which chooses not to check required fields - // must be consistent about it. That is, for any particular sub-message, the - // implementation must either *always* check its required fields, or *never* - // check its required fields, regardless of whether or not the message has - // been parsed. - optional bool lazy = 5 [default = false]; - - // Is this field deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for accessors, or it will be completely ignored; in the very least, this - // is a formalization for deprecating fields. - optional bool deprecated = 3 [default = false]; - - // For Google-internal migration only. Do not use. - optional bool weak = 10 [default = false]; - - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; - - //reserved 4; // removed jtype -} - -message OneofOptions { - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumOptions { - - // Set this option to true to allow mapping different tag names to the same - // value. - optional bool allow_alias = 2; - - // Is this enum deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum, or it will be completely ignored; in the very least, this - // is a formalization for deprecating enums. 
- optional bool deprecated = 3 [default = false]; - - //reserved 5; // javanano_as_lite - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message EnumValueOptions { - // Is this enum value deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the enum value, or it will be completely ignored; in the very least, - // this is a formalization for deprecating enum values. - optional bool deprecated = 1 [default = false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message ServiceOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. - - // Is this service deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the service, or it will be completely ignored; in the very least, - // this is a formalization for deprecating services. - optional bool deprecated = 33 [default = false]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - -message MethodOptions { - - // Note: Field numbers 1 through 32 are reserved for Google's internal RPC - // framework. We apologize for hoarding these numbers to ourselves, but - // we were already using them long before we decided to release Protocol - // Buffers. 
- - // Is this method deprecated? - // Depending on the target platform, this can emit Deprecated annotations - // for the method, or it will be completely ignored; in the very least, - // this is a formalization for deprecating methods. - optional bool deprecated = 33 [default = false]; - - // Is this method side-effect-free (or safe in HTTP parlance), or idempotent, - // or neither? HTTP based RPC implementation may choose GET verb for safe - // methods, and PUT verb for idempotent methods instead of the default POST. - enum IdempotencyLevel { - IDEMPOTENCY_UNKNOWN = 0; - NO_SIDE_EFFECTS = 1; // implies idempotent - IDEMPOTENT = 2; // idempotent, but may have side effects - } - optional IdempotencyLevel idempotency_level = 34 - [default = IDEMPOTENCY_UNKNOWN]; - - // The parser stores options it doesn't recognize here. See above. - repeated UninterpretedOption uninterpreted_option = 999; - - // Clients can define custom options in extensions of this message. See above. - extensions 1000 to max; -} - - -// A message representing a option the parser does not recognize. This only -// appears in options protos created by the compiler::Parser class. -// DescriptorPool resolves these when building Descriptor objects. Therefore, -// options protos in descriptor objects (e.g. returned by Descriptor::options(), -// or produced by Descriptor::CopyTo()) will never have UninterpretedOptions -// in them. -message UninterpretedOption { - // The name of the uninterpreted option. Each string represents a segment in - // a dot-separated name. is_extension is true iff a segment represents an - // extension (denoted with parentheses in options specs in .proto files). - // E.g.,{ ["foo", false], ["bar.baz", true], ["qux", false] } represents - // "foo.(bar.baz).qux". 
- message NamePart { - required string name_part = 1; - required bool is_extension = 2; - } - repeated NamePart name = 2; - - // The value of the uninterpreted option, in whatever type the tokenizer - // identified it as during parsing. Exactly one of these should be set. - optional string identifier_value = 3; - optional uint64 positive_int_value = 4; - optional int64 negative_int_value = 5; - optional double double_value = 6; - optional bytes string_value = 7; - optional string aggregate_value = 8; -} - -// =================================================================== -// Optional source code info - -// Encapsulates information about the original source file from which a -// FileDescriptorProto was generated. -message SourceCodeInfo { - // A Location identifies a piece of source code in a .proto file which - // corresponds to a particular definition. This information is intended - // to be useful to IDEs, code indexers, documentation generators, and similar - // tools. - // - // For example, say we have a file like: - // message Foo { - // optional string foo = 1; - // } - // Let's look at just the field definition: - // optional string foo = 1; - // ^ ^^ ^^ ^ ^^^ - // a bc de f ghi - // We have the following locations: - // span path represents - // [a,i) [ 4, 0, 2, 0 ] The whole field definition. - // [a,b) [ 4, 0, 2, 0, 4 ] The label (optional). - // [c,d) [ 4, 0, 2, 0, 5 ] The type (string). - // [e,f) [ 4, 0, 2, 0, 1 ] The name (foo). - // [g,h) [ 4, 0, 2, 0, 3 ] The number (1). - // - // Notes: - // - A location may refer to a repeated field itself (i.e. not to any - // particular index within it). This is used whenever a set of elements are - // logically enclosed in a single code segment. For example, an entire - // extend block (possibly containing multiple extension definitions) will - // have an outer location whose path refers to the "extensions" repeated - // field without an index. - // - Multiple locations may have the same path. 
This happens when a single - // logical declaration is spread out across multiple places. The most - // obvious example is the "extend" block again -- there may be multiple - // extend blocks in the same scope, each of which will have the same path. - // - A location's span is not always a subset of its parent's span. For - // example, the "extendee" of an extension declaration appears at the - // beginning of the "extend" block and is shared by all extensions within - // the block. - // - Just because a location's span is a subset of some other location's span - // does not mean that it is a descendant. For example, a "group" defines - // both a type and a field in a single declaration. Thus, the locations - // corresponding to the type and field and their components will overlap. - // - Code which tries to interpret locations should probably be designed to - // ignore those that it doesn't understand, as more types of locations could - // be recorded in the future. - repeated Location location = 1; - message Location { - // Identifies which part of the FileDescriptorProto was defined at this - // location. - // - // Each element is a field number or an index. They form a path from - // the root FileDescriptorProto to the place where the definition. For - // example, this path: - // [ 4, 3, 2, 7, 1 ] - // refers to: - // file.message_type(3) // 4, 3 - // .field(7) // 2, 7 - // .name() // 1 - // This is because FileDescriptorProto.message_type has field number 4: - // repeated DescriptorProto message_type = 4; - // and DescriptorProto.field has field number 2: - // repeated FieldDescriptorProto field = 2; - // and FieldDescriptorProto.name has field number 1: - // optional string name = 1; - // - // Thus, the above path gives the location of a field name. If we removed - // the last element: - // [ 4, 3, 2, 7 ] - // this path refers to the whole field declaration (from the beginning - // of the label to the terminating semicolon). 
- repeated int32 path = 1 [packed = true]; - - // Always has exactly three or four elements: start line, start column, - // end line (optional, otherwise assumed same as start line), end column. - // These are packed into a single field for efficiency. Note that line - // and column numbers are zero-based -- typically you will want to add - // 1 to each before displaying to a user. - repeated int32 span = 2 [packed = true]; - - // If this SourceCodeInfo represents a complete declaration, these are any - // comments appearing before and after the declaration which appear to be - // attached to the declaration. - // - // A series of line comments appearing on consecutive lines, with no other - // tokens appearing on those lines, will be treated as a single comment. - // - // leading_detached_comments will keep paragraphs of comments that appear - // before (but not connected to) the current element. Each paragraph, - // separated by empty lines, will be one comment element in the repeated - // field. - // - // Only the comment content is provided; comment markers (e.g. //) are - // stripped out. For block comments, leading whitespace and an asterisk - // will be stripped from the beginning of each line other than the first. - // Newlines are included in the output. - // - // Examples: - // - // optional int32 foo = 1; // Comment attached to foo. - // // Comment attached to bar. - // optional int32 bar = 2; - // - // optional string baz = 3; - // // Comment attached to baz. - // // Another line attached to baz. - // - // // Comment attached to qux. - // // - // // Another line attached to qux. - // optional double qux = 4; - // - // // Detached comment for corge. This is not leading or trailing comments - // // to qux or corge because there are blank lines separating it from - // // both. - // - // // Detached comment for corge paragraph 2. - // - // optional string corge = 5; - // /* Block comment attached - // * to corge. Leading asterisks - // * will be removed. 
*/ - // /* Block comment attached to - // * grault. */ - // optional int32 grault = 6; - // - // // ignored detached comments. - optional string leading_comments = 3; - optional string trailing_comments = 4; - repeated string leading_detached_comments = 6; - } -} - -// Describes the relationship between generated code and its original source -// file. A GeneratedCodeInfo message is associated with only one generated -// source file, but may contain references to different source .proto files. -message GeneratedCodeInfo { - // An Annotation connects some span of text in generated code to an element - // of its generating .proto file. - repeated Annotation annotation = 1; - message Annotation { - // Identifies the element in the original source .proto file. This field - // is formatted the same as SourceCodeInfo.Location.path. - repeated int32 path = 1 [packed = true]; - - // Identifies the filesystem path to the original source .proto. - optional string source_file = 2; - - // Identifies the starting offset in bytes in the generated code - // that relates to the identified object. - optional int32 begin = 3; - - // Identifies the ending offset in bytes in the generated code that - // relates to the identified offset. The end offset should be one past - // the last relevant byte (so the length of the text = end - begin). - optional int32 end = 4; - } -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto deleted file mode 100644 index b14bea5d01..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/duration.proto +++ /dev/null @@ -1,116 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "types"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "DurationProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Duration represents a signed, fixed-length span of time represented -// as a count of seconds and fractions of seconds at nanosecond -// resolution. It is independent of any calendar and concepts like "day" -// or "month". It is related to Timestamp in that the difference between -// two Timestamp values is a Duration and it can be added or subtracted -// from a Timestamp. Range is approximately +-10,000 years. -// -// # Examples -// -// Example 1: Compute Duration from two Timestamps in pseudo code. -// -// Timestamp start = ...; -// Timestamp end = ...; -// Duration duration = ...; -// -// duration.seconds = end.seconds - start.seconds; -// duration.nanos = end.nanos - start.nanos; -// -// if (duration.seconds < 0 && duration.nanos > 0) { -// duration.seconds += 1; -// duration.nanos -= 1000000000; -// } else if (durations.seconds > 0 && duration.nanos < 0) { -// duration.seconds -= 1; -// duration.nanos += 1000000000; -// } -// -// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. -// -// Timestamp start = ...; -// Duration duration = ...; -// Timestamp end = ...; -// -// end.seconds = start.seconds + duration.seconds; -// end.nanos = start.nanos + duration.nanos; -// -// if (end.nanos < 0) { -// end.seconds -= 1; -// end.nanos += 1000000000; -// } else if (end.nanos >= 1000000000) { -// end.seconds += 1; -// end.nanos -= 1000000000; -// } -// -// Example 3: Compute Duration from datetime.timedelta in Python. 
-// -// td = datetime.timedelta(days=3, minutes=10) -// duration = Duration() -// duration.FromTimedelta(td) -// -// # JSON Mapping -// -// In JSON format, the Duration type is encoded as a string rather than an -// object, where the string ends in the suffix "s" (indicating seconds) and -// is preceded by the number of seconds, with nanoseconds expressed as -// fractional seconds. For example, 3 seconds with 0 nanoseconds should be -// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should -// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 -// microsecond should be expressed in JSON format as "3.000001s". -// -// -message Duration { - // Signed seconds of the span of time. Must be from -315,576,000,000 - // to +315,576,000,000 inclusive. Note: these bounds are computed from: - // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years - int64 seconds = 1; - - // Signed fractions of a second at nanosecond resolution of the span - // of time. Durations less than one second are represented with a 0 - // `seconds` field and a positive or negative `nanos` field. For durations - // of one second or more, a non-zero value for the `nanos` field must be - // of the same sign as the `seconds` field. Must be from -999,999,999 - // to +999,999,999 inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto deleted file mode 100644 index 6057c8522d..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/empty.proto +++ /dev/null @@ -1,52 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option go_package = "types"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "EmptyProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option cc_enable_arenas = true; - -// A generic empty message that you can re-use to avoid defining duplicated -// empty messages in your APIs. A typical example is to use it as the request -// or the response type of an API method. For instance: -// -// service Foo { -// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); -// } -// -// The JSON representation for `Empty` is empty JSON object `{}`. -message Empty {} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto deleted file mode 100644 index 7b77007b7e..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/field_mask.proto +++ /dev/null @@ -1,245 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "FieldMaskProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option go_package = "types"; -option cc_enable_arenas = true; - -// `FieldMask` represents a set of symbolic field paths, for example: -// -// paths: "f.a" -// paths: "f.b.d" -// -// Here `f` represents a field in some root message, `a` and `b` -// fields in the message found in `f`, and `d` a field found in the -// message in `f.b`. -// -// Field masks are used to specify a subset of fields that should be -// returned by a get operation or modified by an update operation. -// Field masks also have a custom JSON encoding (see below). -// -// # Field Masks in Projections -// -// When used in the context of a projection, a response message or -// sub-message is filtered by the API to only contain those fields as -// specified in the mask. 
For example, if the mask in the previous -// example is applied to a response message as follows: -// -// f { -// a : 22 -// b { -// d : 1 -// x : 2 -// } -// y : 13 -// } -// z: 8 -// -// The result will not contain specific values for fields x,y and z -// (their value will be set to the default, and omitted in proto text -// output): -// -// -// f { -// a : 22 -// b { -// d : 1 -// } -// } -// -// A repeated field is not allowed except at the last position of a -// paths string. -// -// If a FieldMask object is not present in a get operation, the -// operation applies to all fields (as if a FieldMask of all fields -// had been specified). -// -// Note that a field mask does not necessarily apply to the -// top-level response message. In case of a REST get operation, the -// field mask applies directly to the response, but in case of a REST -// list operation, the mask instead applies to each individual message -// in the returned resource list. In case of a REST custom method, -// other definitions may be used. Where the mask applies will be -// clearly documented together with its declaration in the API. In -// any case, the effect on the returned resource/resources is required -// behavior for APIs. -// -// # Field Masks in Update Operations -// -// A field mask in update operations specifies which fields of the -// targeted resource are going to be updated. The API is required -// to only change the values of the fields as specified in the mask -// and leave the others untouched. If a resource is passed in to -// describe the updated values, the API ignores the values of all -// fields not covered by the mask. -// -// If a repeated field is specified for an update operation, new values will -// be appended to the existing repeated field in the target resource. Note that -// a repeated field is only allowed in the last position of a `paths` string. 
-// -// If a sub-message is specified in the last position of the field mask for an -// update operation, then new value will be merged into the existing sub-message -// in the target resource. -// -// For example, given the target message: -// -// f { -// b { -// d: 1 -// x: 2 -// } -// c: [1] -// } -// -// And an update message: -// -// f { -// b { -// d: 10 -// } -// c: [2] -// } -// -// then if the field mask is: -// -// paths: ["f.b", "f.c"] -// -// then the result will be: -// -// f { -// b { -// d: 10 -// x: 2 -// } -// c: [1, 2] -// } -// -// An implementation may provide options to override this default behavior for -// repeated and message fields. -// -// In order to reset a field's value to the default, the field must -// be in the mask and set to the default value in the provided resource. -// Hence, in order to reset all fields of a resource, provide a default -// instance of the resource and set all fields in the mask, or do -// not provide a mask as described below. -// -// If a field mask is not present on update, the operation applies to -// all fields (as if a field mask of all fields has been specified). -// Note that in the presence of schema evolution, this may mean that -// fields the client does not know and has therefore not filled into -// the request will be reset to their default. If this is unwanted -// behavior, a specific service may require a client to always specify -// a field mask, producing an error if not. -// -// As with get operations, the location of the resource which -// describes the updated values in the request message depends on the -// operation kind. In any case, the effect of the field mask is -// required to be honored by the API. -// -// ## Considerations for HTTP REST -// -// The HTTP kind of an update operation which uses a field mask must -// be set to PATCH instead of PUT in order to satisfy HTTP semantics -// (PUT must only be used for full updates). 
-// -// # JSON Encoding of Field Masks -// -// In JSON, a field mask is encoded as a single string where paths are -// separated by a comma. Fields name in each path are converted -// to/from lower-camel naming conventions. -// -// As an example, consider the following message declarations: -// -// message Profile { -// User user = 1; -// Photo photo = 2; -// } -// message User { -// string display_name = 1; -// string address = 2; -// } -// -// In proto a field mask for `Profile` may look as such: -// -// mask { -// paths: "user.display_name" -// paths: "photo" -// } -// -// In JSON, the same mask is represented as below: -// -// { -// mask: "user.displayName,photo" -// } -// -// # Field Masks and Oneof Fields -// -// Field masks treat fields in oneofs just as regular fields. Consider the -// following message: -// -// message SampleMessage { -// oneof test_oneof { -// string name = 4; -// SubMessage sub_message = 9; -// } -// } -// -// The field mask can be: -// -// mask { -// paths: "name" -// } -// -// Or: -// -// mask { -// paths: "sub_message" -// } -// -// Note that oneof type names ("test_oneof" in this case) cannot be used in -// paths. -// -// ## Field Mask Verification -// -// The implementation of any API method which has a FieldMask type field in the -// request should verify the included field paths, and return an -// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. -message FieldMask { - // The set of field mask paths. - repeated string paths = 1; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto deleted file mode 100644 index 8654578c77..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/source_context.proto +++ /dev/null @@ -1,48 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "SourceContextProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option go_package = "types"; - -// `SourceContext` represents information about the source of a -// protobuf element, like the file in which it is defined. -message SourceContext { - // The path-qualified name of the .proto file that contained the associated - // protobuf element. For example: `"google/protobuf/source_context.proto"`. - string file_name = 1; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto deleted file mode 100644 index 9db0771592..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/struct.proto +++ /dev/null @@ -1,95 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. 
-// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "types"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "StructProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// `Struct` represents a structured data value, consisting of fields -// which map to dynamically typed values. In some languages, `Struct` -// might be supported by a native representation. For example, in -// scripting languages like JS a struct is represented as an -// object. The details of that representation are described together -// with the proto support for the language. -// -// The JSON representation for `Struct` is JSON object. -message Struct { - // Unordered map of dynamically typed values. - map fields = 1; -} - -// `Value` represents a dynamically typed value which can be either -// null, a number, a string, a boolean, a recursive struct value, or a -// list of values. A producer of value is expected to set one of that -// variants, absence of any variant indicates an error. 
-// -// The JSON representation for `Value` is JSON value. -message Value { - // The kind of value. - oneof kind { - // Represents a null value. - NullValue null_value = 1; - // Represents a double value. - double number_value = 2; - // Represents a string value. - string string_value = 3; - // Represents a boolean value. - bool bool_value = 4; - // Represents a structured value. - Struct struct_value = 5; - // Represents a repeated `Value`. - ListValue list_value = 6; - } -} - -// `NullValue` is a singleton enumeration to represent the null value for the -// `Value` type union. -// -// The JSON representation for `NullValue` is JSON `null`. -enum NullValue { - // Null value. - NULL_VALUE = 0; -} - -// `ListValue` is a wrapper around a repeated field of values. -// -// The JSON representation for `ListValue` is JSON array. -message ListValue { - // Repeated field of dynamically typed values. - repeated Value values = 1; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto deleted file mode 100644 index 0ebe36ea73..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/timestamp.proto +++ /dev/null @@ -1,138 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. 
nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "types"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TimestampProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// A Timestamp represents a point in time independent of any time zone or local -// calendar, encoded as a count of seconds and fractions of seconds at -// nanosecond resolution. The count is relative to an epoch at UTC midnight on -// January 1, 1970, in the proleptic Gregorian calendar which extends the -// Gregorian calendar backwards to year one. -// -// All minutes are 60 seconds long. Leap seconds are "smeared" so that no leap -// second table is needed for interpretation, using a [24-hour linear -// smear](https://developers.google.com/time/smear). -// -// The range is from 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. 
By -// restricting to that range, we ensure that we can convert to and from [RFC -// 3339](https://www.ietf.org/rfc/rfc3339.txt) date strings. -// -// # Examples -// -// Example 1: Compute Timestamp from POSIX `time()`. -// -// Timestamp timestamp; -// timestamp.set_seconds(time(NULL)); -// timestamp.set_nanos(0); -// -// Example 2: Compute Timestamp from POSIX `gettimeofday()`. -// -// struct timeval tv; -// gettimeofday(&tv, NULL); -// -// Timestamp timestamp; -// timestamp.set_seconds(tv.tv_sec); -// timestamp.set_nanos(tv.tv_usec * 1000); -// -// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. -// -// FILETIME ft; -// GetSystemTimeAsFileTime(&ft); -// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; -// -// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z -// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. -// Timestamp timestamp; -// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); -// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); -// -// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. -// -// long millis = System.currentTimeMillis(); -// -// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) -// .setNanos((int) ((millis % 1000) * 1000000)).build(); -// -// -// Example 5: Compute Timestamp from current time in Python. -// -// timestamp = Timestamp() -// timestamp.GetCurrentTime() -// -// # JSON Mapping -// -// In JSON format, the Timestamp type is encoded as a string in the -// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the -// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" -// where {year} is always expressed using four digits while {month}, {day}, -// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional -// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), -// are optional. 
The "Z" suffix indicates the timezone ("UTC"); the timezone -// is required. A proto3 JSON serializer should always use UTC (as indicated by -// "Z") when printing the Timestamp type and a proto3 JSON parser should be -// able to accept both UTC and other timezones (as indicated by an offset). -// -// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past -// 01:30 UTC on January 15, 2017. -// -// In JavaScript, one can convert a Date object to this format using the -// standard -// [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString) -// method. In Python, a standard `datetime.datetime` object can be converted -// to this format using -// [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) with -// the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one can use -// the Joda Time's [`ISODateTimeFormat.dateTime()`]( -// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime%2D%2D -// ) to obtain a formatter capable of generating timestamps in this format. -// -// -message Timestamp { - // Represents seconds of UTC time since Unix epoch - // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to - // 9999-12-31T23:59:59Z inclusive. - int64 seconds = 1; - - // Non-negative fractions of a second at nanosecond resolution. Negative - // second values with fractions must still have non-negative nanos values - // that count forward in time. Must be from 0 to 999,999,999 - // inclusive. - int32 nanos = 2; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto deleted file mode 100644 index cc626250de..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/type.proto +++ /dev/null @@ -1,187 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. 
-// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
- -syntax = "proto3"; - -package google.protobuf; - -import "google/protobuf/any.proto"; -import "google/protobuf/source_context.proto"; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option java_package = "com.google.protobuf"; -option java_outer_classname = "TypeProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; -option go_package = "types"; - -// A protocol buffer message type. -message Type { - // The fully qualified message name. - string name = 1; - // The list of fields. - repeated Field fields = 2; - // The list of types appearing in `oneof` definitions in this type. - repeated string oneofs = 3; - // The protocol buffer options. - repeated Option options = 4; - // The source context. - SourceContext source_context = 5; - // The source syntax. - Syntax syntax = 6; -} - -// A single field of a message type. -message Field { - // Basic field types. - enum Kind { - // Field type unknown. - TYPE_UNKNOWN = 0; - // Field type double. - TYPE_DOUBLE = 1; - // Field type float. - TYPE_FLOAT = 2; - // Field type int64. - TYPE_INT64 = 3; - // Field type uint64. - TYPE_UINT64 = 4; - // Field type int32. - TYPE_INT32 = 5; - // Field type fixed64. - TYPE_FIXED64 = 6; - // Field type fixed32. - TYPE_FIXED32 = 7; - // Field type bool. - TYPE_BOOL = 8; - // Field type string. - TYPE_STRING = 9; - // Field type group. Proto2 syntax only, and deprecated. - TYPE_GROUP = 10; - // Field type message. - TYPE_MESSAGE = 11; - // Field type bytes. - TYPE_BYTES = 12; - // Field type uint32. - TYPE_UINT32 = 13; - // Field type enum. - TYPE_ENUM = 14; - // Field type sfixed32. - TYPE_SFIXED32 = 15; - // Field type sfixed64. - TYPE_SFIXED64 = 16; - // Field type sint32. - TYPE_SINT32 = 17; - // Field type sint64. - TYPE_SINT64 = 18; - } - - // Whether a field is optional, required, or repeated. - enum Cardinality { - // For fields with unknown cardinality. - CARDINALITY_UNKNOWN = 0; - // For optional fields. 
- CARDINALITY_OPTIONAL = 1; - // For required fields. Proto2 syntax only. - CARDINALITY_REQUIRED = 2; - // For repeated fields. - CARDINALITY_REPEATED = 3; - }; - - // The field type. - Kind kind = 1; - // The field cardinality. - Cardinality cardinality = 2; - // The field number. - int32 number = 3; - // The field name. - string name = 4; - // The field type URL, without the scheme, for message or enumeration - // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. - string type_url = 6; - // The index of the field type in `Type.oneofs`, for message or enumeration - // types. The first type has index 1; zero means the type is not in the list. - int32 oneof_index = 7; - // Whether to use alternative packed wire representation. - bool packed = 8; - // The protocol buffer options. - repeated Option options = 9; - // The field JSON name. - string json_name = 10; - // The string value of the default value of this field. Proto2 syntax only. - string default_value = 11; -} - -// Enum type definition. -message Enum { - // Enum type name. - string name = 1; - // Enum value definitions. - repeated EnumValue enumvalue = 2; - // Protocol buffer options. - repeated Option options = 3; - // The source context. - SourceContext source_context = 4; - // The source syntax. - Syntax syntax = 5; -} - -// Enum value definition. -message EnumValue { - // Enum value name. - string name = 1; - // Enum value number. - int32 number = 2; - // Protocol buffer options. - repeated Option options = 3; -} - -// A protocol buffer option, which can be attached to a message, field, -// enumeration, etc. -message Option { - // The option's name. For protobuf built-in options (options defined in - // descriptor.proto), this is the short name. For example, `"map_entry"`. - // For custom options, it should be the fully-qualified name. For example, - // `"google.api.http"`. - string name = 1; - // The option's value packed in an Any message. 
If the value is a primitive, - // the corresponding wrapper type defined in google/protobuf/wrappers.proto - // should be used. If the value is an enum, it should be stored as an int32 - // value using the google.protobuf.Int32Value type. - Any value = 2; -} - -// The syntax in which a protocol buffer element is defined. -enum Syntax { - // Syntax `proto2`. - SYNTAX_PROTO2 = 0; - // Syntax `proto3`. - SYNTAX_PROTO3 = 1; -} diff --git a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto b/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto deleted file mode 100644 index 59b76acde8..0000000000 --- a/vendor/github.com/gogo/protobuf/protobuf/google/protobuf/wrappers.proto +++ /dev/null @@ -1,123 +0,0 @@ -// Protocol Buffers - Google's data interchange format -// Copyright 2008 Google Inc. All rights reserved. -// https://developers.google.com/protocol-buffers/ -// -// Redistribution and use in source and binary forms, with or without -// modification, are permitted provided that the following conditions are -// met: -// -// * Redistributions of source code must retain the above copyright -// notice, this list of conditions and the following disclaimer. -// * Redistributions in binary form must reproduce the above -// copyright notice, this list of conditions and the following disclaimer -// in the documentation and/or other materials provided with the -// distribution. -// * Neither the name of Google Inc. nor the names of its -// contributors may be used to endorse or promote products derived from -// this software without specific prior written permission. -// -// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT -// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - -// Wrappers for primitive (non-message) types. These types are useful -// for embedding primitives in the `google.protobuf.Any` type and for places -// where we need to distinguish between the absence of a primitive -// typed field and its default value. -// -// These wrappers have no meaningful use within repeated fields as they lack -// the ability to detect presence on individual elements. -// These wrappers have no meaningful use within a map or a oneof since -// individual entries of a map or fields of a oneof can already detect presence. - -syntax = "proto3"; - -package google.protobuf; - -option csharp_namespace = "Google.Protobuf.WellKnownTypes"; -option cc_enable_arenas = true; -option go_package = "types"; -option java_package = "com.google.protobuf"; -option java_outer_classname = "WrappersProto"; -option java_multiple_files = true; -option objc_class_prefix = "GPB"; - -// Wrapper message for `double`. -// -// The JSON representation for `DoubleValue` is JSON number. -message DoubleValue { - // The double value. - double value = 1; -} - -// Wrapper message for `float`. -// -// The JSON representation for `FloatValue` is JSON number. -message FloatValue { - // The float value. - float value = 1; -} - -// Wrapper message for `int64`. -// -// The JSON representation for `Int64Value` is JSON string. -message Int64Value { - // The int64 value. - int64 value = 1; -} - -// Wrapper message for `uint64`. 
-// -// The JSON representation for `UInt64Value` is JSON string. -message UInt64Value { - // The uint64 value. - uint64 value = 1; -} - -// Wrapper message for `int32`. -// -// The JSON representation for `Int32Value` is JSON number. -message Int32Value { - // The int32 value. - int32 value = 1; -} - -// Wrapper message for `uint32`. -// -// The JSON representation for `UInt32Value` is JSON number. -message UInt32Value { - // The uint32 value. - uint32 value = 1; -} - -// Wrapper message for `bool`. -// -// The JSON representation for `BoolValue` is JSON `true` and `false`. -message BoolValue { - // The bool value. - bool value = 1; -} - -// Wrapper message for `string`. -// -// The JSON representation for `StringValue` is JSON string. -message StringValue { - // The string value. - string value = 1; -} - -// Wrapper message for `bytes`. -// -// The JSON representation for `BytesValue` is JSON string. -message BytesValue { - // The bytes value. - bytes value = 1; -} diff --git a/vendor/github.com/pkg/errors/.gitignore b/vendor/github.com/pkg/errors/.gitignore deleted file mode 100644 index daf913b1b3..0000000000 --- a/vendor/github.com/pkg/errors/.gitignore +++ /dev/null @@ -1,24 +0,0 @@ -# Compiled Object files, Static and Dynamic libs (Shared Objects) -*.o -*.a -*.so - -# Folders -_obj -_test - -# Architecture specific extensions/prefixes -*.[568vq] -[568vq].out - -*.cgo1.go -*.cgo2.c -_cgo_defun.c -_cgo_gotypes.go -_cgo_export.* - -_testmain.go - -*.exe -*.test -*.prof diff --git a/vendor/github.com/pkg/errors/.travis.yml b/vendor/github.com/pkg/errors/.travis.yml deleted file mode 100644 index 9159de03e0..0000000000 --- a/vendor/github.com/pkg/errors/.travis.yml +++ /dev/null @@ -1,10 +0,0 @@ -language: go -go_import_path: github.com/pkg/errors -go: - - 1.11.x - - 1.12.x - - 1.13.x - - tip - -script: - - make check diff --git a/vendor/github.com/pkg/errors/LICENSE b/vendor/github.com/pkg/errors/LICENSE deleted file mode 100644 index 835ba3e755..0000000000 --- 
a/vendor/github.com/pkg/errors/LICENSE +++ /dev/null @@ -1,23 +0,0 @@ -Copyright (c) 2015, Dave Cheney -All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are met: - -* Redistributions of source code must retain the above copyright notice, this - list of conditions and the following disclaimer. - -* Redistributions in binary form must reproduce the above copyright notice, - this list of conditions and the following disclaimer in the documentation - and/or other materials provided with the distribution. - -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" -AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE -IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE -DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE -FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR -SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, -OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
diff --git a/vendor/github.com/pkg/errors/Makefile b/vendor/github.com/pkg/errors/Makefile deleted file mode 100644 index ce9d7cded6..0000000000 --- a/vendor/github.com/pkg/errors/Makefile +++ /dev/null @@ -1,44 +0,0 @@ -PKGS := github.com/pkg/errors -SRCDIRS := $(shell go list -f '{{.Dir}}' $(PKGS)) -GO := go - -check: test vet gofmt misspell unconvert staticcheck ineffassign unparam - -test: - $(GO) test $(PKGS) - -vet: | test - $(GO) vet $(PKGS) - -staticcheck: - $(GO) get honnef.co/go/tools/cmd/staticcheck - staticcheck -checks all $(PKGS) - -misspell: - $(GO) get github.com/client9/misspell/cmd/misspell - misspell \ - -locale GB \ - -error \ - *.md *.go - -unconvert: - $(GO) get github.com/mdempsky/unconvert - unconvert -v $(PKGS) - -ineffassign: - $(GO) get github.com/gordonklaus/ineffassign - find $(SRCDIRS) -name '*.go' | xargs ineffassign - -pedantic: check errcheck - -unparam: - $(GO) get mvdan.cc/unparam - unparam ./... - -errcheck: - $(GO) get github.com/kisielk/errcheck - errcheck $(PKGS) - -gofmt: - @echo Checking code is gofmted - @test -z "$(shell gofmt -s -l -d -e $(SRCDIRS) | tee /dev/stderr)" diff --git a/vendor/github.com/pkg/errors/README.md b/vendor/github.com/pkg/errors/README.md deleted file mode 100644 index 54dfdcb12e..0000000000 --- a/vendor/github.com/pkg/errors/README.md +++ /dev/null @@ -1,59 +0,0 @@ -# errors [![Travis-CI](https://travis-ci.org/pkg/errors.svg)](https://travis-ci.org/pkg/errors) [![AppVeyor](https://ci.appveyor.com/api/projects/status/b98mptawhudj53ep/branch/master?svg=true)](https://ci.appveyor.com/project/davecheney/errors/branch/master) [![GoDoc](https://godoc.org/github.com/pkg/errors?status.svg)](http://godoc.org/github.com/pkg/errors) [![Report card](https://goreportcard.com/badge/github.com/pkg/errors)](https://goreportcard.com/report/github.com/pkg/errors) [![Sourcegraph](https://sourcegraph.com/github.com/pkg/errors/-/badge.svg)](https://sourcegraph.com/github.com/pkg/errors?badge) - -Package errors provides 
simple error handling primitives. - -`go get github.com/pkg/errors` - -The traditional error handling idiom in Go is roughly akin to -```go -if err != nil { - return err -} -``` -which applied recursively up the call stack results in error reports without context or debugging information. The errors package allows programmers to add context to the failure path in their code in a way that does not destroy the original value of the error. - -## Adding context to an error - -The errors.Wrap function returns a new error that adds context to the original error. For example -```go -_, err := ioutil.ReadAll(r) -if err != nil { - return errors.Wrap(err, "read failed") -} -``` -## Retrieving the cause of an error - -Using `errors.Wrap` constructs a stack of errors, adding context to the preceding error. Depending on the nature of the error it may be necessary to reverse the operation of errors.Wrap to retrieve the original error for inspection. Any error value which implements this interface can be inspected by `errors.Cause`. -```go -type causer interface { - Cause() error -} -``` -`errors.Cause` will recursively retrieve the topmost error which does not implement `causer`, which is assumed to be the original cause. For example: -```go -switch err := errors.Cause(err).(type) { -case *MyError: - // handle specifically -default: - // unknown error -} -``` - -[Read the package documentation for more information](https://godoc.org/github.com/pkg/errors). - -## Roadmap - -With the upcoming [Go2 error proposals](https://go.googlesource.com/proposal/+/master/design/go2draft.md) this package is moving into maintenance mode. The roadmap for a 1.0 release is as follows: - -- 0.9. Remove pre Go 1.9 and Go 1.10 support, address outstanding pull requests (if possible) -- 1.0. Final release. - -## Contributing - -Because of the Go2 errors changes, this package is not accepting proposals for new functionality. With that said, we welcome pull requests, bug fixes and issue reports. 
- -Before sending a PR, please discuss your change by raising an issue. - -## License - -BSD-2-Clause diff --git a/vendor/github.com/pkg/errors/appveyor.yml b/vendor/github.com/pkg/errors/appveyor.yml deleted file mode 100644 index a932eade02..0000000000 --- a/vendor/github.com/pkg/errors/appveyor.yml +++ /dev/null @@ -1,32 +0,0 @@ -version: build-{build}.{branch} - -clone_folder: C:\gopath\src\github.com\pkg\errors -shallow_clone: true # for startup speed - -environment: - GOPATH: C:\gopath - -platform: - - x64 - -# http://www.appveyor.com/docs/installed-software -install: - # some helpful output for debugging builds - - go version - - go env - # pre-installed MinGW at C:\MinGW is 32bit only - # but MSYS2 at C:\msys64 has mingw64 - - set PATH=C:\msys64\mingw64\bin;%PATH% - - gcc --version - - g++ --version - -build_script: - - go install -v ./... - -test_script: - - set PATH=C:\gopath\bin;%PATH% - - go test -v ./... - -#artifacts: -# - path: '%GOPATH%\bin\*.exe' -deploy: off diff --git a/vendor/github.com/pkg/errors/errors.go b/vendor/github.com/pkg/errors/errors.go deleted file mode 100644 index 161aea2582..0000000000 --- a/vendor/github.com/pkg/errors/errors.go +++ /dev/null @@ -1,288 +0,0 @@ -// Package errors provides simple error handling primitives. -// -// The traditional error handling idiom in Go is roughly akin to -// -// if err != nil { -// return err -// } -// -// which when applied recursively up the call stack results in error reports -// without context or debugging information. The errors package allows -// programmers to add context to the failure path in their code in a way -// that does not destroy the original value of the error. -// -// Adding context to an error -// -// The errors.Wrap function returns a new error that adds context to the -// original error by recording a stack trace at the point Wrap is called, -// together with the supplied message. 
For example -// -// _, err := ioutil.ReadAll(r) -// if err != nil { -// return errors.Wrap(err, "read failed") -// } -// -// If additional control is required, the errors.WithStack and -// errors.WithMessage functions destructure errors.Wrap into its component -// operations: annotating an error with a stack trace and with a message, -// respectively. -// -// Retrieving the cause of an error -// -// Using errors.Wrap constructs a stack of errors, adding context to the -// preceding error. Depending on the nature of the error it may be necessary -// to reverse the operation of errors.Wrap to retrieve the original error -// for inspection. Any error value which implements this interface -// -// type causer interface { -// Cause() error -// } -// -// can be inspected by errors.Cause. errors.Cause will recursively retrieve -// the topmost error that does not implement causer, which is assumed to be -// the original cause. For example: -// -// switch err := errors.Cause(err).(type) { -// case *MyError: -// // handle specifically -// default: -// // unknown error -// } -// -// Although the causer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// Formatted printing of errors -// -// All error values returned from this package implement fmt.Formatter and can -// be formatted by the fmt package. The following verbs are supported: -// -// %s print the error. If the error has a Cause it will be -// printed recursively. -// %v see %s -// %+v extended format. Each Frame of the error's StackTrace will -// be printed in detail. -// -// Retrieving the stack trace of an error or wrapper -// -// New, Errorf, Wrap, and Wrapf record a stack trace at the point they are -// invoked. 
This information can be retrieved with the following interface: -// -// type stackTracer interface { -// StackTrace() errors.StackTrace -// } -// -// The returned errors.StackTrace type is defined as -// -// type StackTrace []Frame -// -// The Frame type represents a call site in the stack trace. Frame supports -// the fmt.Formatter interface that can be used for printing information about -// the stack trace of this error. For example: -// -// if err, ok := err.(stackTracer); ok { -// for _, f := range err.StackTrace() { -// fmt.Printf("%+s:%d\n", f, f) -// } -// } -// -// Although the stackTracer interface is not exported by this package, it is -// considered a part of its stable public interface. -// -// See the documentation for Frame.Format for more details. -package errors - -import ( - "fmt" - "io" -) - -// New returns an error with the supplied message. -// New also records the stack trace at the point it was called. -func New(message string) error { - return &fundamental{ - msg: message, - stack: callers(), - } -} - -// Errorf formats according to a format specifier and returns the string -// as a value that satisfies error. -// Errorf also records the stack trace at the point it was called. -func Errorf(format string, args ...interface{}) error { - return &fundamental{ - msg: fmt.Sprintf(format, args...), - stack: callers(), - } -} - -// fundamental is an error that has a message and a stack, but no caller. -type fundamental struct { - msg string - *stack -} - -func (f *fundamental) Error() string { return f.msg } - -func (f *fundamental) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - io.WriteString(s, f.msg) - f.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, f.msg) - case 'q': - fmt.Fprintf(s, "%q", f.msg) - } -} - -// WithStack annotates err with a stack trace at the point WithStack was called. -// If err is nil, WithStack returns nil. 
-func WithStack(err error) error { - if err == nil { - return nil - } - return &withStack{ - err, - callers(), - } -} - -type withStack struct { - error - *stack -} - -func (w *withStack) Cause() error { return w.error } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withStack) Unwrap() error { return w.error } - -func (w *withStack) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v", w.Cause()) - w.stack.Format(s, verb) - return - } - fallthrough - case 's': - io.WriteString(s, w.Error()) - case 'q': - fmt.Fprintf(s, "%q", w.Error()) - } -} - -// Wrap returns an error annotating err with a stack trace -// at the point Wrap is called, and the supplied message. -// If err is nil, Wrap returns nil. -func Wrap(err error, message string) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: message, - } - return &withStack{ - err, - callers(), - } -} - -// Wrapf returns an error annotating err with a stack trace -// at the point Wrapf is called, and the format specifier. -// If err is nil, Wrapf returns nil. -func Wrapf(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - err = &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } - return &withStack{ - err, - callers(), - } -} - -// WithMessage annotates err with a new message. -// If err is nil, WithMessage returns nil. -func WithMessage(err error, message string) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: message, - } -} - -// WithMessagef annotates err with the format specifier. -// If err is nil, WithMessagef returns nil. 
-func WithMessagef(err error, format string, args ...interface{}) error { - if err == nil { - return nil - } - return &withMessage{ - cause: err, - msg: fmt.Sprintf(format, args...), - } -} - -type withMessage struct { - cause error - msg string -} - -func (w *withMessage) Error() string { return w.msg + ": " + w.cause.Error() } -func (w *withMessage) Cause() error { return w.cause } - -// Unwrap provides compatibility for Go 1.13 error chains. -func (w *withMessage) Unwrap() error { return w.cause } - -func (w *withMessage) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - if s.Flag('+') { - fmt.Fprintf(s, "%+v\n", w.Cause()) - io.WriteString(s, w.msg) - return - } - fallthrough - case 's', 'q': - io.WriteString(s, w.Error()) - } -} - -// Cause returns the underlying cause of the error, if possible. -// An error value has a cause if it implements the following -// interface: -// -// type causer interface { -// Cause() error -// } -// -// If the error does not implement Cause, the original error will -// be returned. If the error is nil, nil will be returned without further -// investigation. -func Cause(err error) error { - type causer interface { - Cause() error - } - - for err != nil { - cause, ok := err.(causer) - if !ok { - break - } - err = cause.Cause() - } - return err -} diff --git a/vendor/github.com/pkg/errors/go113.go b/vendor/github.com/pkg/errors/go113.go deleted file mode 100644 index be0d10d0c7..0000000000 --- a/vendor/github.com/pkg/errors/go113.go +++ /dev/null @@ -1,38 +0,0 @@ -// +build go1.13 - -package errors - -import ( - stderrors "errors" -) - -// Is reports whether any error in err's chain matches target. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error is considered to match a target if it is equal to that target or if -// it implements a method Is(error) bool such that Is(target) returns true. 
-func Is(err, target error) bool { return stderrors.Is(err, target) } - -// As finds the first error in err's chain that matches target, and if so, sets -// target to that error value and returns true. -// -// The chain consists of err itself followed by the sequence of errors obtained by -// repeatedly calling Unwrap. -// -// An error matches target if the error's concrete value is assignable to the value -// pointed to by target, or if the error has a method As(interface{}) bool such that -// As(target) returns true. In the latter case, the As method is responsible for -// setting target. -// -// As will panic if target is not a non-nil pointer to either a type that implements -// error, or to any interface type. As returns false if err is nil. -func As(err error, target interface{}) bool { return stderrors.As(err, target) } - -// Unwrap returns the result of calling the Unwrap method on err, if err's -// type contains an Unwrap method returning error. -// Otherwise, Unwrap returns nil. -func Unwrap(err error) error { - return stderrors.Unwrap(err) -} diff --git a/vendor/github.com/pkg/errors/stack.go b/vendor/github.com/pkg/errors/stack.go deleted file mode 100644 index 779a8348fb..0000000000 --- a/vendor/github.com/pkg/errors/stack.go +++ /dev/null @@ -1,177 +0,0 @@ -package errors - -import ( - "fmt" - "io" - "path" - "runtime" - "strconv" - "strings" -) - -// Frame represents a program counter inside a stack frame. -// For historical reasons if Frame is interpreted as a uintptr -// its value represents the program counter + 1. -type Frame uintptr - -// pc returns the program counter for this frame; -// multiple frames may have the same PC value. -func (f Frame) pc() uintptr { return uintptr(f) - 1 } - -// file returns the full path to the file that contains the -// function for this Frame's pc. 
-func (f Frame) file() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - file, _ := fn.FileLine(f.pc()) - return file -} - -// line returns the line number of source code of the -// function for this Frame's pc. -func (f Frame) line() int { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return 0 - } - _, line := fn.FileLine(f.pc()) - return line -} - -// name returns the name of this function, if known. -func (f Frame) name() string { - fn := runtime.FuncForPC(f.pc()) - if fn == nil { - return "unknown" - } - return fn.Name() -} - -// Format formats the frame according to the fmt.Formatter interface. -// -// %s source file -// %d source line -// %n function name -// %v equivalent to %s:%d -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+s function name and path of source file relative to the compile time -// GOPATH separated by \n\t (\n\t) -// %+v equivalent to %+s:%d -func (f Frame) Format(s fmt.State, verb rune) { - switch verb { - case 's': - switch { - case s.Flag('+'): - io.WriteString(s, f.name()) - io.WriteString(s, "\n\t") - io.WriteString(s, f.file()) - default: - io.WriteString(s, path.Base(f.file())) - } - case 'd': - io.WriteString(s, strconv.Itoa(f.line())) - case 'n': - io.WriteString(s, funcname(f.name())) - case 'v': - f.Format(s, 's') - io.WriteString(s, ":") - f.Format(s, 'd') - } -} - -// MarshalText formats a stacktrace Frame as a text string. The output is the -// same as that of fmt.Sprintf("%+v", f), but without newlines or tabs. -func (f Frame) MarshalText() ([]byte, error) { - name := f.name() - if name == "unknown" { - return []byte(name), nil - } - return []byte(fmt.Sprintf("%s %s:%d", name, f.file(), f.line())), nil -} - -// StackTrace is stack of Frames from innermost (newest) to outermost (oldest). -type StackTrace []Frame - -// Format formats the stack of Frames according to the fmt.Formatter interface. 
-// -// %s lists source files for each Frame in the stack -// %v lists the source file and line number for each Frame in the stack -// -// Format accepts flags that alter the printing of some verbs, as follows: -// -// %+v Prints filename, function, and line number for each Frame in the stack. -func (st StackTrace) Format(s fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case s.Flag('+'): - for _, f := range st { - io.WriteString(s, "\n") - f.Format(s, verb) - } - case s.Flag('#'): - fmt.Fprintf(s, "%#v", []Frame(st)) - default: - st.formatSlice(s, verb) - } - case 's': - st.formatSlice(s, verb) - } -} - -// formatSlice will format this StackTrace into the given buffer as a slice of -// Frame, only valid when called with '%s' or '%v'. -func (st StackTrace) formatSlice(s fmt.State, verb rune) { - io.WriteString(s, "[") - for i, f := range st { - if i > 0 { - io.WriteString(s, " ") - } - f.Format(s, verb) - } - io.WriteString(s, "]") -} - -// stack represents a stack of program counters. -type stack []uintptr - -func (s *stack) Format(st fmt.State, verb rune) { - switch verb { - case 'v': - switch { - case st.Flag('+'): - for _, pc := range *s { - f := Frame(pc) - fmt.Fprintf(st, "\n%+v", f) - } - } - } -} - -func (s *stack) StackTrace() StackTrace { - f := make([]Frame, len(*s)) - for i := 0; i < len(f); i++ { - f[i] = Frame((*s)[i]) - } - return f -} - -func callers() *stack { - const depth = 32 - var pcs [depth]uintptr - n := runtime.Callers(3, pcs[:]) - var st stack = pcs[0:n] - return &st -} - -// funcname removes the path prefix component of a function's name reported by func.Name(). 
-func funcname(name string) string { - i := strings.LastIndex(name, "/") - name = name[i+1:] - i = strings.Index(name, ".") - return name[i+1:] -} diff --git a/vendor/modules.txt b/vendor/modules.txt index 1bab041831..009920eb6a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -188,9 +188,6 @@ github.com/opencontainers/go-digest # github.com/phayes/permbits v0.0.0-20190612203442-39d7c581d2ee ## explicit github.com/phayes/permbits -# github.com/pkg/errors v0.9.1 -## explicit -github.com/pkg/errors # github.com/pmezard/go-difflib v1.0.0 ## explicit github.com/pmezard/go-difflib/difflib diff --git a/watch/queue/queue.go b/watch/queue/queue.go index 3fefe24a73..9d65ff7878 100644 --- a/watch/queue/queue.go +++ b/watch/queue/queue.go @@ -2,6 +2,7 @@ package queue import ( "container/list" + "errors" "fmt" "sync" @@ -11,7 +12,7 @@ import ( // ErrQueueFull is returned by a Write operation when that Write causes the // queue to reach its size limit. -var ErrQueueFull = fmt.Errorf("queue closed due to size limit") +var ErrQueueFull = errors.New("queue closed due to size limit") // LimitQueue accepts all messages into a queue for asynchronous consumption by // a sink until an upper limit of messages is reached. When that limit is diff --git a/watch/sinks.go b/watch/sinks.go index b22b4842c3..8030d998c8 100644 --- a/watch/sinks.go +++ b/watch/sinks.go @@ -1,14 +1,14 @@ package watch import ( - "fmt" + "errors" "time" events "github.com/docker/go-events" ) // ErrSinkTimeout is returned from the Write method when a sink times out. -var ErrSinkTimeout = fmt.Errorf("timeout exceeded, tearing down sink") +var ErrSinkTimeout = errors.New("timeout exceeded, tearing down sink") // timeoutSink is a sink that wraps another sink with a timeout. 
If the // embedded sink fails to complete a Write operation within the specified @@ -52,7 +52,7 @@ type dropErrClosed struct { func (s dropErrClosed) Write(event events.Event) error { err := s.sink.Write(event) - if err == events.ErrSinkClosed { + if errors.Is(err, events.ErrSinkClosed) { return nil } return err