diff --git a/go.mod b/go.mod index b171684e..ecb78260 100644 --- a/go.mod +++ b/go.mod @@ -4,15 +4,15 @@ go 1.24 require ( github.com/blang/semver v3.5.1+incompatible - github.com/containernetworking/cni v1.2.3 - github.com/containernetworking/plugins v1.6.1 + github.com/containernetworking/cni v1.3.0 + github.com/containernetworking/plugins v1.6.2 github.com/imdario/mergo v0.3.16 github.com/json-iterator/go v1.1.12 // indirect - github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.0 + github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6 github.com/onsi/ginkgo v1.16.5 - github.com/onsi/gomega v1.36.2 + github.com/onsi/gomega v1.37.0 github.com/pkg/errors v0.9.1 - gomodules.xyz/jsonpatch/v2 v2.4.0 + gomodules.xyz/jsonpatch/v2 v2.5.0 k8s.io/api v0.31.3 k8s.io/apimachinery v0.31.3 k8s.io/client-go v0.31.3 @@ -20,17 +20,16 @@ require ( k8s.io/kube-openapi v0.0.0-20241127205056-99599406b04f ) -require github.com/go-co-op/gocron/v2 v2.12.4 +require github.com/go-co-op/gocron/v2 v2.16.1 require ( github.com/emicklei/go-restful/v3 v3.11.0 // indirect github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/google/gnostic-models v0.6.9 // indirect - github.com/jonboulle/clockwork v0.4.0 // indirect + github.com/jonboulle/clockwork v0.5.0 // indirect github.com/robfig/cron/v3 v3.0.1 // indirect github.com/vishvananda/netns v0.0.4 // indirect github.com/x448/float16 v0.8.4 // indirect - golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 // indirect golang.org/x/sync v0.12.0 // indirect gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect k8s.io/gengo/v2 v2.0.0-20240826214909-a7b603a56eb7 // indirect @@ -40,7 +39,7 @@ require ( require ( github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect github.com/evanphx/json-patch v5.6.0+incompatible // indirect - github.com/fsnotify/fsnotify v1.8.0 + github.com/fsnotify/fsnotify v1.9.0 github.com/go-logr/logr v1.4.2 // indirect github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect @@ -48,7 +47,7 @@ require ( github.com/gogo/protobuf v1.3.2 // indirect github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect github.com/golang/protobuf v1.5.4 // indirect - github.com/google/go-cmp v0.6.0 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/gofuzz v1.2.0 // indirect github.com/google/uuid v1.6.0 // indirect github.com/josharian/intern v1.0.0 // indirect @@ -58,15 +57,15 @@ require ( github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect github.com/nxadm/tail v1.4.8 // indirect github.com/spf13/pflag v1.0.5 // indirect - golang.org/x/mod v0.22.0 // indirect + golang.org/x/mod v0.23.0 // indirect golang.org/x/net v0.38.0 // indirect golang.org/x/oauth2 v0.21.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.8.0 - golang.org/x/tools v0.28.0 // indirect - google.golang.org/protobuf v1.36.1 // indirect + golang.org/x/time v0.11.0 + golang.org/x/tools v0.30.0 // indirect + google.golang.org/protobuf v1.36.5 // indirect gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect diff --git a/go.sum b/go.sum index e3183746..c0d45ea9 100644 --- a/go.sum +++ b/go.sum @@ -1,9 +1,9 @@ github.com/blang/semver v3.5.1+incompatible h1:cQNTCjp13qL8KC3Nbxr/y2Bqb63oX6wdnnjpJbkM4JQ= github.com/blang/semver 
v3.5.1+incompatible/go.mod h1:kRBLl5iJ+tD4TcOOxsy/0fnwebNt5EWlYSAyrTnjyyk= -github.com/containernetworking/cni v1.2.3 h1:hhOcjNVUQTnzdRJ6alC5XF+wd9mfGIUaj8FuJbEslXM= -github.com/containernetworking/cni v1.2.3/go.mod h1:DuLgF+aPd3DzcTQTtp/Nvl1Kim23oFKdm2okJzBQA5M= -github.com/containernetworking/plugins v1.6.1 h1:bYd2bpE6hEBqexyaiI2/sst0xJ+v7pEMWrjA5qtkxiU= -github.com/containernetworking/plugins v1.6.1/go.mod h1:SP5UG3jDO9LtmfbBJdP+nl3A1atOtbj2MBOYsnaxy64= +github.com/containernetworking/cni v1.3.0 h1:v6EpN8RznAZj9765HhXQrtXgX+ECGebEYEmnuFjskwo= +github.com/containernetworking/cni v1.3.0/go.mod h1:Bs8glZjjFfGPHMw6hQu82RUgEPNGEaBb9KS5KtNMnJ4= +github.com/containernetworking/plugins v1.6.2 h1:pqP8Mq923TLyef5g97XfJ/xpDeVek4yF8A4mzy9Tc4U= +github.com/containernetworking/plugins v1.6.2/go.mod h1:SP5UG3jDO9LtmfbBJdP+nl3A1atOtbj2MBOYsnaxy64= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= @@ -15,12 +15,12 @@ github.com/evanphx/json-patch v5.6.0+incompatible h1:jBYDEEiFBPxA0v50tFdvOzQQTCv github.com/evanphx/json-patch v5.6.0+incompatible/go.mod h1:50XU6AFN0ol/bzJsmQLiYLvXMP4fmwYFNcr97nuDLSk= github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= github.com/fsnotify/fsnotify v1.4.9/go.mod h1:znqG4EE+3YCdAaPaxE2ZRY/06pZUdp0tY4IgpuI1SZQ= -github.com/fsnotify/fsnotify v1.8.0 h1:dAwr6QBTBZIkG8roQaJjGof0pp0EeF+tNV7YBP3F/8M= -github.com/fsnotify/fsnotify v1.8.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= +github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= -github.com/go-co-op/gocron/v2 v2.12.4 h1:h1HWApo3T+61UrZqEY2qG1LUpDnB7tkYITxf6YIK354= -github.com/go-co-op/gocron/v2 v2.12.4/go.mod h1:xY7bJxGazKam1cz04EebrlP4S9q4iWdiAylMGP3jY9w= +github.com/go-co-op/gocron/v2 v2.16.1 h1:ux/5zxVRveCaCuTtNI3DiOk581KC1KpJbpJFYUEVYwo= +github.com/go-co-op/gocron/v2 v2.16.1/go.mod h1:opexeOFy5BplhsKdA7bzY9zeYih8I8/WNJ4arTIFPVc= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= @@ -54,8 +54,8 @@ github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMyw github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= @@ -66,14 +66,14 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= github.com/imdario/mergo v0.3.16 h1:wwQJbIsHYGMUyLSPrEq1CT16AhnhNJQ51+4fdHUnCl4= github.com/imdario/mergo v0.3.16/go.mod h1:WBLT9ZmE3lPoWsEzCh9LPo3TiwVN+ZKEjmz+hD27ysY= -github.com/jonboulle/clockwork v0.4.0 h1:p4Cf1aMWXnXAUh8lVfewRBx1zaTSYKrKMF2g3ST4RZ4= -github.com/jonboulle/clockwork v0.4.0/go.mod h1:xgRqUGwRcjKCO1vbZUEtSLrqKoPSsUpK7fnezOII0kc= +github.com/jonboulle/clockwork v0.5.0 h1:Hyh9A8u51kptdkR+cqRpT1EebBwTn1oK9YfGYbdFz6I= +github.com/jonboulle/clockwork v0.5.0/go.mod h1:3mZlmanh0g2NDKO5TWZVJAfofYk64M7XN3SzBPjZF60= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo= -github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.0 h1:47q2PIbDYHmOaqLxgGnvpLq96v9UKDsJfNW6j/KbfpQ= -github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.0/go.mod h1:KDX0bPKeuhMakcNzLf2sWXKPNZ30wH01bsY/KJZLxFY= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6 h1:lhSaboKtal0XF2yqSw2BqNB1vUL4+a4BFe39I9G/yiM= +github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6/go.mod h1:CM7HAH5PNuIsqjMN0fGc1ydM74Uj+0VZFhob620nklw= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kr/pretty v0.2.1/go.mod h1:ipq/a2n7PKx3OHsz4KJII5eveXtPO4qwEXGdVfWzfnI= @@ -99,12 +99,12 @@ github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+W github.com/onsi/ginkgo v1.12.1/go.mod h1:zj2OWP4+oCPe1qIXoGWkgMRwljMUYCdkwsT2108oapk= github.com/onsi/ginkgo v1.16.5 h1:8xi0RTUf59SOSfEtZMvwTvXYMzG4gV23XVHOZiXNtnE= github.com/onsi/ginkgo v1.16.5/go.mod h1:+E8gABHa3K6zRBolWtd+ROzc/U5bkGt0FwiG042wbpU= -github.com/onsi/ginkgo/v2 v2.22.1 h1:QW7tbJAUDyVDVOM5dFa7qaybo+CRfR7bemlQUN6Z8aM= -github.com/onsi/ginkgo/v2 v2.22.1/go.mod h1:S6aTpoRsSq2cZOd+pssHAlKW/Q/jZt6cPrPlnj4a1xM= +github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= +github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.7.1/go.mod h1:XdKZgCCFLUoM/7CFJVPcG8C1xQ1AJ0vpAezJrB7JYyY= github.com/onsi/gomega v1.10.1/go.mod h1:iN09h71vgCQne3DLsj+A5owkum+a2tYe+TOCB1ybHNo= -github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= -github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/onsi/gomega v1.37.0 h1:CdEG8g0S133B4OswTDC/5XPSzE1OeP29QOioj2PID2Y= +github.com/onsi/gomega v1.37.0/go.mod h1:8D9+Txp43QWKhM24yyOBEdpkzN8FvJyAwecBgsU4KU0= github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= @@ -124,8 +124,8 @@ github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= 
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg= -github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/vishvananda/netns v0.0.4 h1:Oeaw1EM2JMxD51g9uhtC0D7erkIjgmj8+JZc26m1YX8= github.com/vishvananda/netns v0.0.4/go.mod h1:SpkAiCQRtJ6TvvxPnOSyH3BMl6unz3xZlaprSwhNNJM= github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= @@ -137,12 +137,10 @@ go.uber.org/goleak v1.3.0/go.mod h1:CoHD4mav9JJNrW/WLlf7HGZPjdw8EucARQHekz1X6bE= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 h1:yixxcjnhBmY0nkL253HFVIm0JsFHwrHdT3Yh6szTnfY= -golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8/go.mod h1:jj3sYF3dwk5D+ghuXyeI3r5MFf+NT2An6/9dOA95KSI= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= -golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= -golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= +golang.org/x/mod v0.23.0 h1:Zb7khfcRGKk+kqfxFaP5tZqCnDZMjC5VtUBs87Hr6QM= +golang.org/x/mod v0.23.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= @@ -176,29 +174,29 @@ golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.8.0 h1:9i3RxcPv3PZnitoVGMPDKZSq1xW1gK1Xy3ArNOGZfEg= -golang.org/x/time v0.8.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= +golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0= +golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20201224043029-2b0845dc783e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= -golang.org/x/tools v0.28.0 h1:WuB6qZ4RPCQo5aP3WdKZS7i595EdWqWR8vqJTlwTVK8= -golang.org/x/tools v0.28.0/go.mod 
h1:dcIOrVd3mfQKTgrDVQHqCPMWy6lnhfhtX3hLXYVLfRw= +golang.org/x/tools v0.30.0 h1:BgcpHewrV5AUp2G9MebG4XPFI1E2W41zU1SaqVA9vJY= +golang.org/x/tools v0.30.0/go.mod h1:c347cR/OJfw5TI+GfX7RUPNMdDRRbjvYTS0jPyvsVtY= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -gomodules.xyz/jsonpatch/v2 v2.4.0 h1:Ci3iUJyx9UeRx7CeFN8ARgGbkESwJK+KB9lLcWxY/Zw= -gomodules.xyz/jsonpatch/v2 v2.4.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= +gomodules.xyz/jsonpatch/v2 v2.5.0 h1:JELs8RLM12qJGXU4u/TO3V25KW8GreMKl9pdkk14RM0= +gomodules.xyz/jsonpatch/v2 v2.5.0/go.mod h1:AH3dM2RI6uoBZxn3LVrfvJ3E0/9dG4cSrbuBJT4moAY= google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8= google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0= google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM= google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE= google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo= google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= +google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= diff --git a/vendor/github.com/containernetworking/cni/pkg/types/types.go b/vendor/github.com/containernetworking/cni/pkg/types/types.go index 8453bb5d..f4b3ce35 100644 --- a/vendor/github.com/containernetworking/cni/pkg/types/types.go +++ b/vendor/github.com/containernetworking/cni/pkg/types/types.go @@ -56,8 +56,12 @@ func (n *IPNet) UnmarshalJSON(data []byte) error { return nil } -// NetConfType describes a network. -type NetConfType struct { +// Use PluginConf instead of NetConf, the NetConf +// backwards-compat alias will be removed in a future release. +type NetConf = PluginConf + +// PluginConf describes a plugin configuration for a specific network. +type PluginConf struct { CNIVersion string `json:"cniVersion,omitempty"` Name string `json:"name,omitempty"` @@ -73,9 +77,6 @@ type NetConfType struct { ValidAttachments []GCAttachment `json:"cni.dev/valid-attachments,omitempty"` } -// NetConf is defined as different type as custom MarshalJSON() and issue #1096 -type NetConf NetConfType - // GCAttachment is the parameters to a GC call -- namely, // the container ID and ifname pair that represents a // still-valid attachment. 
@@ -86,11 +87,8 @@ type GCAttachment struct { // Note: DNS should be omit if DNS is empty but default Marshal function // will output empty structure hence need to write a Marshal function -func (n *NetConfType) MarshalJSON() ([]byte, error) { - // use type alias to escape recursion for json.Marshal() to MarshalJSON() - type fixObjType = NetConf - - bytes, err := json.Marshal(fixObjType(*n)) +func (n *PluginConf) MarshalJSON() ([]byte, error) { + bytes, err := json.Marshal(*n) if err != nil { return nil, err } @@ -120,10 +118,10 @@ func (i *IPAM) IsEmpty() bool { type NetConfList struct { CNIVersion string `json:"cniVersion,omitempty"` - Name string `json:"name,omitempty"` - DisableCheck bool `json:"disableCheck,omitempty"` - DisableGC bool `json:"disableGC,omitempty"` - Plugins []*NetConf `json:"plugins,omitempty"` + Name string `json:"name,omitempty"` + DisableCheck bool `json:"disableCheck,omitempty"` + DisableGC bool `json:"disableGC,omitempty"` + Plugins []*PluginConf `json:"plugins,omitempty"` } // Result is an interface that provides the result of plugin execution diff --git a/vendor/github.com/containernetworking/cni/pkg/version/version.go b/vendor/github.com/containernetworking/cni/pkg/version/version.go index a4d442c8..cfb6a12f 100644 --- a/vendor/github.com/containernetworking/cni/pkg/version/version.go +++ b/vendor/github.com/containernetworking/cni/pkg/version/version.go @@ -63,7 +63,7 @@ func NewResult(version string, resultBytes []byte) (types.Result, error) { // ParsePrevResult parses a prevResult in a NetConf structure and sets // the NetConf's PrevResult member to the parsed Result object. -func ParsePrevResult(conf *types.NetConf) error { +func ParsePrevResult(conf *types.PluginConf) error { if conf.RawPrevResult == nil { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml index f4e7dbf3..7f257e99 100644 --- a/vendor/github.com/fsnotify/fsnotify/.cirrus.yml +++ b/vendor/github.com/fsnotify/fsnotify/.cirrus.yml @@ -1,7 +1,7 @@ freebsd_task: name: 'FreeBSD' freebsd_instance: - image_family: freebsd-14-1 + image_family: freebsd-14-2 install_script: - pkg update -f - pkg install -y go diff --git a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md index fa854785..6468d2cf 100644 --- a/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md +++ b/vendor/github.com/fsnotify/fsnotify/CHANGELOG.md @@ -1,6 +1,39 @@ # Changelog -1.8.0 2023-10-31 +1.9.0 2025-04-04 +---------------- + +### Changes and fixes + +- all: make BufferedWatcher buffered again ([#657]) + +- inotify: fix race when adding/removing watches while a watched path is being + deleted ([#678], [#686]) + +- inotify: don't send empty event if a watched path is unmounted ([#655]) + +- inotify: don't register duplicate watches when watching both a symlink and its + target; previously that would get "half-added" and removing the second would + panic ([#679]) + +- kqueue: fix watching relative symlinks ([#681]) + +- kqueue: correctly mark pre-existing entries when watching a link to a dir on + kqueue ([#682]) + +- illumos: don't send error if changed file is deleted while processing the + event ([#678]) + + +[#657]: https://github.com/fsnotify/fsnotify/pull/657 +[#678]: https://github.com/fsnotify/fsnotify/pull/678 +[#686]: https://github.com/fsnotify/fsnotify/pull/686 +[#655]: https://github.com/fsnotify/fsnotify/pull/655 +[#681]: https://github.com/fsnotify/fsnotify/pull/681 +[#679]: 
https://github.com/fsnotify/fsnotify/pull/679 +[#682]: https://github.com/fsnotify/fsnotify/pull/682 + +1.8.0 2024-10-31 ---------------- ### Additions diff --git a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md index e4ac2a2f..4cc40fa5 100644 --- a/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md +++ b/vendor/github.com/fsnotify/fsnotify/CONTRIBUTING.md @@ -77,6 +77,7 @@ End-of-line escapes with `\` are not supported. debug [yes/no] # Enable/disable FSNOTIFY_DEBUG (tests are run in parallel by default, so -parallel=1 is probably a good idea). + print [any strings] # Print text to stdout; for debugging. touch path mkdir [-p] dir diff --git a/vendor/github.com/fsnotify/fsnotify/README.md b/vendor/github.com/fsnotify/fsnotify/README.md index e480733d..1f4eb583 100644 --- a/vendor/github.com/fsnotify/fsnotify/README.md +++ b/vendor/github.com/fsnotify/fsnotify/README.md @@ -15,7 +15,6 @@ Platform support: | ReadDirectoryChangesW | Windows | Supported | | FEN | illumos | Supported | | fanotify | Linux 5.9+ | [Not yet](https://github.com/fsnotify/fsnotify/issues/114) | -| AHAFS | AIX | [aix branch]; experimental due to lack of maintainer and test environment | | FSEvents | macOS | [Needs support in x/sys/unix][fsevents] | | USN Journals | Windows | [Needs support in x/sys/windows][usn] | | Polling | *All* | [Not yet](https://github.com/fsnotify/fsnotify/issues/9) | @@ -25,7 +24,6 @@ untested. [fsevents]: https://github.com/fsnotify/fsnotify/issues/11#issuecomment-1279133120 [usn]: https://github.com/fsnotify/fsnotify/issues/53#issuecomment-1279829847 -[aix branch]: https://github.com/fsnotify/fsnotify/issues/353#issuecomment-1284590129 Usage ----- diff --git a/vendor/github.com/fsnotify/fsnotify/backend_fen.go b/vendor/github.com/fsnotify/fsnotify/backend_fen.go index c349c326..57fc6928 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_fen.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_fen.go @@ -9,6 +9,7 @@ package fsnotify import ( "errors" "fmt" + "io/fs" "os" "path/filepath" "sync" @@ -19,27 +20,25 @@ import ( ) type fen struct { + *shared Events chan Event Errors chan error mu sync.Mutex port *unix.EventPort - done chan struct{} // Channel for sending a "quit message" to the reader goroutine dirs map[string]Op // Explicitly watched directories watches map[string]Op // Explicitly watched non-directories } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { w := &fen{ + shared: newShared(ev, errs), Events: ev, Errors: errs, dirs: make(map[string]Op), watches: make(map[string]Op), - done: make(chan struct{}), } var err error @@ -52,49 +51,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// sendEvent attempts to send an event to the user, returning true if the event -// was put in the channel successfully and false if the watcher has been closed. -func (w *fen) sendEvent(name string, op Op) (sent bool) { - select { - case <-w.done: - return false - case w.Events <- Event{Name: name, Op: op}: - return true - } -} - -// sendError attempts to send an error to the user, returning true if the error -// was put in the channel successfully and false if the watcher has been closed. 
-func (w *fen) sendError(err error) (sent bool) { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *fen) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *fen) Close() error { - // Take the lock used by associateFile to prevent lingering events from - // being processed after the close - w.mu.Lock() - defer w.mu.Unlock() - if w.isClosed() { + if w.shared.close() { return nil } - close(w.done) return w.port.Close() } @@ -209,7 +169,7 @@ func (w *fen) readEvents() { return } // There was an error not caused by calling w.Close() - if !w.sendError(err) { + if !w.sendError(fmt.Errorf("port.Get: %w", err)) { return } } @@ -277,13 +237,13 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { isWatched := watchedDir || watchedPath if events&unix.FILE_DELETE != 0 { - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } reRegister = false } if events&unix.FILE_RENAME_FROM != 0 { - if !w.sendEvent(path, Rename) { + if !w.sendEvent(Event{Name: path, Op: Rename}) { return nil } // Don't keep watching the new file name @@ -297,7 +257,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // inotify reports a Remove event in this case, so we simulate this // here. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't keep watching the file that was removed @@ -331,7 +291,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { // get here, the sudirectory is already gone. Clearly we were watching // this path but now it is gone. Let's tell the user that it was // removed. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Suppress extra write events on removed directories; they are not @@ -346,7 +306,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if err != nil { // The symlink still exists, but the target is gone. Report the // Remove similar to above. - if !w.sendEvent(path, Remove) { + if !w.sendEvent(Event{Name: path, Op: Remove}) { return nil } // Don't return the error @@ -359,7 +319,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { return err } } else { - if !w.sendEvent(path, Write) { + if !w.sendEvent(Event{Name: path, Op: Write}) { return nil } } @@ -367,7 +327,7 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if events&unix.FILE_ATTRIB != 0 && stat != nil { // Only send Chmod if perms changed if stat.Mode().Perm() != fmode.Perm() { - if !w.sendEvent(path, Chmod) { + if !w.sendEvent(Event{Name: path, Op: Chmod}) { return nil } } @@ -376,17 +336,27 @@ func (w *fen) handleEvent(event *unix.PortEvent) error { if stat != nil { // If we get here, it means we've hit an event above that requires us to // continue watching the file or directory - return w.associateFile(path, stat, isWatched) + err := w.associateFile(path, stat, isWatched) + if errors.Is(err, fs.ErrNotExist) { + // Path may have been removed since the stat. + err = nil + } + return err } return nil } +// The directory was modified, so we must find unwatched entities and watch +// them. If something was removed from the directory, nothing will happen, as +// everything else should still be watched. func (w *fen) updateDirectory(path string) error { - // The directory was modified, so we must find unwatched entities and watch - // them. 
If something was removed from the directory, nothing will happen, - // as everything else should still be watched. files, err := os.ReadDir(path) if err != nil { + // Directory no longer exists: probably just deleted since we got the + // event. + if errors.Is(err, fs.ErrNotExist) { + return nil + } return err } @@ -401,10 +371,15 @@ func (w *fen) updateDirectory(path string) error { return err } err = w.associateFile(path, finfo, false) + if errors.Is(err, fs.ErrNotExist) { + // File may have disappeared between getting the dir listing and + // adding the port: that's okay to ignore. + continue + } if !w.sendError(err) { return nil } - if !w.sendEvent(path, Create) { + if !w.sendEvent(Event{Name: path, Op: Create}) { return nil } } @@ -430,7 +405,7 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { // has fired but we haven't processed it yet. err := w.port.DissociatePath(path) if err != nil && !errors.Is(err, unix.ENOENT) { - return err + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) } } @@ -446,14 +421,22 @@ func (w *fen) associateFile(path string, stat os.FileInfo, follow bool) error { if true { events |= unix.FILE_ATTRIB } - return w.port.AssociatePath(path, stat, events, stat.Mode()) + err := w.port.AssociatePath(path, stat, events, stat.Mode()) + if err != nil { + return fmt.Errorf("port.AssociatePath(%q): %w", path, err) + } + return nil } func (w *fen) dissociateFile(path string, stat os.FileInfo, unused bool) error { if !w.port.PathIsWatched(path) { return nil } - return w.port.DissociatePath(path) + err := w.port.DissociatePath(path) + if err != nil { + return fmt.Errorf("port.DissociatePath(%q): %w", path, err) + } + return nil } func (w *fen) WatchList() []string { diff --git a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go index 36c31169..a36cb89d 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_inotify.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_inotify.go @@ -19,6 +19,7 @@ import ( ) type inotify struct { + *shared Events chan Event Errors chan error @@ -27,8 +28,6 @@ type inotify struct { fd int inotifyFile *os.File watches *watches - done chan struct{} // Channel for sending a "quit message" to the reader goroutine - doneMu sync.Mutex doneResp chan struct{} // Channel to respond to Close // Store rename cookies in an array, with the index wrapping to 0. Almost @@ -52,7 +51,6 @@ type inotify struct { type ( watches struct { - mu sync.RWMutex wd map[uint32]*watch // wd → watch path map[string]uint32 // pathname → wd } @@ -75,34 +73,13 @@ func newWatches() *watches { } } -func (w *watches) len() int { - w.mu.RLock() - defer w.mu.RUnlock() - return len(w.wd) -} - -func (w *watches) add(ww *watch) { - w.mu.Lock() - defer w.mu.Unlock() - w.wd[ww.wd] = ww - w.path[ww.path] = ww.wd -} - -func (w *watches) remove(wd uint32) { - w.mu.Lock() - defer w.mu.Unlock() - watch := w.wd[wd] // Could have had Remove() called. See #616. 
- if watch == nil { - return - } - delete(w.path, watch.path) - delete(w.wd, wd) -} +func (w *watches) byPath(path string) *watch { return w.wd[w.path[path]] } +func (w *watches) byWd(wd uint32) *watch { return w.wd[wd] } +func (w *watches) len() int { return len(w.wd) } +func (w *watches) add(ww *watch) { w.wd[ww.wd] = ww; w.path[ww.path] = ww.wd } +func (w *watches) remove(watch *watch) { delete(w.path, watch.path); delete(w.wd, watch.wd) } func (w *watches) removePath(path string) ([]uint32, error) { - w.mu.Lock() - defer w.mu.Unlock() - path, recurse := recursivePath(path) wd, ok := w.path[path] if !ok { @@ -123,7 +100,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { wds := make([]uint32, 0, 8) wds = append(wds, wd) for p, rwd := range w.path { - if filepath.HasPrefix(p, path) { + if strings.HasPrefix(p, path) { delete(w.path, p) delete(w.wd, rwd) wds = append(wds, rwd) @@ -132,22 +109,7 @@ func (w *watches) removePath(path string) ([]uint32, error) { return wds, nil } -func (w *watches) byPath(path string) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[w.path[path]] -} - -func (w *watches) byWd(wd uint32) *watch { - w.mu.RLock() - defer w.mu.RUnlock() - return w.wd[wd] -} - func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error { - w.mu.Lock() - defer w.mu.Unlock() - var existing *watch wd, ok := w.path[path] if ok { @@ -170,11 +132,9 @@ func (w *watches) updatePath(path string, f func(*watch) (*watch, error)) error return nil } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { // Need to set nonblocking mode for SetDeadline to work, otherwise blocking // I/O operations won't terminate on close. fd, errno := unix.InotifyInit1(unix.IN_CLOEXEC | unix.IN_NONBLOCK) @@ -183,12 +143,12 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error } w := &inotify{ + shared: newShared(ev, errs), Events: ev, Errors: errs, fd: fd, inotifyFile: os.NewFile(uintptr(fd), ""), watches: newWatches(), - done: make(chan struct{}), doneResp: make(chan struct{}), } @@ -196,46 +156,10 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error return w, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *inotify) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *inotify) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *inotify) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *inotify) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() // Causes any blocking reads to return with an error, provided the file // still supports deadline operations. @@ -244,9 +168,7 @@ func (w *inotify) Close() error { return err } - // Wait for goroutine to close - <-w.doneResp - + <-w.doneResp // Wait for readEvents() to finish. 
return nil } @@ -266,6 +188,43 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } + add := func(path string, with withOpts, recurse bool) error { + var flags uint32 + if with.noFollow { + flags |= unix.IN_DONT_FOLLOW + } + if with.op.Has(Create) { + flags |= unix.IN_CREATE + } + if with.op.Has(Write) { + flags |= unix.IN_MODIFY + } + if with.op.Has(Remove) { + flags |= unix.IN_DELETE | unix.IN_DELETE_SELF + } + if with.op.Has(Rename) { + flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF + } + if with.op.Has(Chmod) { + flags |= unix.IN_ATTRIB + } + if with.op.Has(xUnportableOpen) { + flags |= unix.IN_OPEN + } + if with.op.Has(xUnportableRead) { + flags |= unix.IN_ACCESS + } + if with.op.Has(xUnportableCloseWrite) { + flags |= unix.IN_CLOSE_WRITE + } + if with.op.Has(xUnportableCloseRead) { + flags |= unix.IN_CLOSE_NOWRITE + } + return w.register(path, flags, recurse) + } + + w.mu.Lock() + defer w.mu.Unlock() path, recurse := recursivePath(path) if recurse { return filepath.WalkDir(path, func(root string, d fs.DirEntry, err error) error { @@ -289,46 +248,11 @@ func (w *inotify) AddWith(path string, opts ...addOpt) error { w.sendEvent(Event{Name: root, Op: Create}) } - return w.add(root, with, true) + return add(root, with, true) }) } - return w.add(path, with, false) -} - -func (w *inotify) add(path string, with withOpts, recurse bool) error { - var flags uint32 - if with.noFollow { - flags |= unix.IN_DONT_FOLLOW - } - if with.op.Has(Create) { - flags |= unix.IN_CREATE - } - if with.op.Has(Write) { - flags |= unix.IN_MODIFY - } - if with.op.Has(Remove) { - flags |= unix.IN_DELETE | unix.IN_DELETE_SELF - } - if with.op.Has(Rename) { - flags |= unix.IN_MOVED_TO | unix.IN_MOVED_FROM | unix.IN_MOVE_SELF - } - if with.op.Has(Chmod) { - flags |= unix.IN_ATTRIB - } - if with.op.Has(xUnportableOpen) { - flags |= unix.IN_OPEN - } - if with.op.Has(xUnportableRead) { - flags |= unix.IN_ACCESS - } - if with.op.Has(xUnportableCloseWrite) { - flags |= unix.IN_CLOSE_WRITE - } - if with.op.Has(xUnportableCloseRead) { - flags |= unix.IN_CLOSE_NOWRITE - } - return w.register(path, flags, recurse) + return add(path, with, false) } func (w *inotify) register(path string, flags uint32, recurse bool) error { @@ -342,6 +266,10 @@ func (w *inotify) register(path string, flags uint32, recurse bool) error { return nil, err } + if e, ok := w.watches.wd[uint32(wd)]; ok { + return e, nil + } + if existing == nil { return &watch{ wd: uint32(wd), @@ -365,6 +293,9 @@ func (w *inotify) Remove(name string) error { fmt.Fprintf(os.Stderr, "FSNOTIFY_DEBUG: %s Remove(%q)\n", time.Now().Format("15:04:05.000000000"), name) } + + w.mu.Lock() + defer w.mu.Unlock() return w.remove(filepath.Clean(name)) } @@ -399,13 +330,12 @@ func (w *inotify) WatchList() []string { return nil } + w.mu.Lock() + defer w.mu.Unlock() entries := make([]string, 0, w.watches.len()) - w.watches.mu.RLock() for pathname := range w.watches.path { entries = append(entries, pathname) } - w.watches.mu.RUnlock() - return entries } @@ -418,21 +348,17 @@ func (w *inotify) readEvents() { close(w.Events) }() - var ( - buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events - errno error // Syscall errno - ) + var buf [unix.SizeofInotifyEvent * 4096]byte // Buffer for a maximum of 4096 raw events for { - // See if we have been closed. 
if w.isClosed() { return } n, err := w.inotifyFile.Read(buf[:]) - switch { - case errors.Unwrap(err) == os.ErrClosed: - return - case err != nil: + if err != nil { + if errors.Is(err, os.ErrClosed) { + return + } if !w.sendError(err) { return } @@ -440,13 +366,9 @@ func (w *inotify) readEvents() { } if n < unix.SizeofInotifyEvent { - var err error + err := errors.New("notify: short read in readEvents()") // Read was too short. if n == 0 { err = io.EOF // If EOF is received. This should really never happen. - } else if n < 0 { - err = errno // If an error occurred while reading. - } else { - err = errors.New("notify: short read in readEvents()") // Read was too short. } if !w.sendError(err) { return @@ -454,134 +376,137 @@ func (w *inotify) readEvents() { continue } - // We don't know how many events we just read into the buffer - // While the offset points to at least one whole event... + // We don't know how many events we just read into the buffer While the + // offset points to at least one whole event. var offset uint32 for offset <= uint32(n-unix.SizeofInotifyEvent) { - var ( - // Point "raw" to the event in the buffer - raw = (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - mask = uint32(raw.Mask) - nameLen = uint32(raw.Len) - // Move to the next event in the buffer - next = func() { offset += unix.SizeofInotifyEvent + nameLen } - ) + // Point to the event in the buffer. + inEvent := (*unix.InotifyEvent)(unsafe.Pointer(&buf[offset])) - if mask&unix.IN_Q_OVERFLOW != 0 { + if inEvent.Mask&unix.IN_Q_OVERFLOW != 0 { if !w.sendError(ErrEventOverflow) { return } } - /// If the event happened to the watched directory or the watched - /// file, the kernel doesn't append the filename to the event, but - /// we would like to always fill the the "Name" field with a valid - /// filename. We retrieve the path of the watch from the "paths" - /// map. - watch := w.watches.byWd(uint32(raw.Wd)) - /// Can be nil if Remove() was called in another goroutine for this - /// path inbetween reading the events from the kernel and reading - /// the internal state. Not much we can do about it, so just skip. - /// See #616. - if watch == nil { - next() - continue + ev, ok := w.handleEvent(inEvent, &buf, offset) + if !ok { + return } - - name := watch.path - if nameLen > 0 { - /// Point "bytes" at the first byte of the filename - bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&buf[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] - /// The filename is padded with NULL bytes. TrimRight() gets rid of those. - name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\000") - } - - if debug { - internal.Debug(name, raw.Mask, raw.Cookie) - } - - if mask&unix.IN_IGNORED != 0 { //&& event.Op != 0 - next() - continue - } - - // inotify will automatically remove the watch on deletes; just need - // to clean our state here. - if mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { - w.watches.remove(watch.wd) - } - - // We can't really update the state when a watched path is moved; - // only IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove - // the watch. - if mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { - if watch.recurse { - next() // Do nothing - continue - } - - err := w.remove(watch.path) - if err != nil && !errors.Is(err, ErrNonExistentWatch) { - if !w.sendError(err) { - return - } - } - } - - /// Skip if we're watching both this path and the parent; the parent - /// will already send a delete so no need to do it twice. 
- if mask&unix.IN_DELETE_SELF != 0 { - if _, ok := w.watches.path[filepath.Dir(watch.path)]; ok { - next() - continue - } - } - - ev := w.newEvent(name, mask, raw.Cookie) - // Need to update watch path for recurse. - if watch.recurse { - isDir := mask&unix.IN_ISDIR == unix.IN_ISDIR - /// New directory created: set up watch on it. - if isDir && ev.Has(Create) { - err := w.register(ev.Name, watch.flags, true) - if !w.sendError(err) { - return - } - - // This was a directory rename, so we need to update all - // the children. - // - // TODO: this is of course pretty slow; we should use a - // better data structure for storing all of this, e.g. store - // children in the watch. I have some code for this in my - // kqueue refactor we can use in the future. For now I'm - // okay with this as it's not publicly available. - // Correctness first, performance second. - if ev.renamedFrom != "" { - w.watches.mu.Lock() - for k, ww := range w.watches.wd { - if k == watch.wd || ww.path == ev.Name { - continue - } - if strings.HasPrefix(ww.path, ev.renamedFrom) { - ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) - w.watches.wd[k] = ww - } - } - w.watches.mu.Unlock() - } - } - } - - /// Send the events that are not ignored on the events channel if !w.sendEvent(ev) { return } - next() + + // Move to the next event in the buffer + offset += unix.SizeofInotifyEvent + inEvent.Len } } } +func (w *inotify) handleEvent(inEvent *unix.InotifyEvent, buf *[65536]byte, offset uint32) (Event, bool) { + w.mu.Lock() + defer w.mu.Unlock() + + /// If the event happened to the watched directory or the watched file, the + /// kernel doesn't append the filename to the event, but we would like to + /// always fill the the "Name" field with a valid filename. We retrieve the + /// path of the watch from the "paths" map. + /// + /// Can be nil if Remove() was called in another goroutine for this path + /// inbetween reading the events from the kernel and reading the internal + /// state. Not much we can do about it, so just skip. See #616. + watch := w.watches.byWd(uint32(inEvent.Wd)) + if watch == nil { + return Event{}, true + } + + var ( + name = watch.path + nameLen = uint32(inEvent.Len) + ) + if nameLen > 0 { + /// Point "bytes" at the first byte of the filename + bb := *buf + bytes := (*[unix.PathMax]byte)(unsafe.Pointer(&bb[offset+unix.SizeofInotifyEvent]))[:nameLen:nameLen] + /// The filename is padded with NULL bytes. TrimRight() gets rid of those. + name += "/" + strings.TrimRight(string(bytes[0:nameLen]), "\x00") + } + + if debug { + internal.Debug(name, inEvent.Mask, inEvent.Cookie) + } + + if inEvent.Mask&unix.IN_IGNORED != 0 || inEvent.Mask&unix.IN_UNMOUNT != 0 { + w.watches.remove(watch) + return Event{}, true + } + + // inotify will automatically remove the watch on deletes; just need + // to clean our state here. + if inEvent.Mask&unix.IN_DELETE_SELF == unix.IN_DELETE_SELF { + w.watches.remove(watch) + } + + // We can't really update the state when a watched path is moved; only + // IN_MOVE_SELF is sent and not IN_MOVED_{FROM,TO}. So remove the watch. + if inEvent.Mask&unix.IN_MOVE_SELF == unix.IN_MOVE_SELF { + if watch.recurse { // Do nothing + return Event{}, true + } + + err := w.remove(watch.path) + if err != nil && !errors.Is(err, ErrNonExistentWatch) { + if !w.sendError(err) { + return Event{}, false + } + } + } + + /// Skip if we're watching both this path and the parent; the parent will + /// already send a delete so no need to do it twice. 
+ if inEvent.Mask&unix.IN_DELETE_SELF != 0 { + _, ok := w.watches.path[filepath.Dir(watch.path)] + if ok { + return Event{}, true + } + } + + ev := w.newEvent(name, inEvent.Mask, inEvent.Cookie) + // Need to update watch path for recurse. + if watch.recurse { + isDir := inEvent.Mask&unix.IN_ISDIR == unix.IN_ISDIR + /// New directory created: set up watch on it. + if isDir && ev.Has(Create) { + err := w.register(ev.Name, watch.flags, true) + if !w.sendError(err) { + return Event{}, false + } + + // This was a directory rename, so we need to update all the + // children. + // + // TODO: this is of course pretty slow; we should use a better data + // structure for storing all of this, e.g. store children in the + // watch. I have some code for this in my kqueue refactor we can use + // in the future. For now I'm okay with this as it's not publicly + // available. Correctness first, performance second. + if ev.renamedFrom != "" { + for k, ww := range w.watches.wd { + if k == watch.wd || ww.path == ev.Name { + continue + } + if strings.HasPrefix(ww.path, ev.renamedFrom) { + ww.path = strings.Replace(ww.path, ev.renamedFrom, ev.Name, 1) + w.watches.wd[k] = ww + } + } + } + } + } + + return ev, true +} + func (w *inotify) isRecursive(path string) bool { ww := w.watches.byPath(path) if ww == nil { // path could be a file, so also check the Dir. @@ -650,8 +575,8 @@ func (w *inotify) xSupports(op Op) bool { } func (w *inotify) state() { - w.watches.mu.Lock() - defer w.watches.mu.Unlock() + w.mu.Lock() + defer w.mu.Unlock() for wd, ww := range w.watches.wd { fmt.Fprintf(os.Stderr, "%4d: recurse=%t %q\n", wd, ww.recurse, ww.path) } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go index d8de5ab7..340aeec0 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_kqueue.go @@ -16,14 +16,13 @@ import ( ) type kqueue struct { + *shared Events chan Event Errors chan error kq int // File descriptor (as returned by the kqueue() syscall). closepipe [2]int // Pipe used for closing kq. watches *watches - done chan struct{} - doneMu sync.Mutex } type ( @@ -132,14 +131,18 @@ func (w *watches) byPath(path string) (watch, bool) { return info, ok } -func (w *watches) updateDirFlags(path string, flags uint32) { +func (w *watches) updateDirFlags(path string, flags uint32) bool { w.mu.Lock() defer w.mu.Unlock() - fd := w.path[path] + fd, ok := w.path[path] + if !ok { // Already deleted: don't re-set it here. + return false + } info := w.wd[fd] info.dirFlags = flags w.wd[fd] = info + return true } func (w *watches) remove(fd int, path string) bool { @@ -179,22 +182,20 @@ func (w *watches) seenBefore(path string) bool { return ok } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(0, ev, errs) -} +var defaultBufferSize = 0 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { kq, closepipe, err := newKqueue() if err != nil { return nil, err } w := &kqueue{ + shared: newShared(ev, errs), Events: ev, Errors: errs, kq: kq, closepipe: closepipe, - done: make(chan struct{}), watches: newWatches(), } @@ -210,7 +211,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error // all. 
func newKqueue() (kq int, closepipe [2]int, err error) { kq, err = unix.Kqueue() - if kq == -1 { + if err != nil { return kq, closepipe, err } @@ -239,54 +240,17 @@ func newKqueue() (kq int, closepipe [2]int, err error) { return kq, closepipe, nil } -// Returns true if the event was sent, or false if watcher is closed. -func (w *kqueue) sendEvent(e Event) bool { - select { - case <-w.done: - return false - case w.Events <- e: - return true - } -} - -// Returns true if the error was sent, or false if watcher is closed. -func (w *kqueue) sendError(err error) bool { - if err == nil { - return true - } - select { - case <-w.done: - return false - case w.Errors <- err: - return true - } -} - -func (w *kqueue) isClosed() bool { - select { - case <-w.done: - return true - default: - return false - } -} - func (w *kqueue) Close() error { - w.doneMu.Lock() - if w.isClosed() { - w.doneMu.Unlock() + if w.shared.close() { return nil } - close(w.done) - w.doneMu.Unlock() pathsToRemove := w.watches.listPaths(false) for _, name := range pathsToRemove { w.Remove(name) } - // Send "quit" message to the reader goroutine. - unix.Close(w.closepipe[1]) + unix.Close(w.closepipe[1]) // Send "quit" message to readEvents return nil } @@ -303,7 +267,7 @@ func (w *kqueue) AddWith(name string, opts ...addOpt) error { return fmt.Errorf("%w: %s", xErrUnsupported, with.op) } - _, err := w.addWatch(name, noteAllEvents) + _, err := w.addWatch(name, noteAllEvents, false) if err != nil { return err } @@ -366,7 +330,7 @@ const noteAllEvents = unix.NOTE_DELETE | unix.NOTE_WRITE | unix.NOTE_ATTRIB | un // described in kevent(2). // // Returns the real path to the file which was added, with symlinks resolved. -func (w *kqueue) addWatch(name string, flags uint32) (string, error) { +func (w *kqueue) addWatch(name string, flags uint32, listDir bool) (string, error) { if w.isClosed() { return "", ErrClosed } @@ -385,15 +349,15 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { return "", nil } - // Follow symlinks. - if fi.Mode()&os.ModeSymlink == os.ModeSymlink { + // Follow symlinks, but only for paths added with Add(), and not paths + // we're adding from internalWatch from a listdir. + if !listDir && fi.Mode()&os.ModeSymlink == os.ModeSymlink { link, err := os.Readlink(name) if err != nil { - // Return nil because Linux can add unresolvable symlinks to the - // watch list without problems, so maintain consistency with - // that. There will be no file events for broken symlinks. - // TODO: more specific check; returns os.PathError; ENOENT? 
- return "", nil + return "", err + } + if !filepath.IsAbs(link) { + link = filepath.Join(filepath.Dir(name), link) } _, alreadyWatching = w.watches.byPath(link) @@ -408,7 +372,7 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { name = link fi, err = os.Lstat(name) if err != nil { - return "", nil + return "", err } } @@ -422,7 +386,6 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if errors.Is(err, unix.EINTR) { continue } - return "", err } @@ -444,10 +407,16 @@ func (w *kqueue) addWatch(name string, flags uint32) (string, error) { if info.isDir { watchDir := (flags&unix.NOTE_WRITE) == unix.NOTE_WRITE && (!alreadyWatching || (info.dirFlags&unix.NOTE_WRITE) != unix.NOTE_WRITE) - w.watches.updateDirFlags(name, flags) + if !w.watches.updateDirFlags(name, flags) { + return "", nil + } if watchDir { - if err := w.watchDirectoryFiles(name); err != nil { + d := name + if info.linkName != "" { + d = info.linkName + } + if err := w.watchDirectoryFiles(d); err != nil { return "", err } } @@ -644,19 +613,22 @@ func (w *kqueue) dirChange(dir string) error { if errors.Is(err, os.ErrNotExist) { return nil } - return fmt.Errorf("fsnotify.dirChange: %w", err) + return fmt.Errorf("fsnotify.dirChange %q: %w", dir, err) } for _, f := range files { fi, err := f.Info() if err != nil { + if errors.Is(err, os.ErrNotExist) { + return nil + } return fmt.Errorf("fsnotify.dirChange: %w", err) } err = w.sendCreateIfNew(filepath.Join(dir, fi.Name()), fi) if err != nil { // Don't need to send an error if this file isn't readable. - if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) { + if errors.Is(err, unix.EACCES) || errors.Is(err, unix.EPERM) || errors.Is(err, os.ErrNotExist) { return nil } return fmt.Errorf("fsnotify.dirChange: %w", err) @@ -688,11 +660,11 @@ func (w *kqueue) internalWatch(name string, fi os.FileInfo) (string, error) { // mimic Linux providing delete events for subdirectories, but preserve // the flags used if currently watching subdirectory info, _ := w.watches.byPath(name) - return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME) + return w.addWatch(name, info.dirFlags|unix.NOTE_DELETE|unix.NOTE_RENAME, true) } - // watch file to mimic Linux inotify - return w.addWatch(name, noteAllEvents) + // Watch file to mimic Linux inotify. + return w.addWatch(name, noteAllEvents, true) } // Register events with the queue. @@ -722,9 +694,9 @@ func (w *kqueue) read(events []unix.Kevent_t) ([]unix.Kevent_t, error) { } func (w *kqueue) xSupports(op Op) bool { - if runtime.GOOS == "freebsd" { - //return true // Supports everything. - } + //if runtime.GOOS == "freebsd" { + // return true // Supports everything. 
+ //} if op.Has(xUnportableOpen) || op.Has(xUnportableRead) || op.Has(xUnportableCloseWrite) || op.Has(xUnportableCloseRead) { return false diff --git a/vendor/github.com/fsnotify/fsnotify/backend_other.go b/vendor/github.com/fsnotify/fsnotify/backend_other.go index 5eb5dbc6..b8c0ad72 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_other.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_other.go @@ -9,12 +9,11 @@ type other struct { Errors chan error } +var defaultBufferSize = 0 + func newBackend(ev chan Event, errs chan error) (backend, error) { return nil, errors.New("fsnotify not supported on the current platform") } -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { - return newBackend(ev, errs) -} func (w *other) Close() error { return nil } func (w *other) WatchList() []string { return nil } func (w *other) Add(name string) error { return nil } diff --git a/vendor/github.com/fsnotify/fsnotify/backend_windows.go b/vendor/github.com/fsnotify/fsnotify/backend_windows.go index c54a6308..3433642d 100644 --- a/vendor/github.com/fsnotify/fsnotify/backend_windows.go +++ b/vendor/github.com/fsnotify/fsnotify/backend_windows.go @@ -28,18 +28,16 @@ type readDirChangesW struct { port windows.Handle // Handle to completion port input chan *input // Inputs to the reader are sent on this channel - quit chan chan<- error + done chan chan<- error mu sync.Mutex // Protects access to watches, closed watches watchMap // Map of watches (key: i-number) closed bool // Set to true when Close() is first called } -func newBackend(ev chan Event, errs chan error) (backend, error) { - return newBufferedBackend(50, ev, errs) -} +var defaultBufferSize = 50 -func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error) { +func newBackend(ev chan Event, errs chan error) (backend, error) { port, err := windows.CreateIoCompletionPort(windows.InvalidHandle, 0, 0, 0) if err != nil { return nil, os.NewSyscallError("CreateIoCompletionPort", err) @@ -50,7 +48,7 @@ func newBufferedBackend(sz uint, ev chan Event, errs chan error) (backend, error port: port, watches: make(watchMap), input: make(chan *input, 1), - quit: make(chan chan<- error, 1), + done: make(chan chan<- error, 1), } go w.readEvents() return w, nil @@ -70,8 +68,8 @@ func (w *readDirChangesW) sendEvent(name, renamedFrom string, mask uint64) bool event := w.newEvent(name, uint32(mask)) event.renamedFrom = renamedFrom select { - case ch := <-w.quit: - w.quit <- ch + case ch := <-w.done: + w.done <- ch case w.Events <- event: } return true @@ -83,10 +81,10 @@ func (w *readDirChangesW) sendError(err error) bool { return true } select { + case <-w.done: + return false case w.Errors <- err: return true - case <-w.quit: - return false } } @@ -99,9 +97,9 @@ func (w *readDirChangesW) Close() error { w.closed = true w.mu.Unlock() - // Send "quit" message to the reader goroutine + // Send "done" message to the reader goroutine ch := make(chan error) - w.quit <- ch + w.done <- ch if err := w.wakeupReader(); err != nil { return err } @@ -495,7 +493,7 @@ func (w *readDirChangesW) readEvents() { watch := (*watch)(unsafe.Pointer(ov)) if watch == nil { select { - case ch := <-w.quit: + case ch := <-w.done: w.mu.Lock() var indexes []indexMap for _, index := range w.watches { diff --git a/vendor/github.com/fsnotify/fsnotify/fsnotify.go b/vendor/github.com/fsnotify/fsnotify/fsnotify.go index 0760efe9..f64be4bf 100644 --- a/vendor/github.com/fsnotify/fsnotify/fsnotify.go +++ 
b/vendor/github.com/fsnotify/fsnotify/fsnotify.go @@ -244,12 +244,13 @@ var ( // ErrUnsupported is returned by AddWith() when WithOps() specified an // Unportable event that's not supported on this platform. + //lint:ignore ST1012 not relevant xErrUnsupported = errors.New("fsnotify: not supported with this backend") ) // NewWatcher creates a new Watcher. func NewWatcher() (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) + ev, errs := make(chan Event, defaultBufferSize), make(chan error) b, err := newBackend(ev, errs) if err != nil { return nil, err @@ -266,8 +267,8 @@ func NewWatcher() (*Watcher, error) { // cases, and whenever possible you will be better off increasing the kernel // buffers instead of adding a large userspace buffer. func NewBufferedWatcher(sz uint) (*Watcher, error) { - ev, errs := make(chan Event), make(chan error) - b, err := newBufferedBackend(sz, ev, errs) + ev, errs := make(chan Event, sz), make(chan error) + b, err := newBackend(ev, errs) if err != nil { return nil, err } @@ -337,7 +338,8 @@ func (w *Watcher) Close() error { return w.b.Close() } // WatchList returns all paths explicitly added with [Watcher.Add] (and are not // yet removed). // -// Returns nil if [Watcher.Close] was called. +// The order is undefined, and may differ per call. Returns nil if +// [Watcher.Close] was called. func (w *Watcher) WatchList() []string { return w.b.WatchList() } // Supports reports if all the listed operations are supported by this platform. diff --git a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go index b0eab100..0b01bc18 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/darwin.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/darwin.go @@ -9,14 +9,14 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 -// Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ func SetRlimit() { + // Go 1.19 will do this automatically: https://go-review.googlesource.com/c/go/+/393354/ var l syscall.Rlimit err := syscall.Getrlimit(syscall.RLIMIT_NOFILE, &l) if err == nil && l.Cur != l.Max { diff --git a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go index 547df1df..5ac8b507 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/freebsd.go @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/vendor/github.com/fsnotify/fsnotify/internal/unix.go b/vendor/github.com/fsnotify/fsnotify/internal/unix.go index 30976ce9..b251fb80 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/unix.go +++ b/vendor/github.com/fsnotify/fsnotify/internal/unix.go @@ -1,4 +1,4 @@ -//go:build !windows && !darwin && !freebsd +//go:build !windows && !darwin && !freebsd && !plan9 package internal @@ -9,8 +9,8 @@ import ( ) var ( - SyscallEACCES = syscall.EACCES - UnixEACCES = unix.EACCES + ErrSyscallEACCES = syscall.EACCES + ErrUnixEACCES = unix.EACCES ) var maxfiles uint64 diff --git a/vendor/github.com/fsnotify/fsnotify/internal/windows.go b/vendor/github.com/fsnotify/fsnotify/internal/windows.go index a72c6495..896bc2e5 100644 --- a/vendor/github.com/fsnotify/fsnotify/internal/windows.go +++ 
b/vendor/github.com/fsnotify/fsnotify/internal/windows.go @@ -10,8 +10,8 @@ import ( // Just a dummy. var ( - SyscallEACCES = errors.New("dummy") - UnixEACCES = errors.New("dummy") + ErrSyscallEACCES = errors.New("dummy") + ErrUnixEACCES = errors.New("dummy") ) func SetRlimit() {} diff --git a/vendor/github.com/fsnotify/fsnotify/shared.go b/vendor/github.com/fsnotify/fsnotify/shared.go new file mode 100644 index 00000000..3ee9b58f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/shared.go @@ -0,0 +1,64 @@ +package fsnotify + +import "sync" + +type shared struct { + Events chan Event + Errors chan error + done chan struct{} + mu sync.Mutex +} + +func newShared(ev chan Event, errs chan error) *shared { + return &shared{ + Events: ev, + Errors: errs, + done: make(chan struct{}), + } +} + +// Returns true if the event was sent, or false if watcher is closed. +func (w *shared) sendEvent(e Event) bool { + if e.Op == 0 { + return true + } + select { + case <-w.done: + return false + case w.Events <- e: + return true + } +} + +// Returns true if the error was sent, or false if watcher is closed. +func (w *shared) sendError(err error) bool { + if err == nil { + return true + } + select { + case <-w.done: + return false + case w.Errors <- err: + return true + } +} + +func (w *shared) isClosed() bool { + select { + case <-w.done: + return true + default: + return false + } +} + +// Mark as closed; returns true if it was already closed. +func (w *shared) close() bool { + w.mu.Lock() + defer w.mu.Unlock() + if w.isClosed() { + return true + } + close(w.done) + return false +} diff --git a/vendor/github.com/fsnotify/fsnotify/staticcheck.conf b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf new file mode 100644 index 00000000..8fa7351f --- /dev/null +++ b/vendor/github.com/fsnotify/fsnotify/staticcheck.conf @@ -0,0 +1,3 @@ +checks = ['all', + '-U1000', # Don't complain about unused functions. +] diff --git a/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml b/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml index 9d6ae5d7..e4f0cc10 100644 --- a/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml +++ b/vendor/github.com/go-co-op/gocron/v2/.golangci.yaml @@ -20,10 +20,9 @@ issues: linters: enable: - bodyclose - - exportloopref + - copyloopvar - gofumpt - goimports - - gosec - gosimple - govet - ineffassign @@ -39,6 +38,5 @@ output: - format: colored-line-number print-issued-lines: true print-linter-name: true - uniq-by-line: true path-prefix: "" sort-results: true diff --git a/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml b/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml index 99b237e3..9e57a003 100644 --- a/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml +++ b/vendor/github.com/go-co-op/gocron/v2/.pre-commit-config.yaml @@ -12,7 +12,7 @@ repos: - id: end-of-file-fixer - id: trailing-whitespace - repo: https://github.com/golangci/golangci-lint - rev: v1.55.2 + rev: v1.64.5 hooks: - id: golangci-lint - repo: https://github.com/TekWizely/pre-commit-golang diff --git a/vendor/github.com/go-co-op/gocron/v2/Makefile b/vendor/github.com/go-co-op/gocron/v2/Makefile index abaf708a..c5ba90b2 100644 --- a/vendor/github.com/go-co-op/gocron/v2/Makefile +++ b/vendor/github.com/go-co-op/gocron/v2/Makefile @@ -6,7 +6,7 @@ fmt: @go list -f {{.Dir}} ./... 
| xargs -I{} gofmt -w -s {} lint: - @grep "^func " example_test.go | sort -c + @grep "^func [a-zA-Z]" example_test.go | sort -c @golangci-lint run test: @@ -16,7 +16,7 @@ test_coverage: @go test -race -v $(GO_FLAGS) -count=1 -coverprofile=coverage.out -covermode=atomic $(GO_PKGS) test_ci: - @TEST_ENV=ci go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) + @go test -race -v $(GO_FLAGS) -count=1 $(GO_PKGS) mocks: @go generate ./... diff --git a/vendor/github.com/go-co-op/gocron/v2/README.md b/vendor/github.com/go-co-op/gocron/v2/README.md index 4a1de758..5dfb5af5 100644 --- a/vendor/github.com/go-co-op/gocron/v2/README.md +++ b/vendor/github.com/go-co-op/gocron/v2/README.md @@ -151,6 +151,7 @@ The provided NewLogger uses the standard library's log package. ### Metrics Metrics may be collected from the execution of each job. - [**Monitor**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#Monitor): +- [**MonitorStatus**](https://pkg.go.dev/github.com/go-co-op/gocron/v2#MonitorStatus) (includes status and error (if any) of the Job) A monitor can be used to collect metrics for each job from a scheduler. - Implementations: [go-co-op monitors](https://github.com/go-co-op?q=-monitor&type=all&language=&sort=) (don't see what you need? request on slack to get a repo created to contribute it!) @@ -168,8 +169,20 @@ We appreciate the support for free and open source software! This project is supported by: -- [Jetbrains](https://www.jetbrains.com/?from=gocron) -- [Sentry](https://sentry.io/welcome/) +[Jetbrains](https://www.jetbrains.com/?from=gocron) + +![JetBrains logo](https://resources.jetbrains.com/storage/products/company/brand/logos/jetbrains.png) + + +[Sentry](https://sentry.io/welcome/) + +

## Star History diff --git a/vendor/github.com/go-co-op/gocron/v2/errors.go b/vendor/github.com/go-co-op/gocron/v2/errors.go index d160860b..93946e22 100644 --- a/vendor/github.com/go-co-op/gocron/v2/errors.go +++ b/vendor/github.com/go-co-op/gocron/v2/errors.go @@ -39,6 +39,7 @@ var ( ErrWeeklyJobMinutesSeconds = fmt.Errorf("gocron: WeeklyJob: atTimes minutes and seconds must be between 0 and 59 inclusive") ErrPanicRecovered = fmt.Errorf("gocron: panic recovered") ErrWithClockNil = fmt.Errorf("gocron: WithClock: clock must not be nil") + ErrWithContextNil = fmt.Errorf("gocron: WithContext: context must not be nil") ErrWithDistributedElectorNil = fmt.Errorf("gocron: WithDistributedElector: elector must not be nil") ErrWithDistributedLockerNil = fmt.Errorf("gocron: WithDistributedLocker: locker must not be nil") ErrWithDistributedJobLockerNil = fmt.Errorf("gocron: WithDistributedJobLocker: locker must not be nil") diff --git a/vendor/github.com/go-co-op/gocron/v2/executor.go b/vendor/github.com/go-co-op/gocron/v2/executor.go index af4b7c98..37fccb47 100644 --- a/vendor/github.com/go-co-op/gocron/v2/executor.go +++ b/vendor/github.com/go-co-op/gocron/v2/executor.go @@ -31,8 +31,13 @@ type executor struct { // used to request jobs from the scheduler jobOutRequest chan jobOutRequest + // sends out job needs to update the next runs + jobUpdateNextRuns chan uuid.UUID + // used by the executor to receive a stop signal from the scheduler stopCh chan struct{} + // ensure that stop runs before the next call to start and only runs once + stopOnce *sync.Once // the timeout value when stopping stopTimeout time.Duration // used to signal that the executor has completed shutdown @@ -49,6 +54,8 @@ type executor struct { locker Locker // monitor for reporting metrics monitor Monitor + // monitorStatus for reporting metrics + monitorStatus MonitorStatus } type jobIn struct { @@ -83,6 +90,7 @@ func (e *executor) start() { // any other uses within the executor should create a context // using the executor context as parent. e.ctx, e.cancel = context.WithCancel(context.Background()) + e.stopOnce = &sync.Once{} // the standardJobsWg tracks standardJobsWg := &waitGroupWithMutex{} @@ -126,7 +134,7 @@ func (e *executor) start() { // spin off into a goroutine to unblock the executor and // allow for processing for more work - go func() { + go func(executorCtx context.Context) { // make sure to cancel the above context per the docs // // Canceling this context releases resources associated with it, so code should // // call cancel as soon as the operations running in this Context complete. 
@@ -206,8 +214,7 @@ func (e *executor) start() { } } else { select { - case <-e.stopCh: - e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) + case <-executorCtx.Done(): return default: } @@ -223,7 +230,7 @@ func (e *executor) start() { }(*j) } } - }() + }(e.ctx) case <-e.stopCh: e.stop(standardJobsWg, singletonJobsWg, limitModeJobsWg) return @@ -245,6 +252,14 @@ func (e *executor) sendOutForRescheduling(jIn *jobIn) { jIn.shouldSendOut = false } +func (e *executor) sendOutForNextRunUpdate(jIn *jobIn) { + select { + case e.jobUpdateNextRuns <- jIn.id: + case <-e.ctx.Done(): + return + } +} + func (e *executor) limitModeRunner(name string, in chan jobIn, wg *waitGroupWithMutex, limitMode LimitMode, rescheduleLimiter chan struct{}) { e.logger.Debug("gocron: limitModeRunner starting", "name", name) for { @@ -368,27 +383,42 @@ func (e *executor) runJob(j internalJob, jIn jobIn) { e.incrementJobCounter(j, Skip) return } - } else if j.locker != nil { + } else if !j.disabledLocker && j.locker != nil { lock, err := j.locker.Lock(j.ctx, j.name) if err != nil { _ = callJobFuncWithParams(j.afterLockError, j.id, j.name, err) e.sendOutForRescheduling(&jIn) e.incrementJobCounter(j, Skip) + e.sendOutForNextRunUpdate(&jIn) return } defer func() { _ = lock.Unlock(j.ctx) }() - } else if e.locker != nil { + } else if !j.disabledLocker && e.locker != nil { lock, err := e.locker.Lock(j.ctx, j.name) if err != nil { _ = callJobFuncWithParams(j.afterLockError, j.id, j.name, err) e.sendOutForRescheduling(&jIn) e.incrementJobCounter(j, Skip) + e.sendOutForNextRunUpdate(&jIn) return } defer func() { _ = lock.Unlock(j.ctx) }() } + _ = callJobFuncWithParams(j.beforeJobRuns, j.id, j.name) + err := callJobFuncWithParams(j.beforeJobRunsSkipIfBeforeFuncErrors, j.id, j.name) + if err != nil { + e.sendOutForRescheduling(&jIn) + + select { + case e.jobsOutCompleted <- j.id: + case <-e.ctx.Done(): + } + + return + } + e.sendOutForRescheduling(&jIn) select { case e.jobsOutCompleted <- j.id: @@ -396,7 +426,6 @@ func (e *executor) runJob(j internalJob, jIn jobIn) { } startTime := time.Now() - var err error if j.afterJobRunsWithPanic != nil { err = e.callJobWithRecover(j) } else { @@ -406,9 +435,11 @@ func (e *executor) runJob(j internalJob, jIn jobIn) { if err != nil { _ = callJobFuncWithParams(j.afterJobRunsWithError, j.id, j.name, err) e.incrementJobCounter(j, Fail) + e.recordJobTimingWithStatus(startTime, time.Now(), j, Fail, err) } else { _ = callJobFuncWithParams(j.afterJobRuns, j.id, j.name) e.incrementJobCounter(j, Success) + e.recordJobTimingWithStatus(startTime, time.Now(), j, Success, nil) } } @@ -431,6 +462,12 @@ func (e *executor) recordJobTiming(start time.Time, end time.Time, j internalJob } } +func (e *executor) recordJobTimingWithStatus(start time.Time, end time.Time, j internalJob, status JobStatus, err error) { + if e.monitorStatus != nil { + e.monitorStatus.RecordJobTimingWithStatus(start, end, j.id, j.name, j.tags, status, err) + } +} + func (e *executor) incrementJobCounter(j internalJob, status JobStatus) { if e.monitor != nil { e.monitor.IncrementJob(j.id, j.name, j.tags, status) @@ -438,86 +475,88 @@ func (e *executor) incrementJobCounter(j internalJob, status JobStatus) { } func (e *executor) stop(standardJobsWg, singletonJobsWg, limitModeJobsWg *waitGroupWithMutex) { - e.logger.Debug("gocron: stopping executor") - // we've been asked to stop. This is either because the scheduler has been told - // to stop all jobs or the scheduler has been asked to completely shutdown. 
- // - // cancel tells all the functions to stop their work and send in a done response - e.cancel() + e.stopOnce.Do(func() { + e.logger.Debug("gocron: stopping executor") + // we've been asked to stop. This is either because the scheduler has been told + // to stop all jobs or the scheduler has been asked to completely shutdown. + // + // cancel tells all the functions to stop their work and send in a done response + e.cancel() - // the wait for job channels are used to report back whether we successfully waited - // for all jobs to complete or if we hit the configured timeout. - waitForJobs := make(chan struct{}, 1) - waitForSingletons := make(chan struct{}, 1) - waitForLimitMode := make(chan struct{}, 1) + // the wait for job channels are used to report back whether we successfully waited + // for all jobs to complete or if we hit the configured timeout. + waitForJobs := make(chan struct{}, 1) + waitForSingletons := make(chan struct{}, 1) + waitForLimitMode := make(chan struct{}, 1) - // the waiter context is used to cancel the functions waiting on jobs. - // this is done to avoid goroutine leaks. - waiterCtx, waiterCancel := context.WithCancel(context.Background()) + // the waiter context is used to cancel the functions waiting on jobs. + // this is done to avoid goroutine leaks. + waiterCtx, waiterCancel := context.WithCancel(context.Background()) - // wait for standard jobs to complete - go func() { - e.logger.Debug("gocron: waiting for standard jobs to complete") + // wait for standard jobs to complete go func() { - // this is done in a separate goroutine, so we aren't - // blocked by the WaitGroup's Wait call in the event - // that the waiter context is cancelled. - // This particular goroutine could leak in the event that - // some long-running standard job doesn't complete. - standardJobsWg.Wait() - e.logger.Debug("gocron: standard jobs completed") - waitForJobs <- struct{}{} + e.logger.Debug("gocron: waiting for standard jobs to complete") + go func() { + // this is done in a separate goroutine, so we aren't + // blocked by the WaitGroup's Wait call in the event + // that the waiter context is cancelled. + // This particular goroutine could leak in the event that + // some long-running standard job doesn't complete. 
+ standardJobsWg.Wait() + e.logger.Debug("gocron: standard jobs completed") + waitForJobs <- struct{}{} + }() + <-waiterCtx.Done() }() - <-waiterCtx.Done() - }() - // wait for per job singleton limit mode runner jobs to complete - go func() { - e.logger.Debug("gocron: waiting for singleton jobs to complete") + // wait for per job singleton limit mode runner jobs to complete go func() { - singletonJobsWg.Wait() - e.logger.Debug("gocron: singleton jobs completed") - waitForSingletons <- struct{}{} + e.logger.Debug("gocron: waiting for singleton jobs to complete") + go func() { + singletonJobsWg.Wait() + e.logger.Debug("gocron: singleton jobs completed") + waitForSingletons <- struct{}{} + }() + <-waiterCtx.Done() }() - <-waiterCtx.Done() - }() - // wait for limit mode runners to complete - go func() { - e.logger.Debug("gocron: waiting for limit mode jobs to complete") + // wait for limit mode runners to complete go func() { - limitModeJobsWg.Wait() - e.logger.Debug("gocron: limitMode jobs completed") - waitForLimitMode <- struct{}{} + e.logger.Debug("gocron: waiting for limit mode jobs to complete") + go func() { + limitModeJobsWg.Wait() + e.logger.Debug("gocron: limitMode jobs completed") + waitForLimitMode <- struct{}{} + }() + <-waiterCtx.Done() }() - <-waiterCtx.Done() - }() - // now either wait for all the jobs to complete, - // or hit the timeout. - var count int - timeout := time.Now().Add(e.stopTimeout) - for time.Now().Before(timeout) && count < 3 { - select { - case <-waitForJobs: - count++ - case <-waitForSingletons: - count++ - case <-waitForLimitMode: - count++ - default: + // now either wait for all the jobs to complete, + // or hit the timeout. + var count int + timeout := time.Now().Add(e.stopTimeout) + for time.Now().Before(timeout) && count < 3 { + select { + case <-waitForJobs: + count++ + case <-waitForSingletons: + count++ + case <-waitForLimitMode: + count++ + default: + } } - } - if count < 3 { - e.done <- ErrStopJobsTimedOut - e.logger.Debug("gocron: executor stopped - timed out") - } else { - e.done <- nil - e.logger.Debug("gocron: executor stopped") - } - waiterCancel() + if count < 3 { + e.done <- ErrStopJobsTimedOut + e.logger.Debug("gocron: executor stopped - timed out") + } else { + e.done <- nil + e.logger.Debug("gocron: executor stopped") + } + waiterCancel() - if e.limitMode != nil { - e.limitMode.started = false - } + if e.limitMode != nil { + e.limitMode.started = false + } + }) } diff --git a/vendor/github.com/go-co-op/gocron/v2/job.go b/vendor/github.com/go-co-op/gocron/v2/job.go index 890889ed..1a739159 100644 --- a/vendor/github.com/go-co-op/gocron/v2/job.go +++ b/vendor/github.com/go-co-op/gocron/v2/job.go @@ -6,23 +6,25 @@ import ( "errors" "fmt" "math/rand" + "slices" "strings" "time" "github.com/google/uuid" "github.com/jonboulle/clockwork" "github.com/robfig/cron/v3" - "golang.org/x/exp/slices" ) // internalJob stores the information needed by the scheduler // to manage scheduling, starting and stopping the job type internalJob struct { - ctx context.Context - cancel context.CancelFunc - id uuid.UUID - name string - tags []string + ctx context.Context + parentCtx context.Context + cancel context.CancelFunc + id uuid.UUID + name string + tags []string + cron Cron jobSchedule // as some jobs may queue up, it's possible to @@ -40,11 +42,13 @@ type internalJob struct { startImmediately bool stopTime time.Time // event listeners - afterJobRuns func(jobID uuid.UUID, jobName string) - beforeJobRuns func(jobID uuid.UUID, jobName string) - 
afterJobRunsWithError func(jobID uuid.UUID, jobName string, err error) - afterJobRunsWithPanic func(jobID uuid.UUID, jobName string, recoverData any) - afterLockError func(jobID uuid.UUID, jobName string, err error) + afterJobRuns func(jobID uuid.UUID, jobName string) + beforeJobRuns func(jobID uuid.UUID, jobName string) + beforeJobRunsSkipIfBeforeFuncErrors func(jobID uuid.UUID, jobName string) error + afterJobRunsWithError func(jobID uuid.UUID, jobName string, err error) + afterJobRunsWithPanic func(jobID uuid.UUID, jobName string, recoverData any) + afterLockError func(jobID uuid.UUID, jobName string, err error) + disabledLocker bool locker Locker } @@ -80,6 +84,10 @@ type task struct { type Task func() task // NewTask provides the job's task function and parameters. +// If you set the first argument of your Task func to be a context.Context, +// gocron will pass in a context (either the default Job context, or one +// provided via WithContext) to the job and will cancel the context on shutdown. +// This allows you to listen for and handle cancellation within your job. func NewTask(function any, parameters ...any) Task { return func() task { return task{ @@ -97,6 +105,20 @@ type limitRunsTo struct { runCount uint } +// ----------------------------------------------- +// ----------------------------------------------- +// --------------- Custom Cron ------------------- +// ----------------------------------------------- +// ----------------------------------------------- + +// Cron defines the interface that must be +// implemented to provide a custom cron implementation for +// the job. Pass in the implementation using the JobOption WithCronImplementation. +type Cron interface { + IsValid(crontab string, location *time.Location, now time.Time) error + Next(lastRun time.Time) time.Time +} + // ----------------------------------------------- // ----------------------------------------------- // --------------- Job Variants ------------------ @@ -109,21 +131,29 @@ type JobDefinition interface { setup(j *internalJob, l *time.Location, now time.Time) error } -var _ JobDefinition = (*cronJobDefinition)(nil) +// Default cron implementation -type cronJobDefinition struct { - crontab string - withSeconds bool +func newDefaultCronImplementation(withSeconds bool) Cron { + return &defaultCron{ + withSeconds: withSeconds, + } } -func (c cronJobDefinition) setup(j *internalJob, location *time.Location, now time.Time) error { +var _ Cron = (*defaultCron)(nil) + +type defaultCron struct { + cronSchedule cron.Schedule + withSeconds bool +} + +func (c *defaultCron) IsValid(crontab string, location *time.Location, now time.Time) error { var withLocation string - if strings.HasPrefix(c.crontab, "TZ=") || strings.HasPrefix(c.crontab, "CRON_TZ=") { - withLocation = c.crontab + if strings.HasPrefix(crontab, "TZ=") || strings.HasPrefix(crontab, "CRON_TZ=") { + withLocation = crontab } else { // since the user didn't provide a timezone default to the location // passed in by the scheduler. 
Default: time.Local - withLocation = fmt.Sprintf("CRON_TZ=%s %s", location.String(), c.crontab) + withLocation = fmt.Sprintf("CRON_TZ=%s %s", location.String(), crontab) } var ( @@ -143,8 +173,32 @@ func (c cronJobDefinition) setup(j *internalJob, location *time.Location, now ti if cronSchedule.Next(now).IsZero() { return ErrCronJobInvalid } + c.cronSchedule = cronSchedule + return nil +} - j.jobSchedule = &cronJob{cronSchedule: cronSchedule} +func (c *defaultCron) Next(lastRun time.Time) time.Time { + return c.cronSchedule.Next(lastRun) +} + +// default cron job implementation +var _ JobDefinition = (*cronJobDefinition)(nil) + +type cronJobDefinition struct { + crontab string + cron Cron +} + +func (c cronJobDefinition) setup(j *internalJob, location *time.Location, now time.Time) error { + if j.cron != nil { + c.cron = j.cron + } + + if err := c.cron.IsValid(c.crontab, location, now); err != nil { + return err + } + + j.jobSchedule = &cronJob{crontab: c.crontab, cronSchedule: c.cron} return nil } @@ -156,8 +210,8 @@ func (c cronJobDefinition) setup(j *internalJob, location *time.Location, now ti // `CRON_TZ=America/Chicago * * * * *` func CronJob(crontab string, withSeconds bool) JobDefinition { return cronJobDefinition{ - crontab: crontab, - withSeconds: withSeconds, + crontab: crontab, + cron: newDefaultCronImplementation(withSeconds), } } @@ -311,8 +365,7 @@ type Weekdays func() []time.Weekday // NewWeekdays provide the days of the week the job should run. func NewWeekdays(weekday time.Weekday, weekdays ...time.Weekday) Weekdays { return func() []time.Weekday { - weekdays = append(weekdays, weekday) - return weekdays + return append([]time.Weekday{weekday}, weekdays...) } } @@ -363,11 +416,9 @@ func (m monthlyJobDefinition) setup(j *internalJob, location *time.Location, _ t } } daysStart = removeSliceDuplicatesInt(daysStart) - slices.Sort(daysStart) ms.days = daysStart daysEnd = removeSliceDuplicatesInt(daysEnd) - slices.Sort(daysEnd) ms.daysFromEnd = daysEnd atTimesDate, err := convertAtTimesToDateTime(m.atTimes, location) @@ -400,8 +451,7 @@ type DaysOfTheMonth func() days // -5 == 5 days before the end of the month. func NewDaysOfTheMonth(day int, moreDays ...int) DaysOfTheMonth { return func() days { - moreDays = append(moreDays, day) - return moreDays + return append([]int{day}, moreDays...) } } @@ -413,6 +463,14 @@ func (a atTime) time(location *time.Location) time.Time { return time.Date(0, 0, 0, int(a.hours), int(a.minutes), int(a.seconds), 0, location) } +// TimeFromAtTime is a helper function to allow converting AtTime into a time.Time value +// Note: the time.Time value will have zero values for all Time fields except Hours, Minutes, Seconds. +// +// For example: time.Date(0, 0, 0, 1, 1, 1, 0, time.UTC) +func TimeFromAtTime(at AtTime, loc *time.Location) time.Time { + return at().time(loc) +} + // AtTime defines a function that returns the internal atTime type AtTime func() atTime @@ -431,8 +489,7 @@ type AtTimes func() []AtTime // the job should be run func NewAtTimes(atTime AtTime, atTimes ...AtTime) AtTimes { return func() []AtTime { - atTimes = append(atTimes, atTime) - return atTimes + return append([]AtTime{atTime}, atTimes...) } } @@ -551,6 +608,16 @@ func WithDistributedJobLocker(locker Locker) JobOption { } } +// WithDisabledDistributedJobLocker disables the distributed job locker. +// This is useful when a global distributed locker has been set on the scheduler +// level using WithDistributedLocker and need to be disabled for specific jobs. 
+func WithDisabledDistributedJobLocker(disabled bool) JobOption { + return func(j *internalJob, _ time.Time) error { + j.disabledLocker = disabled + return nil + } +} + // WithEventListeners sets the event listeners that should be // run for the job. func WithEventListeners(eventListeners ...EventListener) JobOption { @@ -588,6 +655,15 @@ func WithName(name string) JobOption { } } +// WithCronImplementation sets the custom Cron implementation for the job. +// This is only utilized for the CronJob type. +func WithCronImplementation(c Cron) JobOption { + return func(j *internalJob, _ time.Time) error { + j.cron = c + return nil + } +} + // WithSingletonMode keeps the job from running again if it is already running. // This is useful for jobs that should not overlap, and that occasionally // (but not consistently) run longer than the interval between job runs. @@ -686,6 +762,22 @@ func WithIdentifier(id uuid.UUID) JobOption { } } +// WithContext sets the parent context for the job. +// If you set the first argument of your Task func to be a context.Context, +// gocron will pass in the provided context to the job and will cancel the +// context on shutdown. If you cancel the context the job will no longer be +// scheduled as well. This allows you to both control the job via a context +// and listen for and handle cancellation within your job. +func WithContext(ctx context.Context) JobOption { + return func(j *internalJob, _ time.Time) error { + if ctx == nil { + return ErrWithContextNil + } + j.parentCtx = ctx + return nil + } +} + // ----------------------------------------------- // ----------------------------------------------- // ------------- Job Event Listeners ------------- @@ -708,6 +800,19 @@ func BeforeJobRuns(eventListenerFunc func(jobID uuid.UUID, jobName string)) Even } } +// BeforeJobRunsSkipIfBeforeFuncErrors is used to listen for when a job is about to run and +// then runs the provided function. If the provided function returns an error, the job will be +// rescheduled and the current run will be skipped. +func BeforeJobRunsSkipIfBeforeFuncErrors(eventListenerFunc func(jobID uuid.UUID, jobName string) error) EventListener { + return func(j *internalJob) error { + if eventListenerFunc == nil { + return ErrEventListenerFuncNil + } + j.beforeJobRunsSkipIfBeforeFuncErrors = eventListenerFunc + return nil + } +} + // AfterJobRuns is used to listen for when a job has run // without an error, and then run the provided function. 
func AfterJobRuns(eventListenerFunc func(jobID uuid.UUID, jobName string)) EventListener { @@ -769,7 +874,8 @@ type jobSchedule interface { var _ jobSchedule = (*cronJob)(nil) type cronJob struct { - cronSchedule cron.Schedule + crontab string + cronSchedule Cron } func (j *cronJob) next(lastRun time.Time) time.Time { @@ -813,7 +919,7 @@ func (d dailyJob) next(lastRun time.Time) time.Time { } firstPass = false - startNextDay := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(d.interval), 0, 0, 0, lastRun.Nanosecond(), lastRun.Location()) + startNextDay := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(d.interval), 0, 0, 0, 0, lastRun.Location()) return d.nextDay(startNextDay, firstPass) } @@ -821,7 +927,7 @@ func (d dailyJob) nextDay(lastRun time.Time, firstPass bool) time.Time { for _, at := range d.atTimes { // sub the at time hour/min/sec onto the lastScheduledRun's values // to use in checks to see if we've got our next run time - atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) + atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day(), at.Hour(), at.Minute(), at.Second(), 0, lastRun.Location()) if firstPass && atDate.After(lastRun) { // checking to see if it is after i.e. greater than, @@ -867,7 +973,7 @@ func (w weeklyJob) nextWeekDayAtTime(lastRun time.Time, firstPass bool) time.Tim for _, at := range w.atTimes { // sub the at time hour/min/sec onto the lastScheduledRun's values // to use in checks to see if we've got our next run time - atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(weekDayDiff), at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) + atDate := time.Date(lastRun.Year(), lastRun.Month(), lastRun.Day()+int(weekDayDiff), at.Hour(), at.Minute(), at.Second(), 0, lastRun.Location()) if firstPass && atDate.After(lastRun) { // checking to see if it is after i.e. greater than, @@ -935,7 +1041,7 @@ func (m monthlyJob) nextMonthDayAtTime(lastRun time.Time, days []int, firstPass for _, at := range m.atTimes { // sub the day, and the at time hour/min/sec onto the lastScheduledRun's values // to use in checks to see if we've got our next run time - atDate := time.Date(lastRun.Year(), lastRun.Month(), day, at.Hour(), at.Minute(), at.Second(), lastRun.Nanosecond(), lastRun.Location()) + atDate := time.Date(lastRun.Year(), lastRun.Month(), day, at.Hour(), at.Minute(), at.Second(), 0, lastRun.Location()) if atDate.Month() != lastRun.Month() { // this check handles if we're setting a day not in the current month @@ -1099,12 +1205,14 @@ func (j job) RunNow() error { defer cancel() resp := make(chan error, 1) + t := time.NewTimer(100 * time.Millisecond) select { case j.runJobRequest <- runJobRequest{ id: j.id, outChan: resp, }: - case <-time.After(100 * time.Millisecond): + t.Stop() + case <-t.C: return ErrJobRunNowFailed } var err error diff --git a/vendor/github.com/go-co-op/gocron/v2/monitor.go b/vendor/github.com/go-co-op/gocron/v2/monitor.go index d3c5bbd9..4f25fd86 100644 --- a/vendor/github.com/go-co-op/gocron/v2/monitor.go +++ b/vendor/github.com/go-co-op/gocron/v2/monitor.go @@ -26,3 +26,11 @@ type Monitor interface { // to handle instantiating and recording the value RecordJobTiming(startTime, endTime time.Time, id uuid.UUID, name string, tags []string) } + +// MonitorStatus extends RecordJobTiming with the job status. 
+type MonitorStatus interface { + Monitor + // RecordJobTimingWithStatus will provide details about the job, its status, error and the timing and expects the underlying implementation + // to handle instantiating and recording the value + RecordJobTimingWithStatus(startTime, endTime time.Time, id uuid.UUID, name string, tags []string, status JobStatus, err error) +} diff --git a/vendor/github.com/go-co-op/gocron/v2/scheduler.go b/vendor/github.com/go-co-op/gocron/v2/scheduler.go index 90ff5212..1cbc98dc 100644 --- a/vendor/github.com/go-co-op/gocron/v2/scheduler.go +++ b/vendor/github.com/go-co-op/gocron/v2/scheduler.go @@ -5,11 +5,12 @@ import ( "context" "reflect" "runtime" + "slices" + "strings" "time" "github.com/google/uuid" "github.com/jonboulle/clockwork" - "golang.org/x/exp/slices" ) var _ Scheduler = (*scheduler)(nil) @@ -21,6 +22,10 @@ type Scheduler interface { // NewJob creates a new job in the Scheduler. The job is scheduled per the provided // definition when the Scheduler is started. If the Scheduler is already running // the job will be scheduled when the Scheduler is started. + // If you set the first argument of your Task func to be a context.Context, + // gocron will pass in a context (either the default Job context, or one + // provided via WithContext) to the job and will cancel the context on shutdown. + // This allows you to listen for and handle cancellation within your job. NewJob(JobDefinition, Task, ...JobOption) (Job, error) // RemoveByTags removes all jobs that have at least one of the provided tags. RemoveByTags(...string) @@ -133,9 +138,10 @@ func NewScheduler(options ...SchedulerOption) (Scheduler, error) { jobsIn: make(chan jobIn), jobsOutForRescheduling: make(chan uuid.UUID), + jobUpdateNextRuns: make(chan uuid.UUID), jobsOutCompleted: make(chan uuid.UUID), jobOutRequest: make(chan jobOutRequest, 1000), - done: make(chan error), + done: make(chan error, 1), } s := &scheduler{ @@ -171,7 +177,8 @@ func NewScheduler(options ...SchedulerOption) (Scheduler, error) { select { case id := <-s.exec.jobsOutForRescheduling: s.selectExecJobsOutForRescheduling(id) - + case id := <-s.exec.jobUpdateNextRuns: + s.updateNextScheduled(id) case id := <-s.exec.jobsOutCompleted: s.selectExecJobsOutCompleted(id) @@ -233,20 +240,34 @@ func (s *scheduler) stopScheduler() { for _, j := range s.jobs { j.stop() } - for id, j := range s.jobs { + for _, j := range s.jobs { <-j.ctx.Done() - - j.ctx, j.cancel = context.WithCancel(s.shutdownCtx) - s.jobs[id] = j } var err error if s.started { + t := time.NewTimer(s.exec.stopTimeout + 1*time.Second) select { case err = <-s.exec.done: - case <-time.After(s.exec.stopTimeout + 1*time.Second): + t.Stop() + case <-t.C: err = ErrStopExecutorTimedOut } } + for id, j := range s.jobs { + oldCtx := j.ctx + if j.parentCtx == nil { + j.parentCtx = s.shutdownCtx + } + j.ctx, j.cancel = context.WithCancel(j.parentCtx) + + // also replace the old context with the new one in the parameters + if len(j.parameters) > 0 && j.parameters[0] == oldCtx { + j.parameters[0] = j.ctx + } + + s.jobs[id] = j + } + s.stopErrCh <- err s.started = false s.logger.Debug("gocron: scheduler stopped") @@ -261,14 +282,7 @@ func (s *scheduler) selectAllJobsOutRequest(out allJobsOutRequest) { } slices.SortFunc(outJobs, func(a, b Job) int { aID, bID := a.ID().String(), b.ID().String() - switch { - case aID < bID: - return -1 - case aID > bID: - return 1 - default: - return 0 - } + return strings.Compare(aID, bID) }) select { case <-s.shutdownCtx.Done(): @@ -329,7 +343,7 @@ func 
(s *scheduler) selectExecJobsOutForRescheduling(id uuid.UUID) { return } - scheduleFrom := j.lastRun + var scheduleFrom time.Time if len(j.nextScheduled) > 0 { // always grab the last element in the slice as that is the furthest // out in the future and the time from which we want to calculate @@ -360,6 +374,15 @@ func (s *scheduler) selectExecJobsOutForRescheduling(id uuid.UUID) { } } + if slices.Contains(j.nextScheduled, next) { + // if the next value is a duplicate of what's already in the nextScheduled slice, for example: + // - the job is being rescheduled off the same next run value as before + // increment to the next, next value + for slices.Contains(j.nextScheduled, next) { + next = j.next(next) + } + } + // Clean up any existing timer to prevent leaks if j.timer != nil { j.timer.Stop() @@ -384,6 +407,22 @@ func (s *scheduler) selectExecJobsOutForRescheduling(id uuid.UUID) { s.jobs[id] = j } +func (s *scheduler) updateNextScheduled(id uuid.UUID) { + j, ok := s.jobs[id] + if !ok { + return + } + var newNextScheduled []time.Time + for _, t := range j.nextScheduled { + if t.Before(s.now()) { + continue + } + newNextScheduled = append(newNextScheduled, t) + } + j.nextScheduled = newNextScheduled + s.jobs[id] = j +} + func (s *scheduler) selectExecJobsOutCompleted(id uuid.UUID) { j, ok := s.jobs[id] if !ok { @@ -646,8 +685,6 @@ func (s *scheduler) addOrUpdateJob(id uuid.UUID, definition JobDefinition, taskW j.id = id } - j.ctx, j.cancel = context.WithCancel(s.shutdownCtx) - if taskWrapper == nil { return nil, ErrNewJobTaskNil } @@ -662,10 +699,6 @@ func (s *scheduler) addOrUpdateJob(id uuid.UUID, definition JobDefinition, taskW return nil, ErrNewJobTaskNotFunc } - if err := s.verifyParameterType(taskFunc, tsk); err != nil { - return nil, err - } - j.name = runtime.FuncForPC(taskFunc.Pointer()).Name() j.function = tsk.function j.parameters = tsk.parameters @@ -684,6 +717,28 @@ func (s *scheduler) addOrUpdateJob(id uuid.UUID, definition JobDefinition, taskW } } + if j.parentCtx == nil { + j.parentCtx = s.shutdownCtx + } + j.ctx, j.cancel = context.WithCancel(j.parentCtx) + + if !taskFunc.IsZero() && taskFunc.Type().NumIn() > 0 { + // if the first parameter is a context.Context and params have no context.Context, add current ctx to the params + if taskFunc.Type().In(0) == reflect.TypeOf((*context.Context)(nil)).Elem() { + if len(tsk.parameters) == 0 { + tsk.parameters = []any{j.ctx} + j.parameters = []any{j.ctx} + } else if _, ok := tsk.parameters[0].(context.Context); !ok { + tsk.parameters = append([]any{j.ctx}, tsk.parameters...) + j.parameters = append([]any{j.ctx}, j.parameters...) 
+ } + } + } + + if err := s.verifyParameterType(taskFunc, tsk); err != nil { + return nil, err + } + if err := definition.setup(&j, s.location, s.exec.clock.Now()); err != nil { return nil, err } @@ -741,20 +796,27 @@ func (s *scheduler) StopJobs() error { return nil case s.stopCh <- struct{}{}: } + + t := time.NewTimer(s.exec.stopTimeout + 2*time.Second) select { case err := <-s.stopErrCh: + t.Stop() return err - case <-time.After(s.exec.stopTimeout + 2*time.Second): + case <-t.C: return ErrStopSchedulerTimedOut } } func (s *scheduler) Shutdown() error { s.shutdownCancel() + + t := time.NewTimer(s.exec.stopTimeout + 2*time.Second) select { case err := <-s.stopErrCh: + + t.Stop() return err - case <-time.After(s.exec.stopTimeout + 2*time.Second): + case <-t.C: return ErrStopSchedulerTimedOut } } @@ -809,6 +871,8 @@ func WithDistributedElector(elector Elector) SchedulerOption { // WithDistributedLocker sets the locker to be used by multiple // Scheduler instances to ensure that only one instance of each // job is run. +// To disable this global locker for specific jobs, see +// WithDisabledDistributedJobLocker. func WithDistributedLocker(locker Locker) SchedulerOption { return func(s *scheduler) error { if locker == nil { @@ -945,3 +1009,14 @@ func WithMonitor(monitor Monitor) SchedulerOption { return nil } } + +// WithMonitorStatus sets the metrics provider to be used by the Scheduler. +func WithMonitorStatus(monitor MonitorStatus) SchedulerOption { + return func(s *scheduler) error { + if monitor == nil { + return ErrWithMonitorNil + } + s.exec.monitorStatus = monitor + return nil + } +} diff --git a/vendor/github.com/go-co-op/gocron/v2/util.go b/vendor/github.com/go-co-op/gocron/v2/util.go index a4e5b6fd..2dd1c261 100644 --- a/vendor/github.com/go-co-op/gocron/v2/util.go +++ b/vendor/github.com/go-co-op/gocron/v2/util.go @@ -3,12 +3,11 @@ package gocron import ( "context" "reflect" + "slices" "sync" "time" "github.com/google/uuid" - "golang.org/x/exp/maps" - "golang.org/x/exp/slices" ) func callJobFuncWithParams(jobFunc any, params ...any) error { @@ -63,12 +62,8 @@ func requestJobCtx(ctx context.Context, id uuid.UUID, ch chan jobOutRequest) *in } func removeSliceDuplicatesInt(in []int) []int { - m := make(map[int]struct{}) - - for _, i := range in { - m[i] = struct{}{} - } - return maps.Keys(m) + slices.Sort(in) + return slices.Compact(in) } func convertAtTimesToDateTime(atTimes AtTimes, location *time.Location) ([]time.Time, error) { diff --git a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go index d127d436..def01a6b 100644 --- a/vendor/github.com/google/go-cmp/cmp/internal/function/func.go +++ b/vendor/github.com/google/go-cmp/cmp/internal/function/func.go @@ -19,6 +19,7 @@ const ( tbFunc // func(T) bool ttbFunc // func(T, T) bool + ttiFunc // func(T, T) int trbFunc // func(T, R) bool tibFunc // func(T, I) bool trFunc // func(T) R @@ -28,11 +29,13 @@ const ( Transformer = trFunc // func(T) R ValueFilter = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool + Compare = ttiFunc // func(T, T) int ValuePredicate = tbFunc // func(T) bool KeyValuePredicate = trbFunc // func(T, R) bool ) var boolType = reflect.TypeOf(true) +var intType = reflect.TypeOf(0) // IsType reports whether the reflect.Type is of the specified function type. 
func IsType(t reflect.Type, ft funcType) bool { @@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { return true } + case ttiFunc: // func(T, T) int + if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType { + return true + } case trbFunc: // func(T, R) bool if ni == 2 && no == 1 && t.Out(0) == boolType { return true diff --git a/vendor/github.com/google/go-cmp/cmp/options.go b/vendor/github.com/google/go-cmp/cmp/options.go index 754496f3..ba3fce81 100644 --- a/vendor/github.com/google/go-cmp/cmp/options.go +++ b/vendor/github.com/google/go-cmp/cmp/options.go @@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) { if t := s.curPath.Index(-2).Type(); t.Name() != "" { // Named type with unexported fields. name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType - if _, ok := reflect.New(t).Interface().(error); ok { + isProtoMessage := func(t reflect.Type) bool { + m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect") + return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 && + m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" && + m.Type.Out(0).Name() == "Message" + } + if isProtoMessage(t) { + help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types` + } else if _, ok := reflect.New(t).Interface().(error); ok { help = "consider using cmpopts.EquateErrors to compare error values" } else if t.Comparable() { help = "consider using cmpopts.EquateComparable to compare comparable Go types" diff --git a/vendor/github.com/jonboulle/clockwork/README.md b/vendor/github.com/jonboulle/clockwork/README.md index 42970da8..5e9d472e 100644 --- a/vendor/github.com/jonboulle/clockwork/README.md +++ b/vendor/github.com/jonboulle/clockwork/README.md @@ -36,6 +36,7 @@ Now you can easily test `myFunc` with a `FakeClock`: ```go func TestMyFunc(t *testing.T) { + ctx := context.Background() c := clockwork.NewFakeClock() // Start our sleepy function @@ -46,8 +47,12 @@ func TestMyFunc(t *testing.T) { wg.Done() }() - // Ensure we wait until myFunc is sleeping - c.BlockUntil(1) + // Ensure we wait until myFunc is waiting on the clock. + // Use a context to avoid blocking forever if something + // goes wrong. + ctx, cancel := context.WithTimeout(ctx, 10*time.Second) + defer cancel() + c.BlockUntilContext(ctx, 1) assertState() diff --git a/vendor/github.com/jonboulle/clockwork/SECURITY.md b/vendor/github.com/jonboulle/clockwork/SECURITY.md new file mode 100644 index 00000000..0efcad9f --- /dev/null +++ b/vendor/github.com/jonboulle/clockwork/SECURITY.md @@ -0,0 +1,19 @@ +# Security Policy + +If you have discovered a security vulnerability in this project, please report it +privately. **Do not disclose it as a public issue.** This gives me time to work with you +to fix the issue before public exposure, reducing the chance that the exploit will be +used before a patch is released. + +You may submit the report in the following ways: + +- send an email to ???@???; and/or +- send a [private vulnerability report](https://github.com/jonboulle/clockwork/security/advisories/new) + +Please provide the following information in your report: + +- A description of the vulnerability and its impact +- How to reproduce the issue + +This project is maintained by a single maintainer on a reasonable-effort basis. As such, +please give me 90 days to work on a fix before public exposure. 
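The go-cmp changes above (v0.6.0 to v0.7.0) add a func(T, T) int comparer kind (Compare) next to the existing func(T, T) bool forms, plus a hint to use protocmp.Transform for proto.Message types. A short sketch of how the three-way comparer form is typically consumed; the assumption that cmpopts.SortSlices accepts strings.Compare is mine and is not shown in the diff:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
)

func main() {
	got := []string{"b", "a", "c"}
	want := []string{"c", "b", "a"}

	// Assumed: with go-cmp v0.7.0, SortSlices also accepts a three-way
	// comparer (func(T, T) int) such as strings.Compare, in addition to
	// the older func(T, T) bool less function.
	sorted := cmpopts.SortSlices(strings.Compare)

	// Prints an empty diff: the slices are equal once ordering is ignored.
	fmt.Println(cmp.Diff(want, got, sorted))
}
```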
diff --git a/vendor/github.com/jonboulle/clockwork/clockwork.go b/vendor/github.com/jonboulle/clockwork/clockwork.go index 3206b36e..85a99346 100644 --- a/vendor/github.com/jonboulle/clockwork/clockwork.go +++ b/vendor/github.com/jonboulle/clockwork/clockwork.go @@ -1,8 +1,10 @@ +// Package clockwork contains a simple fake clock for Go. package clockwork import ( "context" - "sort" + "errors" + "slices" "sync" "time" ) @@ -14,49 +16,18 @@ type Clock interface { Sleep(d time.Duration) Now() time.Time Since(t time.Time) time.Duration + Until(t time.Time) time.Duration NewTicker(d time.Duration) Ticker NewTimer(d time.Duration) Timer AfterFunc(d time.Duration, f func()) Timer } -// FakeClock provides an interface for a clock which can be manually advanced -// through time. -// -// FakeClock maintains a list of "waiters," which consists of all callers -// waiting on the underlying clock (i.e. Tickers and Timers including callers of -// Sleep or After). Users can call BlockUntil to block until the clock has an -// expected number of waiters. -type FakeClock interface { - Clock - // Advance advances the FakeClock to a new point in time, ensuring any existing - // waiters are notified appropriately before returning. - Advance(d time.Duration) - // BlockUntil blocks until the FakeClock has the given number of waiters. - BlockUntil(waiters int) -} - // NewRealClock returns a Clock which simply delegates calls to the actual time // package; it should be used by packages in production. func NewRealClock() Clock { return &realClock{} } -// NewFakeClock returns a FakeClock implementation which can be -// manually advanced through time for testing. The initial time of the -// FakeClock will be the current system time. -// -// Tests that require a deterministic time must use NewFakeClockAt. -func NewFakeClock() FakeClock { - return NewFakeClockAt(time.Now()) -} - -// NewFakeClockAt returns a FakeClock initialised at the given time.Time. -func NewFakeClockAt(t time.Time) FakeClock { - return &fakeClock{ - time: t, - } -} - type realClock struct{} func (rc *realClock) After(d time.Duration) <-chan time.Time { @@ -75,6 +46,10 @@ func (rc *realClock) Since(t time.Time) time.Duration { return rc.Now().Sub(t) } +func (rc *realClock) Until(t time.Time) time.Duration { + return t.Sub(rc.Now()) +} + func (rc *realClock) NewTicker(d time.Duration) Ticker { return realTicker{time.NewTicker(d)} } @@ -87,7 +62,14 @@ func (rc *realClock) AfterFunc(d time.Duration, f func()) Timer { return realTimer{time.AfterFunc(d, f)} } -type fakeClock struct { +// FakeClock provides an interface for a clock which can be manually advanced +// through time. +// +// FakeClock maintains a list of "waiters," which consists of all callers +// waiting on the underlying clock (i.e. Tickers and Timers including callers of +// Sleep or After). Users can call BlockUntil to block until the clock has an +// expected number of waiters. +type FakeClock struct { // l protects all attributes of the clock, including all attributes of all // waiters and blockers. l sync.RWMutex @@ -96,11 +78,27 @@ type fakeClock struct { time time.Time } +// NewFakeClock returns a FakeClock implementation which can be +// manually advanced through time for testing. The initial time of the +// FakeClock will be the current system time. +// +// Tests that require a deterministic time must use NewFakeClockAt. +func NewFakeClock() *FakeClock { + return NewFakeClockAt(time.Now()) +} + +// NewFakeClockAt returns a FakeClock initialised at the given time.Time. 
+func NewFakeClockAt(t time.Time) *FakeClock { + return &FakeClock{ + time: t, + } +} + // blocker is a caller of BlockUntil. type blocker struct { count int - // ch is closed when the underlying clock has the specificed number of blockers. + // ch is closed when the underlying clock has the specified number of blockers. ch chan struct{} } @@ -111,23 +109,23 @@ type expirer interface { expire(now time.Time) (next *time.Duration) // Get and set the expiration time. - expiry() time.Time - setExpiry(time.Time) + expiration() time.Time + setExpiration(time.Time) } // After mimics [time.After]; it waits for the given duration to elapse on the // fakeClock, then sends the current time on the returned channel. -func (fc *fakeClock) After(d time.Duration) <-chan time.Time { +func (fc *FakeClock) After(d time.Duration) <-chan time.Time { return fc.NewTimer(d).Chan() } // Sleep blocks until the given duration has passed on the fakeClock. -func (fc *fakeClock) Sleep(d time.Duration) { +func (fc *FakeClock) Sleep(d time.Duration) { <-fc.After(d) } // Now returns the current time of the fakeClock -func (fc *fakeClock) Now() time.Time { +func (fc *FakeClock) Now() time.Time { fc.l.RLock() defer fc.l.RUnlock() return fc.time @@ -135,61 +133,73 @@ func (fc *fakeClock) Now() time.Time { // Since returns the duration that has passed since the given time on the // fakeClock. -func (fc *fakeClock) Since(t time.Time) time.Duration { +func (fc *FakeClock) Since(t time.Time) time.Duration { return fc.Now().Sub(t) } +// Until returns the duration that has to pass from the given time on the fakeClock +// to reach the given time. +func (fc *FakeClock) Until(t time.Time) time.Duration { + return t.Sub(fc.Now()) +} + // NewTicker returns a Ticker that will expire only after calls to -// fakeClock.Advance() have moved the clock past the given duration. -func (fc *fakeClock) NewTicker(d time.Duration) Ticker { - var ft *fakeTicker - ft = &fakeTicker{ - firer: newFirer(), - d: d, - reset: func(d time.Duration) { fc.set(ft, d) }, - stop: func() { fc.stop(ft) }, +// FakeClock.Advance() have moved the clock past the given duration. +// +// The duration d must be greater than zero; if not, NewTicker will panic. +func (fc *FakeClock) NewTicker(d time.Duration) Ticker { + // Maintain parity with + // https://cs.opensource.google/go/go/+/refs/tags/go1.20.3:src/time/tick.go;l=23-25 + if d <= 0 { + panic(errors.New("non-positive interval for NewTicker")) } - fc.set(ft, d) + ft := newFakeTicker(fc, d) + fc.l.Lock() + defer fc.l.Unlock() + fc.setExpirer(ft, d) return ft } // NewTimer returns a Timer that will fire only after calls to // fakeClock.Advance() have moved the clock past the given duration. -func (fc *fakeClock) NewTimer(d time.Duration) Timer { - return fc.newTimer(d, nil) +func (fc *FakeClock) NewTimer(d time.Duration) Timer { + t, _ := fc.newTimer(d, nil) + return t } // AfterFunc mimics [time.AfterFunc]; it returns a Timer that will invoke the // given function only after calls to fakeClock.Advance() have moved the clock // past the given duration. -func (fc *fakeClock) AfterFunc(d time.Duration, f func()) Timer { - return fc.newTimer(d, f) +func (fc *FakeClock) AfterFunc(d time.Duration, f func()) Timer { + t, _ := fc.newTimer(d, f) + return t } -// newTimer returns a new timer, using an optional afterFunc. 
-func (fc *fakeClock) newTimer(d time.Duration, afterfunc func()) *fakeTimer { - var ft *fakeTimer - ft = &fakeTimer{ - firer: newFirer(), - reset: func(d time.Duration) bool { - fc.l.Lock() - defer fc.l.Unlock() - // fc.l must be held across the calls to stopExpirer & setExpirer. - stopped := fc.stopExpirer(ft) - fc.setExpirer(ft, d) - return stopped - }, - stop: func() bool { return fc.stop(ft) }, +// newTimer returns a new timer using an optional afterFunc and the time that +// timer expires. +func (fc *FakeClock) newTimer(d time.Duration, afterfunc func()) (*fakeTimer, time.Time) { + ft := newFakeTimer(fc, afterfunc) + fc.l.Lock() + defer fc.l.Unlock() + fc.setExpirer(ft, d) + return ft, ft.expiration() +} - afterFunc: afterfunc, - } - fc.set(ft, d) +// newTimerAtTime is like newTimer, but uses a time instead of a duration. +// +// It is used to ensure FakeClock's lock is held constant through calling +// fc.After(t.Sub(fc.Now())). It should not be exposed externally. +func (fc *FakeClock) newTimerAtTime(t time.Time, afterfunc func()) *fakeTimer { + ft := newFakeTimer(fc, afterfunc) + fc.l.Lock() + defer fc.l.Unlock() + fc.setExpirer(ft, t.Sub(fc.time)) return ft } // Advance advances fakeClock to a new point in time, ensuring waiters and // blockers are notified appropriately before returning. -func (fc *fakeClock) Advance(d time.Duration) { +func (fc *FakeClock) Advance(d time.Duration) { fc.l.Lock() defer fc.l.Unlock() end := fc.time.Add(d) @@ -198,38 +208,34 @@ func (fc *fakeClock) Advance(d time.Duration) { // // We don't iterate because the callback of the waiter might register a new // waiter, so the list of waiters might change as we execute this. - for len(fc.waiters) > 0 && !end.Before(fc.waiters[0].expiry()) { + for len(fc.waiters) > 0 && !end.Before(fc.waiters[0].expiration()) { w := fc.waiters[0] fc.waiters = fc.waiters[1:] - // Use the waiter's expriation as the current time for this expiration. - now := w.expiry() + // Use the waiter's expiration as the current time for this expiration. + now := w.expiration() fc.time = now if d := w.expire(now); d != nil { - // Set the new exipration if needed. + // Set the new expiration if needed. fc.setExpirer(w, *d) } } fc.time = end } -// BlockUntil blocks until the fakeClock has the given number of waiters. +// BlockUntil blocks until the FakeClock has the given number of waiters. // -// Prefer BlockUntilContext, which offers context cancellation to prevent -// deadlock. +// Prefer BlockUntilContext in new code, which offers context cancellation to +// prevent deadlock. // -// Deprecation warning: This function might be deprecated in later versions. -func (fc *fakeClock) BlockUntil(n int) { - b := fc.newBlocker(n) - if b == nil { - return - } - <-b.ch +// Deprecated: New code should prefer BlockUntilContext. +func (fc *FakeClock) BlockUntil(n int) { + fc.BlockUntilContext(context.TODO(), n) } // BlockUntilContext blocks until the fakeClock has the given number of waiters // or the context is cancelled. -func (fc *fakeClock) BlockUntilContext(ctx context.Context, n int) error { +func (fc *FakeClock) BlockUntilContext(ctx context.Context, n int) error { b := fc.newBlocker(n) if b == nil { return nil @@ -243,7 +249,7 @@ func (fc *fakeClock) BlockUntilContext(ctx context.Context, n int) error { } } -func (fc *fakeClock) newBlocker(n int) *blocker { +func (fc *FakeClock) newBlocker(n int) *blocker { fc.l.Lock() defer fc.l.Unlock() // Fast path: we already have >= n waiters. 
@@ -260,7 +266,7 @@ func (fc *fakeClock) newBlocker(n int) *blocker { } // stop stops an expirer, returning true if the expirer was stopped. -func (fc *fakeClock) stop(e expirer) bool { +func (fc *FakeClock) stop(e expirer) bool { fc.l.Lock() defer fc.l.Unlock() return fc.stopExpirer(e) @@ -269,81 +275,45 @@ func (fc *fakeClock) stop(e expirer) bool { // stopExpirer stops an expirer, returning true if the expirer was stopped. // // The caller must hold fc.l. -func (fc *fakeClock) stopExpirer(e expirer) bool { - for i, t := range fc.waiters { - if t == e { - // Remove element, maintaining order. - copy(fc.waiters[i:], fc.waiters[i+1:]) - fc.waiters[len(fc.waiters)-1] = nil - fc.waiters = fc.waiters[:len(fc.waiters)-1] - return true - } +func (fc *FakeClock) stopExpirer(e expirer) bool { + idx := slices.Index(fc.waiters, e) + if idx == -1 { + return false } - return false -} - -// set sets an expirer to expire at a future point in time. -func (fc *fakeClock) set(e expirer, d time.Duration) { - fc.l.Lock() - defer fc.l.Unlock() - fc.setExpirer(e, d) + // Remove element, maintaining order, setting inaccessible elements to nil so + // they can be garbage collected. + copy(fc.waiters[idx:], fc.waiters[idx+1:]) + fc.waiters[len(fc.waiters)-1] = nil + fc.waiters = fc.waiters[:len(fc.waiters)-1] + return true } // setExpirer sets an expirer to expire at a future point in time. // // The caller must hold fc.l. -func (fc *fakeClock) setExpirer(e expirer, d time.Duration) { +func (fc *FakeClock) setExpirer(e expirer, d time.Duration) { if d.Nanoseconds() <= 0 { - // special case - trigger immediately, never reset. + // Special case for timers with duration <= 0: trigger immediately, never + // reset. // - // TODO: Explain what cases this covers. + // Tickers never get here, they panic if d is < 0. e.expire(fc.time) return } // Add the expirer to the set of waiters and notify any blockers. - e.setExpiry(fc.time.Add(d)) + e.setExpiration(fc.time.Add(d)) fc.waiters = append(fc.waiters, e) - sort.Slice(fc.waiters, func(i int, j int) bool { - return fc.waiters[i].expiry().Before(fc.waiters[j].expiry()) + slices.SortFunc(fc.waiters, func(a, b expirer) int { + return a.expiration().Compare(b.expiration()) }) - // Notify blockers of our new waiter. - var blocked []*blocker + // Notify blockers of our new waiter. count := len(fc.waiters) - for _, b := range fc.blockers { + fc.blockers = slices.DeleteFunc(fc.blockers, func(b *blocker) bool { if b.count <= count { close(b.ch) - continue + return true } - blocked = append(blocked, b) - } - fc.blockers = blocked -} - -// firer is used by fakeTimer and fakeTicker used to help implement expirer. -type firer struct { - // The channel associated with the firer, used to send expriation times. - c chan time.Time - - // The time when the firer expires. Only meaningful if the firer is currently - // one of a fakeClock's waiters. - exp time.Time -} - -func newFirer() firer { - return firer{c: make(chan time.Time, 1)} -} - -func (f *firer) Chan() <-chan time.Time { - return f.c -} - -// expiry implements expirer. -func (f *firer) expiry() time.Time { - return f.exp -} - -// setExpiry implements expirer. 
-func (f *firer) setExpiry(t time.Time) { - f.exp = t + return false + }) } diff --git a/vendor/github.com/jonboulle/clockwork/context.go b/vendor/github.com/jonboulle/clockwork/context.go index edbb368f..59242619 100644 --- a/vendor/github.com/jonboulle/clockwork/context.go +++ b/vendor/github.com/jonboulle/clockwork/context.go @@ -2,24 +2,168 @@ package clockwork import ( "context" + "fmt" + "sync" + "time" ) // contextKey is private to this package so we can ensure uniqueness here. This // type identifies context values provided by this package. type contextKey string -// keyClock provides a clock for injecting during tests. If absent, a real clock should be used. +// keyClock provides a clock for injecting during tests. If absent, a real clock +// should be used. var keyClock = contextKey("clock") // clockwork.Clock // AddToContext creates a derived context that references the specified clock. +// +// Be aware this doesn't change the behavior of standard library functions, such +// as [context.WithTimeout] or [context.WithDeadline]. For this reason, users +// should prefer passing explicit [clockwork.Clock] variables rather can passing +// the clock via the context. func AddToContext(ctx context.Context, clock Clock) context.Context { return context.WithValue(ctx, keyClock, clock) } -// FromContext extracts a clock from the context. If not present, a real clock is returned. +// FromContext extracts a clock from the context. If not present, a real clock +// is returned. func FromContext(ctx context.Context) Clock { if clock, ok := ctx.Value(keyClock).(Clock); ok { return clock } return NewRealClock() } + +// ErrFakeClockDeadlineExceeded is the error returned by [context.Context] when +// the deadline passes on a context which uses a [FakeClock]. +// +// It wraps a [context.DeadlineExceeded] error, i.e.: +// +// // The following is true for any Context whose deadline has been exceeded, +// // including contexts made with clockwork.WithDeadline or clockwork.WithTimeout. +// +// errors.Is(ctx.Err(), context.DeadlineExceeded) +// +// // The following can only be true for contexts made +// // with clockwork.WithDeadline or clockwork.WithTimeout. +// +// errors.Is(ctx.Err(), clockwork.ErrFakeClockDeadlineExceeded) +var ErrFakeClockDeadlineExceeded error = fmt.Errorf("clockwork.FakeClock: %w", context.DeadlineExceeded) + +// WithDeadline returns a context with a deadline based on a [FakeClock]. +// +// The returned context ignores parent cancelation if the parent was cancelled +// with a [context.DeadlineExceeded] error. Any other error returned by the +// parent is treated normally, cancelling the returned context. +// +// If the parent is cancelled with a [context.DeadlineExceeded] error, the only +// way to then cancel the returned context is by calling the returned +// context.CancelFunc. +func WithDeadline(parent context.Context, clock Clock, t time.Time) (context.Context, context.CancelFunc) { + if fc, ok := clock.(*FakeClock); ok { + return newFakeClockContext(parent, t, fc.newTimerAtTime(t, nil).Chan()) + } + return context.WithDeadline(parent, t) +} + +// WithTimeout returns a context with a timeout based on a [FakeClock]. +// +// The returned context follows the same behaviors as [WithDeadline]. 
+func WithTimeout(parent context.Context, clock Clock, d time.Duration) (context.Context, context.CancelFunc) { + if fc, ok := clock.(*FakeClock); ok { + t, deadline := fc.newTimer(d, nil) + return newFakeClockContext(parent, deadline, t.Chan()) + } + return context.WithTimeout(parent, d) +} + +// fakeClockContext implements context.Context, using a fake clock for its +// deadline. +// +// It ignores parent cancellation if the parent is cancelled with +// context.DeadlineExceeded. +type fakeClockContext struct { + parent context.Context + deadline time.Time // The user-facing deadline based on the fake clock's time. + + // Tracks timeout/deadline cancellation. + timerDone <-chan time.Time + + // Tracks manual calls to the cancel function. + cancel func() // Closes cancelCalled wrapped in a sync.Once. + cancelCalled chan struct{} + + // The user-facing data from the context.Context interface. + ctxDone chan struct{} // Returned by Done(). + err error // nil until ctxDone is ready to be closed. +} + +func newFakeClockContext(parent context.Context, deadline time.Time, timer <-chan time.Time) (context.Context, context.CancelFunc) { + cancelCalled := make(chan struct{}) + ctx := &fakeClockContext{ + parent: parent, + deadline: deadline, + timerDone: timer, + cancelCalled: cancelCalled, + ctxDone: make(chan struct{}), + cancel: sync.OnceFunc(func() { + close(cancelCalled) + }), + } + ready := make(chan struct{}, 1) + go ctx.runCancel(ready) + <-ready // Wait until the cancellation goroutine is running. + return ctx, ctx.cancel +} + +func (c *fakeClockContext) Deadline() (time.Time, bool) { + return c.deadline, true +} + +func (c *fakeClockContext) Done() <-chan struct{} { + return c.ctxDone +} + +func (c *fakeClockContext) Err() error { + <-c.Done() // Don't return the error before it is ready. + return c.err +} + +func (c *fakeClockContext) Value(key any) any { + return c.parent.Value(key) +} + +// runCancel runs the fakeClockContext's cancel goroutine and returns the +// fakeClockContext's cancel function. +// +// fakeClockContext is then cancelled when any of the following occur: +// +// - The fakeClockContext.done channel is closed by its timer. +// - The returned CancelFunc is executed. +// - The fakeClockContext's parent context is cancelled with an error other +// than context.DeadlineExceeded. +func (c *fakeClockContext) runCancel(ready chan struct{}) { + parentDone := c.parent.Done() + + // Close ready when done, just in case the ready signal races with other + // branches of our select statement below. + defer close(ready) + + for c.err == nil { + select { + case <-c.timerDone: + c.err = ErrFakeClockDeadlineExceeded + case <-c.cancelCalled: + c.err = context.Canceled + case <-parentDone: + c.err = c.parent.Err() + + case ready <- struct{}{}: + // Signals the cancellation goroutine has begun, in an attempt to minimize + // race conditions related to goroutine startup time. + ready = nil // This case statement can only fire once. + } + } + close(c.ctxDone) + return +} diff --git a/vendor/github.com/jonboulle/clockwork/ticker.go b/vendor/github.com/jonboulle/clockwork/ticker.go index b68e4d77..aa56952e 100644 --- a/vendor/github.com/jonboulle/clockwork/ticker.go +++ b/vendor/github.com/jonboulle/clockwork/ticker.go @@ -19,7 +19,12 @@ func (r realTicker) Chan() <-chan time.Time { } type fakeTicker struct { - firer + // The channel associated with the firer, used to send expiration times. + c chan time.Time + + // The time when the ticker expires. 
Only meaningful if the ticker is currently + // one of a FakeClock's waiters. + exp time.Time // reset and stop provide the implementation of the respective exported // functions. @@ -30,13 +35,27 @@ type fakeTicker struct { d time.Duration } -func (f *fakeTicker) Reset(d time.Duration) { - f.reset(d) +func newFakeTicker(fc *FakeClock, d time.Duration) *fakeTicker { + var ft *fakeTicker + ft = &fakeTicker{ + c: make(chan time.Time, 1), + d: d, + reset: func(d time.Duration) { + fc.l.Lock() + defer fc.l.Unlock() + ft.d = d + fc.setExpirer(ft, d) + }, + stop: func() { fc.stop(ft) }, + } + return ft } -func (f *fakeTicker) Stop() { - f.stop() -} +func (f *fakeTicker) Chan() <-chan time.Time { return f.c } + +func (f *fakeTicker) Reset(d time.Duration) { f.reset(d) } + +func (f *fakeTicker) Stop() { f.stop() } func (f *fakeTicker) expire(now time.Time) *time.Duration { // Never block on expiration. @@ -46,3 +65,7 @@ func (f *fakeTicker) expire(now time.Time) *time.Duration { } return &f.d } + +func (f *fakeTicker) expiration() time.Time { return f.exp } + +func (f *fakeTicker) setExpiration(t time.Time) { f.exp = t } diff --git a/vendor/github.com/jonboulle/clockwork/timer.go b/vendor/github.com/jonboulle/clockwork/timer.go index 6f928b3d..e7e1d400 100644 --- a/vendor/github.com/jonboulle/clockwork/timer.go +++ b/vendor/github.com/jonboulle/clockwork/timer.go @@ -18,9 +18,14 @@ func (r realTimer) Chan() <-chan time.Time { } type fakeTimer struct { - firer + // The channel associated with the firer, used to send expiration times. + c chan time.Time - // reset and stop provide the implmenetation of the respective exported + // The time when the firer expires. Only meaningful if the firer is currently + // one of a FakeClock's waiters. + exp time.Time + + // reset and stop provide the implementation of the respective exported // functions. reset func(d time.Duration) bool stop func() bool @@ -30,13 +35,30 @@ type fakeTimer struct { afterFunc func() } -func (f *fakeTimer) Reset(d time.Duration) bool { - return f.reset(d) +func newFakeTimer(fc *FakeClock, afterfunc func()) *fakeTimer { + var ft *fakeTimer + ft = &fakeTimer{ + c: make(chan time.Time, 1), + reset: func(d time.Duration) bool { + fc.l.Lock() + defer fc.l.Unlock() + // fc.l must be held across the calls to stopExpirer & setExpirer. 
+ stopped := fc.stopExpirer(ft) + fc.setExpirer(ft, d) + return stopped + }, + stop: func() bool { return fc.stop(ft) }, + + afterFunc: afterfunc, + } + return ft } -func (f *fakeTimer) Stop() bool { - return f.stop() -} +func (f *fakeTimer) Chan() <-chan time.Time { return f.c } + +func (f *fakeTimer) Reset(d time.Duration) bool { return f.reset(d) } + +func (f *fakeTimer) Stop() bool { return f.stop() } func (f *fakeTimer) expire(now time.Time) *time.Duration { if f.afterFunc != nil { @@ -51,3 +73,7 @@ func (f *fakeTimer) expire(now time.Time) *time.Duration { } return nil } + +func (f *fakeTimer) expiration() time.Time { return f.exp } + +func (f *fakeTimer) setExpiration(t time.Time) { f.exp = t } diff --git a/vendor/github.com/onsi/gomega/CHANGELOG.md b/vendor/github.com/onsi/gomega/CHANGELOG.md index a20d997c..890d8922 100644 --- a/vendor/github.com/onsi/gomega/CHANGELOG.md +++ b/vendor/github.com/onsi/gomega/CHANGELOG.md @@ -1,3 +1,24 @@ +## 1.37.0 + +### Features +- add To/ToNot/NotTo aliases for AsyncAssertion [5666f98] + +## 1.36.3 + +### Maintenance + +- bump all the things [adb8b49] +- chore: replace `interface{}` with `any` [7613216] +- Bump google.golang.org/protobuf from 1.36.1 to 1.36.5 (#822) [9fe5259] +- remove spurious "toolchain" from go.mod (#819) [a0e85b9] +- Bump golang.org/x/net from 0.33.0 to 0.35.0 (#823) [604a8b1] +- Bump activesupport from 6.0.6.1 to 6.1.7.5 in /docs (#772) [36fbc84] +- Bump github-pages from 231 to 232 in /docs (#778) [ced70d7] +- Bump rexml from 3.2.6 to 3.3.9 in /docs (#788) [c8b4a07] +- Bump github.com/onsi/ginkgo/v2 from 2.22.1 to 2.22.2 (#812) [06431b9] +- Bump webrick from 1.8.1 to 1.9.1 in /docs (#800) [b55a92d] +- Fix typos (#813) [a1d518b] + ## 1.36.2 ### Maintenance @@ -322,7 +343,7 @@ Require Go 1.22+ ### Features -Introducting [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. +Introducing [gcustom](https://onsi.github.io/gomega/#gcustom-a-convenient-mechanism-for-buildling-custom-matchers) - a convenient mechanism for building custom matchers. This is an RC release for `gcustom`. The external API may be tweaked in response to feedback however it is expected to remain mostly stable. @@ -461,7 +482,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/ - Fix max number of samples in experiments on non-64-bit systems. (#528) [1c84497] - Remove dependency on ginkgo v1.16.4 (#530) [4dea8d5] - Fix for Go 1.18 (#532) [56d2a29] -- Document precendence of timeouts (#533) [b607941] +- Document precedence of timeouts (#533) [b607941] ## 1.18.1 @@ -478,7 +499,7 @@ These improvements are all documented in [Gomega's docs](https://onsi.github.io/ ## Fixes - Gomega now uses ioutil for Go 1.15 and lower (#492) - official support is only for the most recent two major versions of Go but this will unblock users who need to stay on older unsupported versions of Go. [c29c1c0] -## Maintenace +## Maintenance - Remove Travis workflow (#491) [72e6040] - Upgrade to Ginkgo 2.0.0 GA [f383637] - chore: fix description of HaveField matcher (#487) [2b4b2c0] @@ -726,7 +747,7 @@ Improvements: - Added `BeSent` which attempts to send a value down a channel and fails if the attempt blocks. Can be paired with `Eventually` to safely send a value down a channel with a timeout. - `Ω`, `Expect`, `Eventually`, and `Consistently` now immediately `panic` if there is no registered fail handler. 
This is always a mistake that can hide failing tests. -- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShoudlNot(Receive()) always passes with a closed channel. +- `Receive()` no longer errors when passed a closed channel, it's perfectly fine to attempt to read from a closed channel so Ω(c).Should(Receive()) always fails and Ω(c).ShouldNot(Receive()) always passes with a closed channel. - Added `HavePrefix` and `HaveSuffix` matchers. - `ghttp` can now handle concurrent requests. - Added `Succeed` which allows one to write `Ω(MyFunction()).Should(Succeed())`. @@ -736,7 +757,7 @@ Improvements: - `ghttp` servers can take an `io.Writer`. `ghttp` will write a line to the writer when each request arrives. - Added `WithTransform` matcher to allow munging input data before feeding into the relevant matcher - Added boolean `And`, `Or`, and `Not` matchers to allow creating composite matchers -- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the alloted time. +- Added `gbytes.TimeoutCloser`, `gbytes.TimeoutReader`, and `gbytes.TimeoutWriter` - these are convenience wrappers that timeout if the underlying Closer/Reader/Writer does not return within the allotted time. - Added `gbytes.BufferReader` - this constructs a `gbytes.Buffer` that asynchronously reads the passed-in `io.Reader` into its buffer. Bug Fixes: @@ -781,7 +802,7 @@ New Matchers: Updated Matchers: -- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an objet that satisfies the passed-in matcher. +- `Receive` matcher can take a matcher as an argument and passes only if the channel under test receives an object that satisfies the passed-in matcher. - Matchers that implement `MatchMayChangeInTheFuture(actual interface{}) bool` can inform `Eventually` and/or `Consistently` when a match has no chance of changing status in the future. For example, `Receive` returns `false` when a channel is closed. Misc: diff --git a/vendor/github.com/onsi/gomega/format/format.go b/vendor/github.com/onsi/gomega/format/format.go index 6c168063..96f04b21 100644 --- a/vendor/github.com/onsi/gomega/format/format.go +++ b/vendor/github.com/onsi/gomega/format/format.go @@ -57,7 +57,7 @@ var Indent = " " var longFormThreshold = 20 -// GomegaStringer allows for custom formating of objects for gomega. +// GomegaStringer allows for custom formatting of objects for gomega. type GomegaStringer interface { // GomegaString will be used to custom format an object. // It does not follow UseStringerRepresentation value and will always be called regardless. @@ -73,7 +73,7 @@ If the CustomFormatter does not want to handle the object it should return ("", Strings returned by CustomFormatters are not truncated */ -type CustomFormatter func(value interface{}) (string, bool) +type CustomFormatter func(value any) (string, bool) type CustomFormatterKey uint var customFormatterKey CustomFormatterKey = 1 @@ -125,7 +125,7 @@ If expected is omitted, then the message looks like: */ -func Message(actual interface{}, message string, expected ...interface{}) string { +func Message(actual any, message string, expected ...any) string { if len(expected) == 0 { return fmt.Sprintf("Expected\n%s\n%s", Object(actual, 1), message) } @@ -255,7 +255,7 @@ recursing into the object. 
Set PrintContextObjects to true to print the content of objects implementing context.Context */ -func Object(object interface{}, indentation uint) string { +func Object(object any, indentation uint) string { indent := strings.Repeat(Indent, int(indentation)) value := reflect.ValueOf(object) commonRepresentation := "" @@ -392,7 +392,7 @@ func formatValue(value reflect.Value, indentation uint) string { } } -func formatString(object interface{}, indentation uint) string { +func formatString(object any, indentation uint) string { if indentation == 1 { s := fmt.Sprintf("%s", object) components := strings.Split(s, "\n") diff --git a/vendor/github.com/onsi/gomega/gomega_dsl.go b/vendor/github.com/onsi/gomega/gomega_dsl.go index 9a028f3f..a491a64b 100644 --- a/vendor/github.com/onsi/gomega/gomega_dsl.go +++ b/vendor/github.com/onsi/gomega/gomega_dsl.go @@ -22,7 +22,7 @@ import ( "github.com/onsi/gomega/types" ) -const GOMEGA_VERSION = "1.36.2" +const GOMEGA_VERSION = "1.37.0" const nilGomegaPanic = `You are trying to make an assertion, but haven't registered Gomega's fail handler. If you're using Ginkgo then you probably forgot to put your assertion in an It(). @@ -191,7 +191,7 @@ func ensureDefaultGomegaIsConfigured() { // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Ω and Expect are identical -func Ω(actual interface{}, extra ...interface{}) Assertion { +func Ω(actual any, extra ...any) Assertion { ensureDefaultGomegaIsConfigured() return Default.Ω(actual, extra...) } @@ -217,7 +217,7 @@ func Ω(actual interface{}, extra ...interface{}) Assertion { // Will succeed only if `MyAmazingThing()` returns `(3, nil)` // // Expect and Ω are identical -func Expect(actual interface{}, extra ...interface{}) Assertion { +func Expect(actual any, extra ...any) Assertion { ensureDefaultGomegaIsConfigured() return Default.Expect(actual, extra...) } @@ -233,7 +233,7 @@ func Expect(actual interface{}, extra ...interface{}) Assertion { // This is most useful in helper functions that make assertions. If you want Gomega's // error message to refer to the calling line in the test (as opposed to the line in the helper function) // set the first argument of `ExpectWithOffset` appropriately. -func ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion { +func ExpectWithOffset(offset int, actual any, extra ...any) Assertion { ensureDefaultGomegaIsConfigured() return Default.ExpectWithOffset(offset, actual, extra...) } @@ -319,19 +319,19 @@ you an also use Eventually().WithContext(ctx) to pass in the context. Passed-in Eventually(client.FetchCount).WithContext(ctx).WithArguments("/users").Should(BeNumerically(">=", 17)) }, SpecTimeout(time.Second)) -Either way the context pasesd to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. +Either way the context passed to Eventually is also passed to the underlying function. Now, when Ginkgo cancels the context both the FetchCount client and Gomega will be informed and can exit. By default, when a context is passed to Eventually *without* an explicit timeout, Gomega will rely solely on the context's cancellation to determine when to stop polling. If you want to specify a timeout in addition to the context you can do so using the .WithTimeout() method. 
For example: Eventually(client.FetchCount).WithContext(ctx).WithTimeout(10*time.Second).Should(BeNumerically(">=", 17)) -now either the context cacnellation or the timeout will cause Eventually to stop polling. +now either the context cancellation or the timeout will cause Eventually to stop polling. If, instead, you would like to opt out of this behavior and have Gomega's default timeouts govern Eventuallys that take a context you can call: EnforceDefaultTimeoutsWhenUsingContexts() -in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if eitehr the context is cancelled or the default timeout elapses. +in the DSL (or on a Gomega instance). Now all calls to Eventually that take a context will fail if either the context is cancelled or the default timeout elapses. **Category 3: Making assertions _in_ the function passed into Eventually** @@ -390,7 +390,7 @@ is equivalent to Eventually(...).WithTimeout(10*time.Second).WithPolling(2*time.Second).WithContext(ctx).Should(...) */ -func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func Eventually(actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.Eventually(actualOrCtx, args...) } @@ -404,7 +404,7 @@ func Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { // `EventuallyWithOffset` specifying a timeout interval (and an optional polling interval) are // the same as `Eventually(...).WithOffset(...).WithTimeout` or // `Eventually(...).WithOffset(...).WithTimeout(...).WithPolling`. -func EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.EventuallyWithOffset(offset, actualOrCtx, args...) } @@ -424,7 +424,7 @@ Consistently is useful in cases where you want to assert that something *does no This will block for 200 milliseconds and repeatedly check the channel and ensure nothing has been received. */ -func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func Consistently(actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.Consistently(actualOrCtx, args...) } @@ -435,13 +435,13 @@ func Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion { // // `ConsistentlyWithOffset` is the same as `Consistently(...).WithOffset` and // optional `WithTimeout` and `WithPolling`. -func ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion { +func ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion { ensureDefaultGomegaIsConfigured() return Default.ConsistentlyWithOffset(offset, actualOrCtx, args...) } /* -StopTrying can be used to signal to Eventually and Consistentlythat they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal. +StopTrying can be used to signal to Eventually and Consistently that they should abort and stop trying. This always results in a failure of the assertion - and the failure message is the content of the StopTrying signal. You can send the StopTrying signal by either returning StopTrying("message") as an error from your passed-in function _or_ by calling StopTrying("message").Now() to trigger a panic and end execution. 
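The gomega_dsl.go documentation above describes how Eventually interacts with contexts, timeouts, and the StopTrying signal. The following minimal sketch is not part of the vendored diff; the fetchCount helper and the test name are invented for illustration. It shows those pieces together: the context passed via WithContext is forwarded to the polled function, polling stops when either the context is cancelled or the WithTimeout deadline elapses, and returning StopTrying aborts polling immediately.

package example

import (
	"context"
	"testing"
	"time"

	"github.com/onsi/gomega"
)

// fetchCount is a stand-in poller that becomes ready after a few calls.
// It returns StopTrying if the forwarded context has already been cancelled.
func fetchCount(calls *int) func(ctx context.Context) (int, error) {
	return func(ctx context.Context) (int, error) {
		if ctx.Err() != nil {
			return 0, gomega.StopTrying("context cancelled, giving up")
		}
		*calls++
		return *calls, nil
	}
}

func TestEventuallyWithContext(t *testing.T) {
	g := gomega.NewWithT(t)
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()

	calls := 0
	// The context from WithContext is passed to the polled function; the
	// WithTimeout deadline applies in addition to the context's own deadline,
	// so whichever expires first ends the polling loop.
	g.Eventually(fetchCount(&calls)).
		WithContext(ctx).
		WithTimeout(500 * time.Millisecond).
		WithPolling(10 * time.Millisecond).
		Should(gomega.BeNumerically(">=", 3))
}

Because the polled function's second return value is an error, a non-nil error marks that poll attempt as failed, and returning the StopTrying signal ends polling immediately, matching the behavior documented in the hunk above.
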
diff --git a/vendor/github.com/onsi/gomega/internal/assertion.go b/vendor/github.com/onsi/gomega/internal/assertion.go index 08356a61..cc846e7c 100644 --- a/vendor/github.com/onsi/gomega/internal/assertion.go +++ b/vendor/github.com/onsi/gomega/internal/assertion.go @@ -9,19 +9,19 @@ import ( ) type Assertion struct { - actuals []interface{} // actual value plus all extra values - actualIndex int // value to pass to the matcher - vet vetinari // the vet to call before calling Gomega matcher + actuals []any // actual value plus all extra values + actualIndex int // value to pass to the matcher + vet vetinari // the vet to call before calling Gomega matcher offset int g *Gomega } // ...obligatory discworld reference, as "vetineer" doesn't sound ... quite right. -type vetinari func(assertion *Assertion, optionalDescription ...interface{}) bool +type vetinari func(assertion *Assertion, optionalDescription ...any) bool -func NewAssertion(actualInput interface{}, g *Gomega, offset int, extra ...interface{}) *Assertion { +func NewAssertion(actualInput any, g *Gomega, offset int, extra ...any) *Assertion { return &Assertion{ - actuals: append([]interface{}{actualInput}, extra...), + actuals: append([]any{actualInput}, extra...), actualIndex: 0, vet: (*Assertion).vetActuals, offset: offset, @@ -44,37 +44,37 @@ func (assertion *Assertion) Error() types.Assertion { } } -func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } -func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } -func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, true, optionalDescription...) } -func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) } -func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *Assertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Assertion", optionalDescription...) return assertion.vet(assertion, optionalDescription...) && assertion.match(matcher, false, optionalDescription...) 
} -func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) string { +func (assertion *Assertion) buildDescription(optionalDescription ...any) string { switch len(optionalDescription) { case 0: return "" @@ -86,7 +86,7 @@ func (assertion *Assertion) buildDescription(optionalDescription ...interface{}) return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) + "\n" } -func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { +func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool { actualInput := assertion.actuals[assertion.actualIndex] matches, err := matcher.Match(actualInput) assertion.g.THelper() @@ -113,7 +113,7 @@ func (assertion *Assertion) match(matcher types.GomegaMatcher, desiredMatch bool // vetActuals vets the actual values, with the (optional) exception of a // specific value, such as the first value in case non-error assertions, or the // last value in case of Error()-based assertions. -func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool { +func (assertion *Assertion) vetActuals(optionalDescription ...any) bool { success, message := vetActuals(assertion.actuals, assertion.actualIndex) if success { return true @@ -129,7 +129,7 @@ func (assertion *Assertion) vetActuals(optionalDescription ...interface{}) bool // the final error value is non-zero. Otherwise, it doesn't vet the actual // values, as these are allowed to take on any values unless there is a non-zero // error value. -func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool { +func (assertion *Assertion) vetError(optionalDescription ...any) bool { if err := assertion.actuals[assertion.actualIndex]; err != nil { // Go error result idiom: all other actual values must be zero values. return assertion.vetActuals(optionalDescription...) @@ -139,7 +139,7 @@ func (assertion *Assertion) vetError(optionalDescription ...interface{}) bool { // vetActuals vets a slice of actual values, optionally skipping a particular // value slice element, such as the first or last value slice element. 
-func vetActuals(actuals []interface{}, skipIndex int) (bool, string) { +func vetActuals(actuals []any, skipIndex int) (bool, string) { for i, actual := range actuals { if i == skipIndex { continue diff --git a/vendor/github.com/onsi/gomega/internal/async_assertion.go b/vendor/github.com/onsi/gomega/internal/async_assertion.go index 8b4cd1f5..a3a646e4 100644 --- a/vendor/github.com/onsi/gomega/internal/async_assertion.go +++ b/vendor/github.com/onsi/gomega/internal/async_assertion.go @@ -69,8 +69,8 @@ type AsyncAssertion struct { asyncType AsyncAssertionType actualIsFunc bool - actual interface{} - argsToForward []interface{} + actual any + argsToForward []any timeoutInterval time.Duration pollingInterval time.Duration @@ -80,7 +80,7 @@ type AsyncAssertion struct { g *Gomega } -func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput interface{}, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { +func NewAsyncAssertion(asyncType AsyncAssertionType, actualInput any, g *Gomega, timeoutInterval time.Duration, pollingInterval time.Duration, mustPassRepeatedly int, ctx context.Context, offset int) *AsyncAssertion { out := &AsyncAssertion{ asyncType: asyncType, timeoutInterval: timeoutInterval, @@ -129,7 +129,7 @@ func (assertion *AsyncAssertion) WithContext(ctx context.Context) types.AsyncAss return assertion } -func (assertion *AsyncAssertion) WithArguments(argsToForward ...interface{}) types.AsyncAssertion { +func (assertion *AsyncAssertion) WithArguments(argsToForward ...any) types.AsyncAssertion { assertion.argsToForward = argsToForward return assertion } @@ -139,19 +139,31 @@ func (assertion *AsyncAssertion) MustPassRepeatedly(count int) types.AsyncAssert return assertion } -func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *AsyncAssertion) Should(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, true, optionalDescription...) } -func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...interface{}) bool { +func (assertion *AsyncAssertion) To(matcher types.GomegaMatcher, optionalDescription ...any) bool { + return assertion.Should(matcher, optionalDescription...) +} + +func (assertion *AsyncAssertion) ShouldNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { assertion.g.THelper() vetOptionalDescription("Asynchronous assertion", optionalDescription...) return assertion.match(matcher, false, optionalDescription...) } -func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interface{}) string { +func (assertion *AsyncAssertion) ToNot(matcher types.GomegaMatcher, optionalDescription ...any) bool { + return assertion.ShouldNot(matcher, optionalDescription...) +} + +func (assertion *AsyncAssertion) NotTo(matcher types.GomegaMatcher, optionalDescription ...any) bool { + return assertion.ShouldNot(matcher, optionalDescription...) +} + +func (assertion *AsyncAssertion) buildDescription(optionalDescription ...any) string { switch len(optionalDescription) { case 0: return "" @@ -163,7 +175,7 @@ func (assertion *AsyncAssertion) buildDescription(optionalDescription ...interfa return fmt.Sprintf(optionalDescription[0].(string), optionalDescription[1:]...) 
+ "\n" } -func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (interface{}, error) { +func (assertion *AsyncAssertion) processReturnValues(values []reflect.Value) (any, error) { if len(values) == 0 { return nil, &asyncPolledActualError{ message: fmt.Sprintf("The function passed to %s did not return any values", assertion.asyncType), @@ -224,7 +236,7 @@ func (assertion *AsyncAssertion) argumentMismatchError(t reflect.Type, numProvid if numProvided == 1 { have = "has" } - return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the corect set of arguments. + return fmt.Errorf(`The function passed to %s has signature %s takes %d arguments but %d %s been provided. Please use %s().WithArguments() to pass the correct set of arguments. You can learn more at https://onsi.github.io/gomega/#eventually `, assertion.asyncType, t, t.NumIn(), numProvided, have, assertion.asyncType) @@ -237,9 +249,9 @@ You can learn more at https://onsi.github.io/gomega/#eventually `, assertion.asyncType, reason) } -func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error), error) { +func (assertion *AsyncAssertion) buildActualPoller() (func() (any, error), error) { if !assertion.actualIsFunc { - return func() (interface{}, error) { return assertion.actual, nil }, nil + return func() (any, error) { return assertion.actual, nil }, nil } actualValue := reflect.ValueOf(assertion.actual) actualType := reflect.TypeOf(assertion.actual) @@ -301,7 +313,7 @@ func (assertion *AsyncAssertion) buildActualPoller() (func() (interface{}, error return nil, assertion.invalidMustPassRepeatedlyError("parameter can't be < 1") } - return func() (actual interface{}, err error) { + return func() (actual any, err error) { var values []reflect.Value assertionFailure = nil defer func() { @@ -354,14 +366,14 @@ func (assertion *AsyncAssertion) afterPolling() <-chan time.Time { } } -func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value interface{}) bool { +func (assertion *AsyncAssertion) matcherSaysStopTrying(matcher types.GomegaMatcher, value any) bool { if assertion.actualIsFunc || types.MatchMayChangeInTheFuture(matcher, value) { return false } return true } -func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value interface{}) (matches bool, err error) { +func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value any) (matches bool, err error) { defer func() { if e := recover(); e != nil { if _, isAsyncError := AsPollingSignalError(e); isAsyncError { @@ -377,13 +389,13 @@ func (assertion *AsyncAssertion) pollMatcher(matcher types.GomegaMatcher, value return } -func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...interface{}) bool { +func (assertion *AsyncAssertion) match(matcher types.GomegaMatcher, desiredMatch bool, optionalDescription ...any) bool { timer := time.Now() timeout := assertion.afterTimeout() lock := sync.Mutex{} var matches, hasLastValidActual bool - var actual, lastValidActual interface{} + var actual, lastValidActual any var actualErr, matcherErr error var oracleMatcherSaysStop bool diff --git a/vendor/github.com/onsi/gomega/internal/duration_bundle.go b/vendor/github.com/onsi/gomega/internal/duration_bundle.go index 2e026c33..1019deb8 100644 --- a/vendor/github.com/onsi/gomega/internal/duration_bundle.go +++ 
b/vendor/github.com/onsi/gomega/internal/duration_bundle.go @@ -49,7 +49,7 @@ func durationFromEnv(key string, defaultDuration time.Duration) time.Duration { return duration } -func toDuration(input interface{}) (time.Duration, error) { +func toDuration(input any) (time.Duration, error) { duration, ok := input.(time.Duration) if ok { return duration, nil diff --git a/vendor/github.com/onsi/gomega/internal/gomega.go b/vendor/github.com/onsi/gomega/internal/gomega.go index c6e2fcc0..66dfe7d0 100644 --- a/vendor/github.com/onsi/gomega/internal/gomega.go +++ b/vendor/github.com/onsi/gomega/internal/gomega.go @@ -40,45 +40,45 @@ func (g *Gomega) ConfigureWithT(t types.GomegaTestingT) *Gomega { return g } -func (g *Gomega) Ω(actual interface{}, extra ...interface{}) types.Assertion { +func (g *Gomega) Ω(actual any, extra ...any) types.Assertion { return g.ExpectWithOffset(0, actual, extra...) } -func (g *Gomega) Expect(actual interface{}, extra ...interface{}) types.Assertion { +func (g *Gomega) Expect(actual any, extra ...any) types.Assertion { return g.ExpectWithOffset(0, actual, extra...) } -func (g *Gomega) ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) types.Assertion { +func (g *Gomega) ExpectWithOffset(offset int, actual any, extra ...any) types.Assertion { return NewAssertion(actual, g, offset, extra...) } -func (g *Gomega) Eventually(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) Eventually(actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeEventually, 0, actualOrCtx, args...) } -func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) EventuallyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeEventually, offset, actualOrCtx, args...) } -func (g *Gomega) Consistently(actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) Consistently(actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, 0, actualOrCtx, args...) } -func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) types.AsyncAssertion { return g.makeAsyncAssertion(AsyncAssertionTypeConsistently, offset, actualOrCtx, args...) 
} -func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx interface{}, args ...interface{}) types.AsyncAssertion { +func (g *Gomega) makeAsyncAssertion(asyncAssertionType AsyncAssertionType, offset int, actualOrCtx any, args ...any) types.AsyncAssertion { baseOffset := 3 timeoutInterval := -time.Duration(1) pollingInterval := -time.Duration(1) - intervals := []interface{}{} + intervals := []any{} var ctx context.Context actual := actualOrCtx startingIndex := 0 if _, isCtx := actualOrCtx.(context.Context); isCtx && len(args) > 0 { - // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argumnent **and** the second argument is not a parseable duration + // the first argument is a context, we should accept it as the context _only if_ it is **not** the only argument **and** the second argument is not a parseable duration // this is due to an unfortunate ambiguity in early version of Gomega in which multi-type durations are allowed after the actual if _, err := toDuration(args[0]); err != nil { ctx = actualOrCtx.(context.Context) diff --git a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go index 3a4f7ddd..450c4033 100644 --- a/vendor/github.com/onsi/gomega/internal/polling_signal_error.go +++ b/vendor/github.com/onsi/gomega/internal/polling_signal_error.go @@ -100,7 +100,7 @@ func (s *PollingSignalErrorImpl) TryAgainDuration() time.Duration { return s.duration } -func AsPollingSignalError(actual interface{}) (*PollingSignalErrorImpl, bool) { +func AsPollingSignalError(actual any) (*PollingSignalErrorImpl, bool) { if actual == nil { return nil, false } diff --git a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go index f2958764..b748de41 100644 --- a/vendor/github.com/onsi/gomega/internal/vetoptdesc.go +++ b/vendor/github.com/onsi/gomega/internal/vetoptdesc.go @@ -10,7 +10,7 @@ import ( // Gomega matcher at the beginning it panics. This allows for rendering Gomega // matchers as part of an optional Description, as long as they're not in the // first slot. -func vetOptionalDescription(assertion string, optionalDescription ...interface{}) { +func vetOptionalDescription(assertion string, optionalDescription ...any) { if len(optionalDescription) == 0 { return } diff --git a/vendor/github.com/onsi/gomega/matchers.go b/vendor/github.com/onsi/gomega/matchers.go index 7ef27dc9..10b6693f 100644 --- a/vendor/github.com/onsi/gomega/matchers.go +++ b/vendor/github.com/onsi/gomega/matchers.go @@ -12,7 +12,7 @@ import ( // Equal uses reflect.DeepEqual to compare actual with expected. Equal is strict about // types when performing comparisons. // It is an error for both actual and expected to be nil. Use BeNil() instead. -func Equal(expected interface{}) types.GomegaMatcher { +func Equal(expected any) types.GomegaMatcher { return &matchers.EqualMatcher{ Expected: expected, } @@ -22,7 +22,7 @@ func Equal(expected interface{}) types.GomegaMatcher { // This is done by converting actual to have the type of expected before // attempting equality with reflect.DeepEqual. // It is an error for actual and expected to be nil. Use BeNil() instead. 
-func BeEquivalentTo(expected interface{}) types.GomegaMatcher { +func BeEquivalentTo(expected any) types.GomegaMatcher { return &matchers.BeEquivalentToMatcher{ Expected: expected, } @@ -31,7 +31,7 @@ func BeEquivalentTo(expected interface{}) types.GomegaMatcher { // BeComparableTo uses gocmp.Equal from github.com/google/go-cmp (instead of reflect.DeepEqual) to perform a deep comparison. // You can pass cmp.Option as options. // It is an error for actual and expected to be nil. Use BeNil() instead. -func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatcher { +func BeComparableTo(expected any, opts ...cmp.Option) types.GomegaMatcher { return &matchers.BeComparableToMatcher{ Expected: expected, Options: opts, @@ -41,7 +41,7 @@ func BeComparableTo(expected interface{}, opts ...cmp.Option) types.GomegaMatche // BeIdenticalTo uses the == operator to compare actual with expected. // BeIdenticalTo is strict about types when performing comparisons. // It is an error for both actual and expected to be nil. Use BeNil() instead. -func BeIdenticalTo(expected interface{}) types.GomegaMatcher { +func BeIdenticalTo(expected any) types.GomegaMatcher { return &matchers.BeIdenticalToMatcher{ Expected: expected, } @@ -139,7 +139,7 @@ func Succeed() types.GomegaMatcher { // Error interface // // The optional second argument is a description of the error function, if used. This is required when passing a function but is ignored in all other cases. -func MatchError(expected interface{}, functionErrorDescription ...any) types.GomegaMatcher { +func MatchError(expected any, functionErrorDescription ...any) types.GomegaMatcher { return &matchers.MatchErrorMatcher{ Expected: expected, FuncErrDescription: functionErrorDescription, @@ -202,11 +202,11 @@ func BeClosed() types.GomegaMatcher { // Expect(myThing.IsValid()).Should(BeTrue()) // // Finally, if you want to match the received object as well as get the actual received value into a variable, so you can reason further about the value received, -// you can pass a pointer to a variable of the approriate type first, and second a matcher: +// you can pass a pointer to a variable of the appropriate type first, and second a matcher: // // var myThing thing // Eventually(thingChan).Should(Receive(&myThing, ContainSubstring("bar"))) -func Receive(args ...interface{}) types.GomegaMatcher { +func Receive(args ...any) types.GomegaMatcher { return &matchers.ReceiveMatcher{ Args: args, } @@ -224,7 +224,7 @@ func Receive(args ...interface{}) types.GomegaMatcher { // // Of course, the value is actually sent to the channel. The point of `BeSent` is less to make an assertion about the availability of the channel (which is typically an implementation detail that your test should not be concerned with). // Rather, the point of `BeSent` is to make it possible to easily and expressively write tests that can timeout on blocked channel sends. -func BeSent(arg interface{}) types.GomegaMatcher { +func BeSent(arg any) types.GomegaMatcher { return &matchers.BeSentMatcher{ Arg: arg, } @@ -233,7 +233,7 @@ func BeSent(arg interface{}) types.GomegaMatcher { // MatchRegexp succeeds if actual is a string or stringer that matches the // passed-in regexp. Optional arguments can be provided to construct a regexp // via fmt.Sprintf(). 
-func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { +func MatchRegexp(regexp string, args ...any) types.GomegaMatcher { return &matchers.MatchRegexpMatcher{ Regexp: regexp, Args: args, @@ -243,7 +243,7 @@ func MatchRegexp(regexp string, args ...interface{}) types.GomegaMatcher { // ContainSubstring succeeds if actual is a string or stringer that contains the // passed-in substring. Optional arguments can be provided to construct the substring // via fmt.Sprintf(). -func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { +func ContainSubstring(substr string, args ...any) types.GomegaMatcher { return &matchers.ContainSubstringMatcher{ Substr: substr, Args: args, @@ -253,7 +253,7 @@ func ContainSubstring(substr string, args ...interface{}) types.GomegaMatcher { // HavePrefix succeeds if actual is a string or stringer that contains the // passed-in string as a prefix. Optional arguments can be provided to construct // via fmt.Sprintf(). -func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { +func HavePrefix(prefix string, args ...any) types.GomegaMatcher { return &matchers.HavePrefixMatcher{ Prefix: prefix, Args: args, @@ -263,7 +263,7 @@ func HavePrefix(prefix string, args ...interface{}) types.GomegaMatcher { // HaveSuffix succeeds if actual is a string or stringer that contains the // passed-in string as a suffix. Optional arguments can be provided to construct // via fmt.Sprintf(). -func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { +func HaveSuffix(suffix string, args ...any) types.GomegaMatcher { return &matchers.HaveSuffixMatcher{ Suffix: suffix, Args: args, @@ -273,7 +273,7 @@ func HaveSuffix(suffix string, args ...interface{}) types.GomegaMatcher { // MatchJSON succeeds if actual is a string or stringer of JSON that matches // the expected JSON. The JSONs are decoded and the resulting objects are compared via // reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. -func MatchJSON(json interface{}) types.GomegaMatcher { +func MatchJSON(json any) types.GomegaMatcher { return &matchers.MatchJSONMatcher{ JSONToMatch: json, } @@ -282,7 +282,7 @@ func MatchJSON(json interface{}) types.GomegaMatcher { // MatchXML succeeds if actual is a string or stringer of XML that matches // the expected XML. The XMLs are decoded and the resulting objects are compared via // reflect.DeepEqual so things like whitespaces shouldn't matter. -func MatchXML(xml interface{}) types.GomegaMatcher { +func MatchXML(xml any) types.GomegaMatcher { return &matchers.MatchXMLMatcher{ XMLToMatch: xml, } @@ -291,7 +291,7 @@ func MatchXML(xml interface{}) types.GomegaMatcher { // MatchYAML succeeds if actual is a string or stringer of YAML that matches // the expected YAML. The YAML's are decoded and the resulting objects are compared via // reflect.DeepEqual so things like key-ordering and whitespace shouldn't matter. 
-func MatchYAML(yaml interface{}) types.GomegaMatcher { +func MatchYAML(yaml any) types.GomegaMatcher { return &matchers.MatchYAMLMatcher{ YAMLToMatch: yaml, } @@ -338,7 +338,7 @@ func BeZero() types.GomegaMatcher { // // var findings []string // Expect([]string{"Foo", "FooBar"}).Should(ContainElement(ContainSubString("Bar", &findings))) -func ContainElement(element interface{}, result ...interface{}) types.GomegaMatcher { +func ContainElement(element any, result ...any) types.GomegaMatcher { return &matchers.ContainElementMatcher{ Element: element, Result: result, @@ -358,7 +358,7 @@ func ContainElement(element interface{}, result ...interface{}) types.GomegaMatc // Expect(2).Should(BeElementOf(1, 2)) // // Actual must be typed. -func BeElementOf(elements ...interface{}) types.GomegaMatcher { +func BeElementOf(elements ...any) types.GomegaMatcher { return &matchers.BeElementOfMatcher{ Elements: elements, } @@ -368,7 +368,7 @@ func BeElementOf(elements ...interface{}) types.GomegaMatcher { // BeKeyOf() always uses Equal() to perform the match between actual and the map keys. // // Expect("foo").Should(BeKeyOf(map[string]bool{"foo": true, "bar": false})) -func BeKeyOf(element interface{}) types.GomegaMatcher { +func BeKeyOf(element any) types.GomegaMatcher { return &matchers.BeKeyOfMatcher{ Map: element, } @@ -388,14 +388,14 @@ func BeKeyOf(element interface{}) types.GomegaMatcher { // // Expect([]string{"Foo", "FooBar"}).Should(ConsistOf([]string{"FooBar", "Foo"})) // -// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []interface{} are different types - hence the need for this special rule. -func ConsistOf(elements ...interface{}) types.GomegaMatcher { +// Note that Go's type system does not allow you to write this as ConsistOf([]string{"FooBar", "Foo"}...) as []string and []any are different types - hence the need for this special rule. +func ConsistOf(elements ...any) types.GomegaMatcher { return &matchers.ConsistOfMatcher{ Elements: elements, } } -// HaveExactElements succeeds if actual contains elements that precisely match the elemets passed into the matcher. The ordering of the elements does matter. +// HaveExactElements succeeds if actual contains elements that precisely match the elements passed into the matcher. The ordering of the elements does matter. // By default HaveExactElements() uses Equal() to match the elements, however custom matchers can be passed in instead. Here are some examples: // // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements("Foo", "FooBar")) @@ -403,7 +403,7 @@ func ConsistOf(elements ...interface{}) types.GomegaMatcher { // Expect([]string{"Foo", "FooBar"}).Should(HaveExactElements(ContainSubstring("Foo"), ContainSubstring("Foo"))) // // Actual must be an array or slice. -func HaveExactElements(elements ...interface{}) types.GomegaMatcher { +func HaveExactElements(elements ...any) types.GomegaMatcher { return &matchers.HaveExactElementsMatcher{ Elements: elements, } @@ -417,7 +417,7 @@ func HaveExactElements(elements ...interface{}) types.GomegaMatcher { // // Actual must be an array, slice or map. // For maps, ContainElements searches through the map's values. 
-func ContainElements(elements ...interface{}) types.GomegaMatcher { +func ContainElements(elements ...any) types.GomegaMatcher { return &matchers.ContainElementsMatcher{ Elements: elements, } @@ -432,7 +432,7 @@ func ContainElements(elements ...interface{}) types.GomegaMatcher { // // Actual must be an array, slice or map. // For maps, HaveEach searches through the map's values. -func HaveEach(element interface{}) types.GomegaMatcher { +func HaveEach(element any) types.GomegaMatcher { return &matchers.HaveEachMatcher{ Element: element, } @@ -443,7 +443,7 @@ func HaveEach(element interface{}) types.GomegaMatcher { // matcher can be passed in instead: // // Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKey(MatchRegexp(`.+Foo$`))) -func HaveKey(key interface{}) types.GomegaMatcher { +func HaveKey(key any) types.GomegaMatcher { return &matchers.HaveKeyMatcher{ Key: key, } @@ -455,7 +455,7 @@ func HaveKey(key interface{}) types.GomegaMatcher { // // Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue("Foo", "Bar")) // Expect(map[string]string{"Foo": "Bar", "BazFoo": "Duck"}).Should(HaveKeyWithValue(MatchRegexp(`.+Foo$`), "Bar")) -func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { +func HaveKeyWithValue(key any, value any) types.GomegaMatcher { return &matchers.HaveKeyWithValueMatcher{ Key: key, Value: value, @@ -483,7 +483,7 @@ func HaveKeyWithValue(key interface{}, value interface{}) types.GomegaMatcher { // Expect(book).To(HaveField("Title", ContainSubstring("Les")) // Expect(book).To(HaveField("Author.FirstName", Equal("Victor")) // Expect(book).To(HaveField("Author.DOB.Year()", BeNumerically("<", 1900)) -func HaveField(field string, expected interface{}) types.GomegaMatcher { +func HaveField(field string, expected any) types.GomegaMatcher { return &matchers.HaveFieldMatcher{ Field: field, Expected: expected, @@ -535,7 +535,7 @@ func HaveValue(matcher types.GomegaMatcher) types.GomegaMatcher { // Expect(1.0).Should(BeNumerically(">=", 1.0)) // Expect(1.0).Should(BeNumerically("<", 3)) // Expect(1.0).Should(BeNumerically("<=", 1.0)) -func BeNumerically(comparator string, compareTo ...interface{}) types.GomegaMatcher { +func BeNumerically(comparator string, compareTo ...any) types.GomegaMatcher { return &matchers.BeNumericallyMatcher{ Comparator: comparator, CompareTo: compareTo, @@ -562,7 +562,7 @@ func BeTemporally(comparator string, compareTo time.Time, threshold ...time.Dura // Expect(5).Should(BeAssignableToTypeOf(-1)) // different values same type // Expect("foo").Should(BeAssignableToTypeOf("bar")) // different values same type // Expect(struct{ Foo string }{}).Should(BeAssignableToTypeOf(struct{ Foo string }{})) -func BeAssignableToTypeOf(expected interface{}) types.GomegaMatcher { +func BeAssignableToTypeOf(expected any) types.GomegaMatcher { return &matchers.AssignableToTypeOfMatcher{ Expected: expected, } @@ -581,7 +581,7 @@ func Panic() types.GomegaMatcher { // matcher can be passed in instead: // // Expect(fn).Should(PanicWith(MatchRegexp(`.+Foo$`))) -func PanicWith(expected interface{}) types.GomegaMatcher { +func PanicWith(expected any) types.GomegaMatcher { return &matchers.PanicMatcher{Expected: expected} } @@ -610,7 +610,7 @@ func BeADirectory() types.GomegaMatcher { // Expect(resp).Should(HaveHTTPStatus(http.StatusOK)) // asserts that resp.StatusCode == 200 // Expect(resp).Should(HaveHTTPStatus("404 Not Found")) // asserts that resp.Status == "404 Not Found" // 
Expect(resp).Should(HaveHTTPStatus(http.StatusOK, http.StatusNoContent)) // asserts that resp.StatusCode == 200 || resp.StatusCode == 204 -func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { +func HaveHTTPStatus(expected ...any) types.GomegaMatcher { return &matchers.HaveHTTPStatusMatcher{Expected: expected} } @@ -618,7 +618,7 @@ func HaveHTTPStatus(expected ...interface{}) types.GomegaMatcher { // Actual must be either a *http.Response or *httptest.ResponseRecorder. // Expected must be a string header name, followed by a header value which // can be a string, or another matcher. -func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatcher { +func HaveHTTPHeaderWithValue(header string, value any) types.GomegaMatcher { return &matchers.HaveHTTPHeaderWithValueMatcher{ Header: header, Value: value, @@ -628,7 +628,7 @@ func HaveHTTPHeaderWithValue(header string, value interface{}) types.GomegaMatch // HaveHTTPBody matches if the body matches. // Actual must be either a *http.Response or *httptest.ResponseRecorder. // Expected must be either a string, []byte, or other matcher -func HaveHTTPBody(expected interface{}) types.GomegaMatcher { +func HaveHTTPBody(expected any) types.GomegaMatcher { return &matchers.HaveHTTPBodyMatcher{Expected: expected} } @@ -687,15 +687,15 @@ func Not(matcher types.GomegaMatcher) types.GomegaMatcher { // Expect(1).To(WithTransform(failingplus1, Equal(2))) // // And(), Or(), Not() and WithTransform() allow matchers to be composed into complex expressions. -func WithTransform(transform interface{}, matcher types.GomegaMatcher) types.GomegaMatcher { +func WithTransform(transform any, matcher types.GomegaMatcher) types.GomegaMatcher { return matchers.NewWithTransformMatcher(transform, matcher) } // Satisfy matches the actual value against the `predicate` function. -// The given predicate must be a function of one paramter that returns bool. +// The given predicate must be a function of one parameter that returns bool. // // var isEven = func(i int) bool { return i%2 == 0 } // Expect(2).To(Satisfy(isEven)) -func Satisfy(predicate interface{}) types.GomegaMatcher { +func Satisfy(predicate any) types.GomegaMatcher { return matchers.NewSatisfyMatcher(predicate) } diff --git a/vendor/github.com/onsi/gomega/matchers/and.go b/vendor/github.com/onsi/gomega/matchers/and.go index 6bd826ad..db48e90b 100644 --- a/vendor/github.com/onsi/gomega/matchers/and.go +++ b/vendor/github.com/onsi/gomega/matchers/and.go @@ -14,7 +14,7 @@ type AndMatcher struct { firstFailedMatcher types.GomegaMatcher } -func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { +func (m *AndMatcher) Match(actual any) (success bool, err error) { m.firstFailedMatcher = nil for _, matcher := range m.Matchers { success, err := matcher.Match(actual) @@ -26,16 +26,16 @@ func (m *AndMatcher) Match(actual interface{}) (success bool, err error) { return true, nil } -func (m *AndMatcher) FailureMessage(actual interface{}) (message string) { +func (m *AndMatcher) FailureMessage(actual any) (message string) { return m.firstFailedMatcher.FailureMessage(actual) } -func (m *AndMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *AndMatcher) NegatedFailureMessage(actual any) (message string) { // not the most beautiful list of matchers, but not bad either... 
return format.Message(actual, fmt.Sprintf("To not satisfy all of these matchers: %s", m.Matchers)) } -func (m *AndMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *AndMatcher) MatchMayChangeInTheFuture(actual any) bool { /* Example with 3 matchers: A, B, C diff --git a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go index be483952..a100e5c0 100644 --- a/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/assignable_to_type_of_matcher.go @@ -10,10 +10,10 @@ import ( ) type AssignableToTypeOfMatcher struct { - Expected interface{} + Expected any } -func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *AssignableToTypeOfMatcher) Match(actual any) (success bool, err error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare <nil> to <nil>.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } else if matcher.Expected == nil { @@ -28,10 +28,10 @@ func (matcher *AssignableToTypeOfMatcher) Match(actual interface{}) (success boo return actualType.AssignableTo(expectedType), nil } -func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual interface{}) string { +func (matcher *AssignableToTypeOfMatcher) FailureMessage(actual any) string { return format.Message(actual, fmt.Sprintf("to be assignable to the type: %T", matcher.Expected)) } -func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual interface{}) string { +func (matcher *AssignableToTypeOfMatcher) NegatedFailureMessage(actual any) string { return format.Message(actual, fmt.Sprintf("not to be assignable to the type: %T", matcher.Expected)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go index 93d4497c..1d823604 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_directory.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_directory.go @@ -24,11 +24,11 @@ func (t notADirectoryError) Error() string { } type BeADirectoryMatcher struct { - expected interface{} + expected any err error } -func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeADirectoryMatcher) Match(actual any) (success bool, err error) { actualFilename, ok := actual.(string) if !ok { return false, fmt.Errorf("BeADirectoryMatcher matcher expects a file path") @@ -47,10 +47,10 @@ func (matcher *BeADirectoryMatcher) Match(actual interface{}) (success bool, err return true, nil } -func (matcher *BeADirectoryMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeADirectoryMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("to be a directory: %s", matcher.err)) } -func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeADirectoryMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not be a directory") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go index 8fefc4de..3e53d628 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_a_regular_file.go @@ -24,11 
+24,11 @@ func (t notARegularFileError) Error() string { } type BeARegularFileMatcher struct { - expected interface{} + expected any err error } -func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeARegularFileMatcher) Match(actual any) (success bool, err error) { actualFilename, ok := actual.(string) if !ok { return false, fmt.Errorf("BeARegularFileMatcher matcher expects a file path") @@ -47,10 +47,10 @@ func (matcher *BeARegularFileMatcher) Match(actual interface{}) (success bool, e return true, nil } -func (matcher *BeARegularFileMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeARegularFileMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("to be a regular file: %s", matcher.err)) } -func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeARegularFileMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not be a regular file") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go index e2bdd281..04f156db 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go +++ b/vendor/github.com/onsi/gomega/matchers/be_an_existing_file.go @@ -10,10 +10,10 @@ import ( ) type BeAnExistingFileMatcher struct { - expected interface{} + expected any } -func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeAnExistingFileMatcher) Match(actual any) (success bool, err error) { actualFilename, ok := actual.(string) if !ok { return false, fmt.Errorf("BeAnExistingFileMatcher matcher expects a file path") @@ -31,10 +31,10 @@ func (matcher *BeAnExistingFileMatcher) Match(actual interface{}) (success bool, return true, nil } -func (matcher *BeAnExistingFileMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeAnExistingFileMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to exist") } -func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeAnExistingFileMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to exist") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go index f13c2449..4319dde4 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_closed_matcher.go @@ -12,7 +12,7 @@ import ( type BeClosedMatcher struct { } -func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeClosedMatcher) Match(actual any) (success bool, err error) { if !isChan(actual) { return false, fmt.Errorf("BeClosed matcher expects a channel. 
Got:\n%s", format.Object(actual, 1)) } @@ -39,10 +39,10 @@ func (matcher *BeClosedMatcher) Match(actual interface{}) (success bool, err err return closed, nil } -func (matcher *BeClosedMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeClosedMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be closed") } -func (matcher *BeClosedMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeClosedMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "to be open") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go index 4e389785..532fc374 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_comparable_to_matcher.go @@ -9,11 +9,11 @@ import ( ) type BeComparableToMatcher struct { - Expected interface{} + Expected any Options cmp.Options } -func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, matchErr error) { +func (matcher *BeComparableToMatcher) Match(actual any) (success bool, matchErr error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } @@ -40,10 +40,10 @@ func (matcher *BeComparableToMatcher) Match(actual interface{}) (success bool, m return cmp.Equal(actual, matcher.Expected, matcher.Options...), nil } -func (matcher *BeComparableToMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeComparableToMatcher) FailureMessage(actual any) (message string) { return fmt.Sprint("Expected object to be comparable, diff: ", cmp.Diff(actual, matcher.Expected, matcher.Options...)) } -func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeComparableToMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be comparable to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go index 9ee75a5d..406fe548 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_element_of_matcher.go @@ -10,10 +10,10 @@ import ( ) type BeElementOfMatcher struct { - Elements []interface{} + Elements []any } -func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeElementOfMatcher) Match(actual any) (success bool, err error) { if reflect.TypeOf(actual) == nil { return false, fmt.Errorf("BeElement matcher expects actual to be typed") } @@ -34,10 +34,10 @@ func (matcher *BeElementOfMatcher) Match(actual interface{}) (success bool, err return false, lastError } -func (matcher *BeElementOfMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeElementOfMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be an element of", presentable(matcher.Elements)) } -func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeElementOfMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be an element of", 
presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go index bd7f0b96..e9e0644f 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_empty_matcher.go @@ -13,7 +13,7 @@ import ( type BeEmptyMatcher struct { } -func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeEmptyMatcher) Match(actual any) (success bool, err error) { // short-circuit the iterator case, as we only need to see the first // element, if any. if miter.IsIter(actual) { @@ -34,10 +34,10 @@ func (matcher *BeEmptyMatcher) Match(actual interface{}) (success bool, err erro return length == 0, nil } -func (matcher *BeEmptyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeEmptyMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be empty") } -func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeEmptyMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be empty") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go index 263627f4..37b3080b 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_equivalent_to_matcher.go @@ -10,10 +10,10 @@ import ( ) type BeEquivalentToMatcher struct { - Expected interface{} + Expected any } -func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeEquivalentToMatcher) Match(actual any) (success bool, err error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Both actual and expected must not be nil.") } @@ -27,10 +27,10 @@ func (matcher *BeEquivalentToMatcher) Match(actual interface{}) (success bool, e return reflect.DeepEqual(convertedActual, matcher.Expected), nil } -func (matcher *BeEquivalentToMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeEquivalentToMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be equivalent to", matcher.Expected) } -func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeEquivalentToMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be equivalent to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go index 8ee2b1c5..55e86951 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_false_matcher.go @@ -12,7 +12,7 @@ type BeFalseMatcher struct { Reason string } -func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeFalseMatcher) Match(actual any) (success bool, err error) { if !isBool(actual) { return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) } @@ -20,7 +20,7 @@ func (matcher *BeFalseMatcher) Match(actual interface{}) (success bool, err erro return actual == false, nil } -func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeFalseMatcher) FailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "to be false") } else { @@ -28,7 +28,7 @@ func (matcher *BeFalseMatcher) FailureMessage(actual interface{}) (message strin } } -func (matcher *BeFalseMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeFalseMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "not to be false") } else { diff --git a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go index 631ce11e..579aa41b 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_identical_to.go +++ b/vendor/github.com/onsi/gomega/matchers/be_identical_to.go @@ -10,10 +10,10 @@ import ( ) type BeIdenticalToMatcher struct { - Expected interface{} + Expected any } -func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, matchErr error) { +func (matcher *BeIdenticalToMatcher) Match(actual any) (success bool, matchErr error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } @@ -30,10 +30,10 @@ func (matcher *BeIdenticalToMatcher) Match(actual interface{}) (success bool, ma return actual == matcher.Expected, nil } -func (matcher *BeIdenticalToMatcher) FailureMessage(actual interface{}) string { +func (matcher *BeIdenticalToMatcher) FailureMessage(actual any) string { return format.Message(actual, "to be identical to", matcher.Expected) } -func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual interface{}) string { +func (matcher *BeIdenticalToMatcher) NegatedFailureMessage(actual any) string { return format.Message(actual, "not to be identical to", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go index 449a291e..3fff3df7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_key_of_matcher.go @@ -8,10 +8,10 @@ import ( ) type BeKeyOfMatcher struct { - Map interface{} + Map any } -func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeKeyOfMatcher) Match(actual any) (success bool, err error) { if !isMap(matcher.Map) { return false, fmt.Errorf("BeKeyOf matcher needs expected to be a map type") } @@ -36,10 +36,10 @@ func (matcher *BeKeyOfMatcher) Match(actual interface{}) (success bool, err erro return false, lastError } -func (matcher *BeKeyOfMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeKeyOfMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be a key of", presentable(valuesOf(matcher.Map))) } -func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeKeyOfMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be a key of", presentable(valuesOf(matcher.Map))) } diff --git 
a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go index 551d99d7..cab37f4f 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_nil_matcher.go @@ -7,14 +7,14 @@ import "github.com/onsi/gomega/format" type BeNilMatcher struct { } -func (matcher *BeNilMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeNilMatcher) Match(actual any) (success bool, err error) { return isNil(actual), nil } -func (matcher *BeNilMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeNilMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be nil") } -func (matcher *BeNilMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeNilMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be nil") } diff --git a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go index 100735de..7e6ce154 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_numerically_matcher.go @@ -11,18 +11,18 @@ import ( type BeNumericallyMatcher struct { Comparator string - CompareTo []interface{} + CompareTo []any } -func (matcher *BeNumericallyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeNumericallyMatcher) FailureMessage(actual any) (message string) { return matcher.FormatFailureMessage(actual, false) } -func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeNumericallyMatcher) NegatedFailureMessage(actual any) (message string) { return matcher.FormatFailureMessage(actual, true) } -func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, negated bool) (message string) { +func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual any, negated bool) (message string) { if len(matcher.CompareTo) == 1 { message = fmt.Sprintf("to be %s", matcher.Comparator) } else { @@ -34,7 +34,7 @@ func (matcher *BeNumericallyMatcher) FormatFailureMessage(actual interface{}, ne return format.Message(actual, message, matcher.CompareTo[0]) } -func (matcher *BeNumericallyMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeNumericallyMatcher) Match(actual any) (success bool, err error) { if len(matcher.CompareTo) == 0 || len(matcher.CompareTo) > 2 { return false, fmt.Errorf("BeNumerically requires 1 or 2 CompareTo arguments. Got:\n%s", format.Object(matcher.CompareTo, 1)) } diff --git a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go index cf582a3f..14ffbf6c 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_sent_matcher.go @@ -10,11 +10,11 @@ import ( ) type BeSentMatcher struct { - Arg interface{} + Arg any channelClosed bool } -func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeSentMatcher) Match(actual any) (success bool, err error) { if !isChan(actual) { return false, fmt.Errorf("BeSent expects a channel. 
Got:\n%s", format.Object(actual, 1)) } @@ -56,15 +56,15 @@ func (matcher *BeSentMatcher) Match(actual interface{}) (success bool, err error return didSend, nil } -func (matcher *BeSentMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeSentMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to send:", matcher.Arg) } -func (matcher *BeSentMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeSentMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to send:", matcher.Arg) } -func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (matcher *BeSentMatcher) MatchMayChangeInTheFuture(actual any) bool { if !isChan(actual) { return false } diff --git a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go index dec4db02..edb647c6 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_temporally_matcher.go @@ -15,17 +15,17 @@ type BeTemporallyMatcher struct { Threshold []time.Duration } -func (matcher *BeTemporallyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeTemporallyMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("to be %s", matcher.Comparator), matcher.CompareTo) } -func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeTemporallyMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, fmt.Sprintf("not to be %s", matcher.Comparator), matcher.CompareTo) } -func (matcher *BeTemporallyMatcher) Match(actual interface{}) (bool, error) { +func (matcher *BeTemporallyMatcher) Match(actual any) (bool, error) { // predicate to test for time.Time type - isTime := func(t interface{}) bool { + isTime := func(t any) bool { _, ok := t.(time.Time) return ok } diff --git a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go index 3576aac8..a010bec5 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_true_matcher.go @@ -12,7 +12,7 @@ type BeTrueMatcher struct { Reason string } -func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeTrueMatcher) Match(actual any) (success bool, err error) { if !isBool(actual) { return false, fmt.Errorf("Expected a boolean. 
Got:\n%s", format.Object(actual, 1)) } @@ -20,7 +20,7 @@ func (matcher *BeTrueMatcher) Match(actual interface{}) (success bool, err error return actual.(bool), nil } -func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeTrueMatcher) FailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "to be true") } else { @@ -28,7 +28,7 @@ func (matcher *BeTrueMatcher) FailureMessage(actual interface{}) (message string } } -func (matcher *BeTrueMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeTrueMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.Reason == "" { return format.Message(actual, "not to be true") } else { diff --git a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go index 26196f16..f5f5d7f7 100644 --- a/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/be_zero_matcher.go @@ -9,7 +9,7 @@ import ( type BeZeroMatcher struct { } -func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *BeZeroMatcher) Match(actual any) (success bool, err error) { if actual == nil { return true, nil } @@ -19,10 +19,10 @@ func (matcher *BeZeroMatcher) Match(actual interface{}) (success bool, err error } -func (matcher *BeZeroMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *BeZeroMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to be zero-valued") } -func (matcher *BeZeroMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *BeZeroMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to be zero-valued") } diff --git a/vendor/github.com/onsi/gomega/matchers/consist_of.go b/vendor/github.com/onsi/gomega/matchers/consist_of.go index a1118818..05c751b6 100644 --- a/vendor/github.com/onsi/gomega/matchers/consist_of.go +++ b/vendor/github.com/onsi/gomega/matchers/consist_of.go @@ -12,12 +12,12 @@ import ( ) type ConsistOfMatcher struct { - Elements []interface{} - missingElements []interface{} - extraElements []interface{} + Elements []any + missingElements []any + extraElements []any } -func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ConsistOfMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("ConsistOf matcher expects an array/slice/map/iter.Seq/iter.Seq2. 
Got:\n%s", format.Object(actual, 1)) } @@ -35,19 +35,19 @@ func (matcher *ConsistOfMatcher) Match(actual interface{}) (success bool, err er return true, nil } - var missingMatchers []interface{} + var missingMatchers []any matcher.extraElements, missingMatchers = bipartiteGraph.FreeLeftRight(edges) matcher.missingElements = equalMatchersToElements(missingMatchers) return false, nil } -func neighbours(value, matcher interface{}) (bool, error) { +func neighbours(value, matcher any) (bool, error) { match, err := matcher.(omegaMatcher).Match(value) return match && err == nil, nil } -func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { +func equalMatchersToElements(matchers []any) (elements []any) { for _, matcher := range matchers { if equalMatcher, ok := matcher.(*EqualMatcher); ok { elements = append(elements, equalMatcher.Expected) @@ -60,7 +60,7 @@ func equalMatchersToElements(matchers []interface{}) (elements []interface{}) { return } -func flatten(elems []interface{}) []interface{} { +func flatten(elems []any) []any { if len(elems) != 1 || !(isArrayOrSlice(elems[0]) || (miter.IsIter(elems[0]) && !miter.IsSeq2(elems[0]))) { @@ -77,14 +77,14 @@ func flatten(elems []interface{}) []interface{} { } value := reflect.ValueOf(elems[0]) - flattened := make([]interface{}, value.Len()) + flattened := make([]any, value.Len()) for i := 0; i < value.Len(); i++ { flattened[i] = value.Index(i).Interface() } return flattened } -func matchers(expectedElems []interface{}) (matchers []interface{}) { +func matchers(expectedElems []any) (matchers []any) { for _, e := range flatten(expectedElems) { if e == nil { matchers = append(matchers, &BeNilMatcher{}) @@ -97,11 +97,11 @@ func matchers(expectedElems []interface{}) (matchers []interface{}) { return } -func presentable(elems []interface{}) interface{} { +func presentable(elems []any) any { elems = flatten(elems) if len(elems) == 0 { - return []interface{}{} + return []any{} } sv := reflect.ValueOf(elems) @@ -125,9 +125,9 @@ func presentable(elems []interface{}) interface{} { return ss.Interface() } -func valuesOf(actual interface{}) []interface{} { +func valuesOf(actual any) []any { value := reflect.ValueOf(actual) - values := []interface{}{} + values := []any{} if miter.IsIter(actual) { if miter.IsSeq2(actual) { miter.IterateKV(actual, func(k, v reflect.Value) bool { @@ -154,7 +154,7 @@ func valuesOf(actual interface{}) []interface{} { return values } -func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ConsistOfMatcher) FailureMessage(actual any) (message string) { message = format.Message(actual, "to consist of", presentable(matcher.Elements)) message = appendMissingElements(message, matcher.missingElements) if len(matcher.extraElements) > 0 { @@ -164,7 +164,7 @@ func (matcher *ConsistOfMatcher) FailureMessage(actual interface{}) (message str return } -func appendMissingElements(message string, missingElements []interface{}) string { +func appendMissingElements(message string, missingElements []any) string { if len(missingElements) == 0 { return message } @@ -172,6 +172,6 @@ func appendMissingElements(message string, missingElements []interface{}) string format.Object(presentable(missingElements), 1)) } -func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ConsistOfMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to consist of", presentable(matcher.Elements)) } diff --git 
a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go index 830239c7..8337a526 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_element_matcher.go @@ -12,11 +12,11 @@ import ( ) type ContainElementMatcher struct { - Element interface{} - Result []interface{} + Element any + Result []any } -func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ContainElementMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("ContainElement matcher expects an array/slice/map/iterator. Got:\n%s", format.Object(actual, 1)) } @@ -132,14 +132,14 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e var lastError error if !miter.IsIter(actual) { - var valueAt func(int) interface{} + var valueAt func(int) any var foundAt func(int) // We're dealing with an array/slice/map, so in all cases we can iterate // over the elements in actual using indices (that can be considered // keys in case of maps). if isMap(actual) { keys := value.MapKeys() - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.MapIndex(keys[i]).Interface() } if result.Kind() != reflect.Invalid { @@ -150,7 +150,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e } } } else { - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.Index(i).Interface() } if result.Kind() != reflect.Invalid { @@ -251,7 +251,7 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e } // pick up any findings the test is interested in as it specified a non-nil - // result reference. However, the expection always is that there are at + // result reference. However, the expectation always is that there are at // least one or multiple findings. So, if a result is expected, but we had // no findings, then this is an error. 
findings := getFindings() @@ -284,10 +284,10 @@ func (matcher *ContainElementMatcher) Match(actual interface{}) (success bool, e return true, nil } -func (matcher *ContainElementMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to contain element matching", matcher.Element) } -func (matcher *ContainElementMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain element matching", matcher.Element) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go index d9fcb8b8..ce304189 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_elements_matcher.go @@ -9,11 +9,11 @@ import ( ) type ContainElementsMatcher struct { - Elements []interface{} - missingElements []interface{} + Elements []any + missingElements []any } -func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ContainElementsMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("ContainElements matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) } @@ -35,11 +35,11 @@ func (matcher *ContainElementsMatcher) Match(actual interface{}) (success bool, return false, nil } -func (matcher *ContainElementsMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementsMatcher) FailureMessage(actual any) (message string) { message = format.Message(actual, "to contain elements", presentable(matcher.Elements)) return appendMissingElements(message, matcher.missingElements) } -func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ContainElementsMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go index e725f8c2..d9980ee2 100644 --- a/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/contain_substring_matcher.go @@ -11,10 +11,10 @@ import ( type ContainSubstringMatcher struct { Substr string - Args []interface{} + Args []any } -func (matcher *ContainSubstringMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ContainSubstringMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("ContainSubstring matcher requires a string or stringer. 
Got:\n%s", format.Object(actual, 1)) @@ -31,10 +31,10 @@ func (matcher *ContainSubstringMatcher) stringToMatch() string { return stringToMatch } -func (matcher *ContainSubstringMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *ContainSubstringMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to contain substring", matcher.stringToMatch()) } -func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *ContainSubstringMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain substring", matcher.stringToMatch()) } diff --git a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go index befb7bdf..4ad16615 100644 --- a/vendor/github.com/onsi/gomega/matchers/equal_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/equal_matcher.go @@ -9,10 +9,10 @@ import ( ) type EqualMatcher struct { - Expected interface{} + Expected any } -func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *EqualMatcher) Match(actual any) (success bool, err error) { if actual == nil && matcher.Expected == nil { return false, fmt.Errorf("Refusing to compare to .\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.") } @@ -27,7 +27,7 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error) return reflect.DeepEqual(actual, matcher.Expected), nil } -func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *EqualMatcher) FailureMessage(actual any) (message string) { actualString, actualOK := actual.(string) expectedString, expectedOK := matcher.Expected.(string) if actualOK && expectedOK { @@ -37,6 +37,6 @@ func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) return format.Message(actual, "to equal", matcher.Expected) } -func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *EqualMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to equal", matcher.Expected) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go index 9856752f..a4fcfc42 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go @@ -12,7 +12,7 @@ type HaveCapMatcher struct { Count int } -func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveCapMatcher) Match(actual any) (success bool, err error) { length, ok := capOf(actual) if !ok { return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. 
Got:\n%s", format.Object(actual, 1)) @@ -21,10 +21,10 @@ func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err erro return length == matcher.Count, nil } -func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveCapMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count) } -func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveCapMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go index 4111f2b8..4c45063b 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go @@ -9,10 +9,10 @@ import ( ) type HaveEachMatcher struct { - Element interface{} + Element any } -func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveEachMatcher) Match(actual any) (success bool, err error) { if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) { return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s", format.Object(actual, 1)) @@ -61,14 +61,14 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err format.Object(actual, 1)) } - var valueAt func(int) interface{} + var valueAt func(int) any if isMap(actual) { keys := value.MapKeys() - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.MapIndex(keys[i]).Interface() } } else { - valueAt = func(i int) interface{} { + valueAt = func(i int) any { return value.Index(i).Interface() } } @@ -89,11 +89,11 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err } // FailureMessage returns a suitable failure message. -func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveEachMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to contain element matching", matcher.Element) } // NegatedFailureMessage returns a suitable negated failure message. 
-func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveEachMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain element matching", matcher.Element) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go index 23799f1c..8b2d297c 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go +++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go @@ -14,13 +14,13 @@ type mismatchFailure struct { } type HaveExactElementsMatcher struct { - Elements []interface{} + Elements []any mismatchFailures []mismatchFailure missingIndex int extraIndex int } -func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveExactElementsMatcher) Match(actual any) (success bool, err error) { matcher.resetState() if isMap(actual) || miter.IsSeq2(actual) { @@ -108,7 +108,7 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool return success, nil } -func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveExactElementsMatcher) FailureMessage(actual any) (message string) { message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements)) if matcher.missingIndex > 0 { message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex) @@ -125,7 +125,7 @@ func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (mes return } -func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to contain elements", presentable(matcher.Elements)) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go index b5701874..a5a028e9 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go @@ -11,7 +11,7 @@ type HaveExistingFieldMatcher struct { Field string } -func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveExistingFieldMatcher) Match(actual any) (success bool, err error) { // we don't care about the field's actual value, just about any error in // trying to find the field (or method). 
_, err = extractField(actual, matcher.Field, "HaveExistingField") @@ -27,10 +27,10 @@ func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool return false, err } -func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveExistingFieldMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field) } -func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go index 293457e8..d9fbeaf7 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_field.go +++ b/vendor/github.com/onsi/gomega/matchers/have_field.go @@ -17,7 +17,7 @@ func (e missingFieldError) Error() string { return string(e) } -func extractField(actual interface{}, field string, matchername string) (any, error) { +func extractField(actual any, field string, matchername string) (any, error) { fields := strings.SplitN(field, ".", 2) actualValue := reflect.ValueOf(actual) @@ -68,7 +68,7 @@ func extractField(actual interface{}, field string, matchername string) (any, er type HaveFieldMatcher struct { Field string - Expected interface{} + Expected any } func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { @@ -80,7 +80,7 @@ func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher { return expectedMatcher } -func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveFieldMatcher) Match(actual any) (success bool, err error) { extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { return false, err @@ -89,7 +89,7 @@ func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err er return matcher.expectedMatcher().Match(extractedField) } -func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveFieldMatcher) FailureMessage(actual any) (message string) { extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { // this really shouldn't happen @@ -101,7 +101,7 @@ func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message str return message } -func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual any) (message string) { extractedField, err := extractField(actual, matcher.Field, "HaveField") if err != nil { // this really shouldn't happen diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go index d14d9e5f..2d561b9a 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go @@ -11,12 +11,12 @@ import ( ) type HaveHTTPBodyMatcher struct { - Expected interface{} - cachedResponse interface{} + Expected any + cachedResponse any cachedBody []byte } -func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) { +func (matcher *HaveHTTPBodyMatcher) Match(actual any) (bool, error) { body, err := matcher.body(actual) if err != nil 
{ return false, err @@ -34,7 +34,7 @@ func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) { } } -func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual any) (message string) { body, err := matcher.body(actual) if err != nil { return fmt.Sprintf("failed to read body: %s", err) @@ -52,7 +52,7 @@ func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message } } -func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual any) (message string) { body, err := matcher.body(actual) if err != nil { return fmt.Sprintf("failed to read body: %s", err) @@ -73,7 +73,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m // body returns the body. It is cached because once we read it in Match() // the Reader is closed and it is not readable again in FailureMessage() // or NegatedFailureMessage() -func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) { +func (matcher *HaveHTTPBodyMatcher) body(actual any) ([]byte, error) { if matcher.cachedResponse == actual && matcher.cachedBody != nil { return matcher.cachedBody, nil } diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go index c256f452..75672265 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go @@ -11,10 +11,10 @@ import ( type HaveHTTPHeaderWithValueMatcher struct { Header string - Value interface{} + Value any } -func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual any) (success bool, err error) { headerValue, err := matcher.extractHeader(actual) if err != nil { return false, err @@ -28,7 +28,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (succes return headerMatcher.Match(headerValue) } -func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string { +func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual any) string { headerValue, err := matcher.extractHeader(actual) if err != nil { panic(err) // protected by Match() @@ -43,7 +43,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{} return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff) } -func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual any) (message string) { headerValue, err := matcher.extractHeader(actual) if err != nil { panic(err) // protected by Match() @@ -69,7 +69,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatc } } -func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) { +func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual any) (string, error) { switch r := actual.(type) { case *http.Response: return r.Header.Get(matcher.Header), nil diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go index 0f66e46e..8b25b3a9 100644 --- 
a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go @@ -12,10 +12,10 @@ import ( ) type HaveHTTPStatusMatcher struct { - Expected []interface{} + Expected []any } -func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveHTTPStatusMatcher) Match(actual any) (success bool, err error) { var resp *http.Response switch a := actual.(type) { case *http.Response: @@ -48,11 +48,11 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e return false, nil } -func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString()) } -func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString()) } @@ -64,7 +64,7 @@ func (matcher *HaveHTTPStatusMatcher) expectedString() string { return strings.Join(lines, "\n") } -func formatHttpResponse(input interface{}) string { +func formatHttpResponse(input any) string { var resp *http.Response switch r := input.(type) { case *http.Response: diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go index b62ee93c..9e16dcf5 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go @@ -11,10 +11,10 @@ import ( ) type HaveKeyMatcher struct { - Key interface{} + Key any } -func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) { if !isMap(actual) && !miter.IsSeq2(actual) { return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. 
Got:%s", format.Object(actual, 1)) } @@ -52,7 +52,7 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro return false, nil } -func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyMatcher) FailureMessage(actual any) (message string) { switch matcher.Key.(type) { case omegaMatcher: return format.Message(actual, "to have key matching", matcher.Key) @@ -61,7 +61,7 @@ func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message strin } } -func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual any) (message string) { switch matcher.Key.(type) { case omegaMatcher: return format.Message(actual, "not to have key matching", matcher.Key) diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go index 3d608f63..1c53f1e5 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go @@ -11,11 +11,11 @@ import ( ) type HaveKeyWithValueMatcher struct { - Key interface{} - Value interface{} + Key any + Value any } -func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err error) { if !isMap(actual) && !miter.IsSeq2(actual) { return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1)) } @@ -70,7 +70,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, return false, nil } -func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual any) (message string) { str := "to have {key: value}" if _, ok := matcher.Key.(omegaMatcher); ok { str += " matching" @@ -78,12 +78,12 @@ func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (mess str += " matching" } - expect := make(map[interface{}]interface{}, 1) + expect := make(map[any]any, 1) expect[matcher.Key] = matcher.Value return format.Message(actual, str, expect) } -func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual any) (message string) { kStr := "not to have key" if _, ok := matcher.Key.(omegaMatcher); ok { kStr = "not to have key matching" diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go index ca25713f..c334d4c0 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go @@ -10,7 +10,7 @@ type HaveLenMatcher struct { Count int } -func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveLenMatcher) Match(actual any) (success bool, err error) { length, ok := lengthOf(actual) if !ok { return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. 
Got:\n%s", format.Object(actual, 1)) @@ -19,10 +19,10 @@ func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err erro return length == matcher.Count, nil } -func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveLenMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count) } -func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveLenMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go index 22a1b673..a240f1a1 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go @@ -11,7 +11,7 @@ import ( type HaveOccurredMatcher struct { } -func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveOccurredMatcher) Match(actual any) (success bool, err error) { // is purely nil? if actual == nil { return false, nil @@ -26,10 +26,10 @@ func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err return !isNil(actual), nil } -func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveOccurredMatcher) FailureMessage(actual any) (message string) { return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1)) } -func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual any) (message string) { return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred") } diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go index 1d8e8027..7987d41f 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go @@ -8,10 +8,10 @@ import ( type HavePrefixMatcher struct { Prefix string - Args []interface{} + Args []any } -func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HavePrefixMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. 
Got:\n%s", format.Object(actual, 1)) @@ -27,10 +27,10 @@ func (matcher *HavePrefixMatcher) prefix() string { return matcher.Prefix } -func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HavePrefixMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to have prefix", matcher.prefix()) } -func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to have prefix", matcher.prefix()) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go index 40a3526e..2aa4ceac 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go @@ -8,10 +8,10 @@ import ( type HaveSuffixMatcher struct { Suffix string - Args []interface{} + Args []any } -func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *HaveSuffixMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1)) @@ -27,10 +27,10 @@ func (matcher *HaveSuffixMatcher) suffix() string { return matcher.Suffix } -func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *HaveSuffixMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to have suffix", matcher.suffix()) } -func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to have suffix", matcher.suffix()) } diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go index f6725283..4c39e0db 100644 --- a/vendor/github.com/onsi/gomega/matchers/have_value.go +++ b/vendor/github.com/onsi/gomega/matchers/have_value.go @@ -12,10 +12,10 @@ const maxIndirections = 31 type HaveValueMatcher struct { Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value. - resolvedActual interface{} // the ("resolved") value. + resolvedActual any // the ("resolved") value. } -func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) { +func (m *HaveValueMatcher) Match(actual any) (bool, error) { val := reflect.ValueOf(actual) for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- { // return an error if value isn't valid. 
Please note that we cannot @@ -45,10 +45,10 @@ func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) { return false, errors.New(format.Message(actual, "too many indirections")) } -func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) { +func (m *HaveValueMatcher) FailureMessage(_ any) (message string) { return m.Matcher.FailureMessage(m.resolvedActual) } -func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) { +func (m *HaveValueMatcher) NegatedFailureMessage(_ any) (message string) { return m.Matcher.NegatedFailureMessage(m.resolvedActual) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go index c539dd38..f9d31377 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go @@ -71,14 +71,14 @@ func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) { format.Object(expected, 1)) } -func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchErrorMatcher) FailureMessage(actual any) (message string) { if matcher.isFunc { return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0])) } return format.Message(actual, "to match error", matcher.Expected) } -func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.isFunc { return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0])) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go index f962f139..331f289a 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go @@ -9,18 +9,18 @@ import ( ) type MatchJSONMatcher struct { - JSONToMatch interface{} - firstFailurePath []interface{} + JSONToMatch any + firstFailurePath []any } -func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchJSONMatcher) Match(actual any) (success bool, err error) { actualString, expectedString, err := matcher.prettyPrint(actual) if err != nil { return false, err } - var aval interface{} - var eval interface{} + var aval any + var eval any // this is guarded by prettyPrint json.Unmarshal([]byte(actualString), &aval) @@ -30,17 +30,17 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er return equal, nil } -func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchJSONMatcher) FailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.prettyPrint(actual) return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.prettyPrint(actual) return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted 
string, err error) { +func (matcher *MatchJSONMatcher) prettyPrint(actual any) (actualFormatted, expectedFormatted string, err error) { actualString, ok := toString(actual) if !ok { return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1)) diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go index adac5db6..779be683 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go @@ -9,10 +9,10 @@ import ( type MatchRegexpMatcher struct { Regexp string - Args []interface{} + Args []any } -func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchRegexpMatcher) Match(actual any) (success bool, err error) { actualString, ok := toString(actual) if !ok { return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1)) @@ -26,11 +26,11 @@ func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err return match, nil } -func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchRegexpMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to match regular expression", matcher.regexp()) } -func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "not to match regular expression", matcher.regexp()) } diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go index 5c815f5a..f7dcaf6f 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go @@ -15,10 +15,10 @@ import ( ) type MatchXMLMatcher struct { - XMLToMatch interface{} + XMLToMatch any } -func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchXMLMatcher) Match(actual any) (success bool, err error) { actualString, expectedString, err := matcher.formattedPrint(actual) if err != nil { return false, err @@ -37,17 +37,17 @@ func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err err return reflect.DeepEqual(aval, eval), nil } -func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchXMLMatcher) FailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.formattedPrint(actual) return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString) } -func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.formattedPrint(actual) return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString) } -func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) { +func (matcher *MatchXMLMatcher) formattedPrint(actual any) (actualString, expectedString string, err error) { var ok bool actualString, ok = toString(actual) if !ok { diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go 
b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go index 2cb6b47d..95057c26 100644 --- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go @@ -9,18 +9,18 @@ import ( ) type MatchYAMLMatcher struct { - YAMLToMatch interface{} - firstFailurePath []interface{} + YAMLToMatch any + firstFailurePath []any } -func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *MatchYAMLMatcher) Match(actual any) (success bool, err error) { actualString, expectedString, err := matcher.toStrings(actual) if err != nil { return false, err } - var aval interface{} - var eval interface{} + var aval any + var eval any if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil { return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err) @@ -34,23 +34,23 @@ func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err er return equal, nil } -func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *MatchYAMLMatcher) FailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.toNormalisedStrings(actual) return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual any) (message string) { actualString, expectedString, _ := matcher.toNormalisedStrings(actual) return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath) } -func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { +func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual any) (actualFormatted, expectedFormatted string, err error) { actualString, expectedString, err := matcher.toStrings(actual) return normalise(actualString), normalise(expectedString), err } func normalise(input string) string { - var val interface{} + var val any err := yaml.Unmarshal([]byte(input), &val) if err != nil { panic(err) // unreachable since Match already calls Unmarshal @@ -62,7 +62,7 @@ func normalise(input string) string { return strings.TrimSpace(string(output)) } -func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) { +func (matcher *MatchYAMLMatcher) toStrings(actual any) (actualFormatted, expectedFormatted string, err error) { actualString, ok := toString(actual) if !ok { return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. 
Got actual:\n%s", format.Object(actual, 1)) diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go index 78b71910..c598b789 100644 --- a/vendor/github.com/onsi/gomega/matchers/not.go +++ b/vendor/github.com/onsi/gomega/matchers/not.go @@ -8,7 +8,7 @@ type NotMatcher struct { Matcher types.GomegaMatcher } -func (m *NotMatcher) Match(actual interface{}) (bool, error) { +func (m *NotMatcher) Match(actual any) (bool, error) { success, err := m.Matcher.Match(actual) if err != nil { return false, err @@ -16,14 +16,14 @@ func (m *NotMatcher) Match(actual interface{}) (bool, error) { return !success, nil } -func (m *NotMatcher) FailureMessage(actual interface{}) (message string) { +func (m *NotMatcher) FailureMessage(actual any) (message string) { return m.Matcher.NegatedFailureMessage(actual) // works beautifully } -func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *NotMatcher) NegatedFailureMessage(actual any) (message string) { return m.Matcher.FailureMessage(actual) // works beautifully } -func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *NotMatcher) MatchMayChangeInTheFuture(actual any) bool { return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value } diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go index 841ae26a..6578404b 100644 --- a/vendor/github.com/onsi/gomega/matchers/or.go +++ b/vendor/github.com/onsi/gomega/matchers/or.go @@ -14,7 +14,7 @@ type OrMatcher struct { firstSuccessfulMatcher types.GomegaMatcher } -func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { +func (m *OrMatcher) Match(actual any) (success bool, err error) { m.firstSuccessfulMatcher = nil for _, matcher := range m.Matchers { success, err := matcher.Match(actual) @@ -29,16 +29,16 @@ func (m *OrMatcher) Match(actual interface{}) (success bool, err error) { return false, nil } -func (m *OrMatcher) FailureMessage(actual interface{}) (message string) { +func (m *OrMatcher) FailureMessage(actual any) (message string) { // not the most beautiful list of matchers, but not bad either... 
return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers)) } -func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *OrMatcher) NegatedFailureMessage(actual any) (message string) { return m.firstSuccessfulMatcher.NegatedFailureMessage(actual) } -func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (m *OrMatcher) MatchMayChangeInTheFuture(actual any) bool { /* Example with 3 matchers: A, B, C diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go index adc8cee6..8be5a7cc 100644 --- a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go @@ -8,11 +8,11 @@ import ( ) type PanicMatcher struct { - Expected interface{} - object interface{} + Expected any + object any } -func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *PanicMatcher) Match(actual any) (success bool, err error) { if actual == nil { return false, fmt.Errorf("PanicMatcher expects a non-nil actual.") } @@ -52,7 +52,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) return } -func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *PanicMatcher) FailureMessage(actual any) (message string) { if matcher.Expected == nil { // We wanted any panic to occur, but none did. return format.Message(actual, "to panic") @@ -91,7 +91,7 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) } } -func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *PanicMatcher) NegatedFailureMessage(actual any) (message string) { if matcher.Expected == nil { // We didn't want any panic to occur, but one did. return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1))) diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go index 948164ea..1d9f61d6 100644 --- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go @@ -11,12 +11,12 @@ import ( ) type ReceiveMatcher struct { - Args []interface{} + Args []any receivedValue reflect.Value channelClosed bool } -func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *ReceiveMatcher) Match(actual any) (success bool, err error) { if !isChan(actual) { return false, fmt.Errorf("ReceiveMatcher expects a channel. 
Got:\n%s", format.Object(actual, 1)) } @@ -30,7 +30,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro var subMatcher omegaMatcher var hasSubMatcher bool - var resultReference interface{} + var resultReference any // Valid arg formats are as follows, always with optional POINTER before // optional MATCHER: @@ -115,8 +115,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro return false, nil } -func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) { - var matcherArg interface{} +func (matcher *ReceiveMatcher) FailureMessage(actual any) (message string) { + var matcherArg any if len(matcher.Args) > 0 { matcherArg = matcher.Args[len(matcher.Args)-1] } @@ -136,8 +136,8 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin return format.Message(actual, "to receive something."+closedAddendum) } -func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) { - var matcherArg interface{} +func (matcher *ReceiveMatcher) NegatedFailureMessage(actual any) (message string) { + var matcherArg any if len(matcher.Args) > 0 { matcherArg = matcher.Args[len(matcher.Args)-1] } @@ -157,7 +157,7 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag return format.Message(actual, "not to receive anything."+closedAddendum) } -func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool { +func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual any) bool { if !isChan(actual) { return false } diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go index ec68fe8b..2adc4825 100644 --- a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go @@ -8,13 +8,13 @@ import ( ) type SatisfyMatcher struct { - Predicate interface{} + Predicate any // cached type predicateArgType reflect.Type } -func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher { +func NewSatisfyMatcher(predicate any) *SatisfyMatcher { if predicate == nil { panic("predicate cannot be nil") } @@ -35,7 +35,7 @@ func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher { } } -func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) { +func (m *SatisfyMatcher) Match(actual any) (success bool, err error) { // prepare a parameter to pass to the predicate var param reflect.Value if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) { @@ -57,10 +57,10 @@ func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) { return result[0].Bool(), nil } -func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) { +func (m *SatisfyMatcher) FailureMessage(actual any) (message string) { return format.Message(actual, "to satisfy predicate", m.Predicate) } -func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (m *SatisfyMatcher) NegatedFailureMessage(actual any) (message string) { return format.Message(actual, "to not satisfy predicate", m.Predicate) } diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go index 1369c1e8..30dd58f4 100644 --- a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go +++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go @@ -8,7 +8,7 
@@ import ( "strings" ) -func formattedMessage(comparisonMessage string, failurePath []interface{}) string { +func formattedMessage(comparisonMessage string, failurePath []any) string { var diffMessage string if len(failurePath) == 0 { diffMessage = "" @@ -18,7 +18,7 @@ func formattedMessage(comparisonMessage string, failurePath []interface{}) strin return fmt.Sprintf("%s%s", comparisonMessage, diffMessage) } -func formattedFailurePath(failurePath []interface{}) string { +func formattedFailurePath(failurePath []any) string { formattedPaths := []string{} for i := len(failurePath) - 1; i >= 0; i-- { switch p := failurePath[i].(type) { @@ -34,33 +34,33 @@ func formattedFailurePath(failurePath []interface{}) string { return strings.Join(formattedPaths, "") } -func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { - var errorPath []interface{} +func deepEqual(a any, b any) (bool, []any) { + var errorPath []any if reflect.TypeOf(a) != reflect.TypeOf(b) { return false, errorPath } switch a.(type) { - case []interface{}: - if len(a.([]interface{})) != len(b.([]interface{})) { + case []any: + if len(a.([]any)) != len(b.([]any)) { return false, errorPath } - for i, v := range a.([]interface{}) { - elementEqual, keyPath := deepEqual(v, b.([]interface{})[i]) + for i, v := range a.([]any) { + elementEqual, keyPath := deepEqual(v, b.([]any)[i]) if !elementEqual { return false, append(keyPath, i) } } return true, errorPath - case map[interface{}]interface{}: - if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) { + case map[any]any: + if len(a.(map[any]any)) != len(b.(map[any]any)) { return false, errorPath } - for k, v1 := range a.(map[interface{}]interface{}) { - v2, ok := b.(map[interface{}]interface{})[k] + for k, v1 := range a.(map[any]any) { + v2, ok := b.(map[any]any)[k] if !ok { return false, errorPath } @@ -71,13 +71,13 @@ func deepEqual(a interface{}, b interface{}) (bool, []interface{}) { } return true, errorPath - case map[string]interface{}: - if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) { + case map[string]any: + if len(a.(map[string]any)) != len(b.(map[string]any)) { return false, errorPath } - for k, v1 := range a.(map[string]interface{}) { - v2, ok := b.(map[string]interface{})[k] + for k, v1 := range a.(map[string]any) { + v2, ok := b.(map[string]any)[k] if !ok { return false, errorPath } diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go index 327350f7..f0b2c4aa 100644 --- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go +++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go @@ -14,7 +14,7 @@ type formattedGomegaError interface { type SucceedMatcher struct { } -func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) { +func (matcher *SucceedMatcher) Match(actual any) (success bool, err error) { // is purely nil? 
if actual == nil { return true, nil @@ -29,7 +29,7 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro return isNil(actual), nil } -func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) { +func (matcher *SucceedMatcher) FailureMessage(actual any) (message string) { var fgErr formattedGomegaError if errors.As(actual.(error), &fgErr) { return fgErr.FormattedGomegaError() @@ -37,6 +37,6 @@ func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message strin return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1)) } -func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) { +func (matcher *SucceedMatcher) NegatedFailureMessage(actual any) (message string) { return "Expected failure, but got no error." } diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go index 830e3082..0d78779d 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go @@ -11,7 +11,7 @@ type BipartiteGraph struct { Edges EdgeSet } -func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) { +func NewBipartiteGraph(leftValues, rightValues []any, neighbours func(any, any) (bool, error)) (*BipartiteGraph, error) { left := NodeOrderedSet{} for i, v := range leftValues { left = append(left, Node{ID: i, Value: v}) @@ -41,7 +41,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in // FreeLeftRight returns left node values and right node values // of the BipartiteGraph's nodes which are not part of the given edges. 
-func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) { +func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []any) { for _, node := range bg.Left { if edges.Free(node) { leftValues = append(leftValues, node.Value) diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go index cd597a2f..66d3578d 100644 --- a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go +++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go @@ -2,7 +2,7 @@ package node type Node struct { ID int - Value interface{} + Value any } type NodeOrderedSet []Node diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go index b9440ac7..d020dedc 100644 --- a/vendor/github.com/onsi/gomega/matchers/type_support.go +++ b/vendor/github.com/onsi/gomega/matchers/type_support.go @@ -20,16 +20,16 @@ import ( ) type omegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) + Match(actual any) (success bool, err error) + FailureMessage(actual any) (message string) + NegatedFailureMessage(actual any) (message string) } -func isBool(a interface{}) bool { +func isBool(a any) bool { return reflect.TypeOf(a).Kind() == reflect.Bool } -func isNumber(a interface{}) bool { +func isNumber(a any) bool { if a == nil { return false } @@ -37,22 +37,22 @@ func isNumber(a interface{}) bool { return reflect.Int <= kind && kind <= reflect.Float64 } -func isInteger(a interface{}) bool { +func isInteger(a any) bool { kind := reflect.TypeOf(a).Kind() return reflect.Int <= kind && kind <= reflect.Int64 } -func isUnsignedInteger(a interface{}) bool { +func isUnsignedInteger(a any) bool { kind := reflect.TypeOf(a).Kind() return reflect.Uint <= kind && kind <= reflect.Uint64 } -func isFloat(a interface{}) bool { +func isFloat(a any) bool { kind := reflect.TypeOf(a).Kind() return reflect.Float32 <= kind && kind <= reflect.Float64 } -func toInteger(a interface{}) int64 { +func toInteger(a any) int64 { if isInteger(a) { return reflect.ValueOf(a).Int() } else if isUnsignedInteger(a) { @@ -63,7 +63,7 @@ func toInteger(a interface{}) int64 { panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } -func toUnsignedInteger(a interface{}) uint64 { +func toUnsignedInteger(a any) uint64 { if isInteger(a) { return uint64(reflect.ValueOf(a).Int()) } else if isUnsignedInteger(a) { @@ -74,7 +74,7 @@ func toUnsignedInteger(a interface{}) uint64 { panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a)) } -func toFloat(a interface{}) float64 { +func toFloat(a any) float64 { if isInteger(a) { return float64(reflect.ValueOf(a).Int()) } else if isUnsignedInteger(a) { @@ -85,26 +85,26 @@ func toFloat(a interface{}) float64 { panic(fmt.Sprintf("Expected a number! 
Got <%T> %#v", a, a)) } -func isError(a interface{}) bool { +func isError(a any) bool { _, ok := a.(error) return ok } -func isChan(a interface{}) bool { +func isChan(a any) bool { if isNil(a) { return false } return reflect.TypeOf(a).Kind() == reflect.Chan } -func isMap(a interface{}) bool { +func isMap(a any) bool { if a == nil { return false } return reflect.TypeOf(a).Kind() == reflect.Map } -func isArrayOrSlice(a interface{}) bool { +func isArrayOrSlice(a any) bool { if a == nil { return false } @@ -116,14 +116,14 @@ func isArrayOrSlice(a interface{}) bool { } } -func isString(a interface{}) bool { +func isString(a any) bool { if a == nil { return false } return reflect.TypeOf(a).Kind() == reflect.String } -func toString(a interface{}) (string, bool) { +func toString(a any) (string, bool) { aString, isString := a.(string) if isString { return aString, true @@ -147,7 +147,7 @@ func toString(a interface{}) (string, bool) { return "", false } -func lengthOf(a interface{}) (int, bool) { +func lengthOf(a any) (int, bool) { if a == nil { return 0, false } @@ -169,7 +169,7 @@ func lengthOf(a interface{}) (int, bool) { return 0, false } } -func capOf(a interface{}) (int, bool) { +func capOf(a any) (int, bool) { if a == nil { return 0, false } @@ -181,7 +181,7 @@ func capOf(a interface{}) (int, bool) { } } -func isNil(a interface{}) bool { +func isNil(a any) bool { if a == nil { return true } diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go index 6f743b1b..6231c3b4 100644 --- a/vendor/github.com/onsi/gomega/matchers/with_transform.go +++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go @@ -9,20 +9,20 @@ import ( type WithTransformMatcher struct { // input - Transform interface{} // must be a function of one parameter that returns one value and an optional error + Transform any // must be a function of one parameter that returns one value and an optional error Matcher types.GomegaMatcher // cached value transformArgType reflect.Type // state - transformedValue interface{} + transformedValue any } // reflect.Type for error var errorT = reflect.TypeOf((*error)(nil)).Elem() -func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher { +func NewWithTransformMatcher(transform any, matcher types.GomegaMatcher) *WithTransformMatcher { if transform == nil { panic("transform function cannot be nil") } @@ -43,7 +43,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) } } -func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { +func (m *WithTransformMatcher) Match(actual any) (bool, error) { // prepare a parameter to pass to the Transform function var param reflect.Value if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) { @@ -72,15 +72,15 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) { return m.Matcher.Match(m.transformedValue) } -func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) { +func (m *WithTransformMatcher) FailureMessage(_ any) (message string) { return m.Matcher.FailureMessage(m.transformedValue) } -func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) { +func (m *WithTransformMatcher) NegatedFailureMessage(_ any) (message string) { return m.Matcher.NegatedFailureMessage(m.transformedValue) } -func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool { +func (m *WithTransformMatcher) 
MatchMayChangeInTheFuture(_ any) bool { // TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.) // // Querying the next matcher is fine if the transformer always will return the same value. diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go index 30f2beed..685a46f3 100644 --- a/vendor/github.com/onsi/gomega/types/types.go +++ b/vendor/github.com/onsi/gomega/types/types.go @@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int) // A simple *testing.T interface wrapper type GomegaTestingT interface { Helper() - Fatalf(format string, args ...interface{}) + Fatalf(format string, args ...any) } -// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers +// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers type Gomega interface { - Ω(actual interface{}, extra ...interface{}) Assertion - Expect(actual interface{}, extra ...interface{}) Assertion - ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion + Ω(actual any, extra ...any) Assertion + Expect(actual any, extra ...any) Assertion + ExpectWithOffset(offset int, actual any, extra ...any) Assertion - Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Eventually(actualOrCtx any, args ...any) AsyncAssertion + EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion - Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion - ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion + Consistently(actualOrCtx any, args ...any) AsyncAssertion + ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion SetDefaultEventuallyTimeout(time.Duration) SetDefaultEventuallyPollingInterval(time.Duration) @@ -37,9 +37,9 @@ type Gomega interface { // // For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers type GomegaMatcher interface { - Match(actual interface{}) (success bool, err error) - FailureMessage(actual interface{}) (message string) - NegatedFailureMessage(actual interface{}) (message string) + Match(actual any) (success bool, err error) + FailureMessage(actual any) (message string) + NegatedFailureMessage(actual any) (message string) } /* @@ -52,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore. 
*/ type OracleMatcher interface { - MatchMayChangeInTheFuture(actual interface{}) bool + MatchMayChangeInTheFuture(actual any) bool } -func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { +func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool { oracleMatcher, ok := matcher.(OracleMatcher) if !ok { return true @@ -67,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool { // AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure // they are eventually satisfied type AsyncAssertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool + + // equivalent to above + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) AsyncAssertion WithTimeout(interval time.Duration) AsyncAssertion @@ -76,18 +81,18 @@ type AsyncAssertion interface { Within(timeout time.Duration) AsyncAssertion ProbeEvery(interval time.Duration) AsyncAssertion WithContext(ctx context.Context) AsyncAssertion - WithArguments(argsToForward ...interface{}) AsyncAssertion + WithArguments(argsToForward ...any) AsyncAssertion MustPassRepeatedly(count int) AsyncAssertion } // Assertions are returned by Ω and Expect and enable assertions against Gomega matchers type Assertion interface { - Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool + Should(matcher GomegaMatcher, optionalDescription ...any) bool + ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool - To(matcher GomegaMatcher, optionalDescription ...interface{}) bool - ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool - NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool + To(matcher GomegaMatcher, optionalDescription ...any) bool + ToNot(matcher GomegaMatcher, optionalDescription ...any) bool + NotTo(matcher GomegaMatcher, optionalDescription ...any) bool WithOffset(offset int) Assertion diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE deleted file mode 100644 index 6a66aea5..00000000 --- a/vendor/golang.org/x/exp/LICENSE +++ /dev/null @@ -1,27 +0,0 @@ -Copyright (c) 2009 The Go Authors. All rights reserved. - -Redistribution and use in source and binary forms, with or without -modification, are permitted provided that the following conditions are -met: - - * Redistributions of source code must retain the above copyright -notice, this list of conditions and the following disclaimer. - * Redistributions in binary form must reproduce the above -copyright notice, this list of conditions and the following disclaimer -in the documentation and/or other materials provided with the -distribution. - * Neither the name of Google Inc. nor the names of its -contributors may be used to endorse or promote products derived from -this software without specific prior written permission. 
- -THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR -A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT -OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, -SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT -LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY -THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT -(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE -OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS deleted file mode 100644 index 73309904..00000000 --- a/vendor/golang.org/x/exp/PATENTS +++ /dev/null @@ -1,22 +0,0 @@ -Additional IP Rights Grant (Patents) - -"This implementation" means the copyrightable works distributed by -Google as part of the Go project. - -Google hereby grants to You a perpetual, worldwide, non-exclusive, -no-charge, royalty-free, irrevocable (except as stated in this section) -patent license to make, have made, use, offer to sell, sell, import, -transfer and otherwise run, modify and propagate the contents of this -implementation of Go, where such license applies only to those patent -claims, both currently owned or controlled by Google and acquired in -the future, licensable by Google that are necessarily infringed by this -implementation of Go. This grant does not include claims that would be -infringed only as a consequence of further modification of this -implementation. If you or your agent or exclusive licensee institute or -order or agree to the institution of patent litigation against any -entity (including a cross-claim or counterclaim in a lawsuit) alleging -that this implementation of Go or any code incorporated within this -implementation of Go constitutes direct or contributory patent -infringement, or inducement of patent infringement, then any patent -rights granted to you under this License for this implementation of Go -shall terminate as of the date such litigation is filed. diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go deleted file mode 100644 index 2c033dff..00000000 --- a/vendor/golang.org/x/exp/constraints/constraints.go +++ /dev/null @@ -1,50 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package constraints defines a set of useful constraints to be used -// with type parameters. -package constraints - -// Signed is a constraint that permits any signed integer type. -// If future releases of Go add new predeclared signed integer types, -// this constraint will be modified to include them. -type Signed interface { - ~int | ~int8 | ~int16 | ~int32 | ~int64 -} - -// Unsigned is a constraint that permits any unsigned integer type. -// If future releases of Go add new predeclared unsigned integer types, -// this constraint will be modified to include them. -type Unsigned interface { - ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr -} - -// Integer is a constraint that permits any integer type. -// If future releases of Go add new predeclared integer types, -// this constraint will be modified to include them. 
-type Integer interface { - Signed | Unsigned -} - -// Float is a constraint that permits any floating-point type. -// If future releases of Go add new predeclared floating-point types, -// this constraint will be modified to include them. -type Float interface { - ~float32 | ~float64 -} - -// Complex is a constraint that permits any complex numeric type. -// If future releases of Go add new predeclared complex numeric types, -// this constraint will be modified to include them. -type Complex interface { - ~complex64 | ~complex128 -} - -// Ordered is a constraint that permits any ordered type: any type -// that supports the operators < <= >= >. -// If future releases of Go add new ordered types, -// this constraint will be modified to include them. -type Ordered interface { - Integer | Float | ~string -} diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go deleted file mode 100644 index ecc0dabb..00000000 --- a/vendor/golang.org/x/exp/maps/maps.go +++ /dev/null @@ -1,94 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package maps defines various functions useful with maps of any type. -package maps - -// Keys returns the keys of the map m. -// The keys will be in an indeterminate order. -func Keys[M ~map[K]V, K comparable, V any](m M) []K { - r := make([]K, 0, len(m)) - for k := range m { - r = append(r, k) - } - return r -} - -// Values returns the values of the map m. -// The values will be in an indeterminate order. -func Values[M ~map[K]V, K comparable, V any](m M) []V { - r := make([]V, 0, len(m)) - for _, v := range m { - r = append(r, v) - } - return r -} - -// Equal reports whether two maps contain the same key/value pairs. -// Values are compared using ==. -func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || v1 != v2 { - return false - } - } - return true -} - -// EqualFunc is like Equal, but compares values using eq. -// Keys are still compared with ==. -func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { - if len(m1) != len(m2) { - return false - } - for k, v1 := range m1 { - if v2, ok := m2[k]; !ok || !eq(v1, v2) { - return false - } - } - return true -} - -// Clear removes all entries from m, leaving it empty. -func Clear[M ~map[K]V, K comparable, V any](m M) { - for k := range m { - delete(m, k) - } -} - -// Clone returns a copy of m. This is a shallow clone: -// the new keys and values are set using ordinary assignment. -func Clone[M ~map[K]V, K comparable, V any](m M) M { - // Preserve nil in case it matters. - if m == nil { - return nil - } - r := make(M, len(m)) - for k, v := range m { - r[k] = v - } - return r -} - -// Copy copies all key/value pairs in src adding them to dst. -// When a key in src is already present in dst, -// the value in dst will be overwritten by the value associated -// with the key in src. -func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { - for k, v := range src { - dst[k] = v - } -} - -// DeleteFunc deletes any key/value pairs from m for which del returns true. 
-func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { - for k, v := range m { - if del(k, v) { - delete(m, k) - } - } -} diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go deleted file mode 100644 index fbf1934a..00000000 --- a/vendor/golang.org/x/exp/slices/cmp.go +++ /dev/null @@ -1,44 +0,0 @@ -// Copyright 2023 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// min is a version of the predeclared function from the Go 1.21 release. -func min[T constraints.Ordered](a, b T) T { - if a < b || isNaN(a) { - return a - } - return b -} - -// max is a version of the predeclared function from the Go 1.21 release. -func max[T constraints.Ordered](a, b T) T { - if a > b || isNaN(a) { - return a - } - return b -} - -// cmpLess is a copy of cmp.Less from the Go 1.21 release. -func cmpLess[T constraints.Ordered](x, y T) bool { - return (isNaN(x) && !isNaN(y)) || x < y -} - -// cmpCompare is a copy of cmp.Compare from the Go 1.21 release. -func cmpCompare[T constraints.Ordered](x, y T) int { - xNaN := isNaN(x) - yNaN := isNaN(y) - if xNaN && yNaN { - return 0 - } - if xNaN || x < y { - return -1 - } - if yNaN || x > y { - return +1 - } - return 0 -} diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go deleted file mode 100644 index 46ceac34..00000000 --- a/vendor/golang.org/x/exp/slices/slices.go +++ /dev/null @@ -1,515 +0,0 @@ -// Copyright 2021 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -// Package slices defines various functions useful with slices of any type. -package slices - -import ( - "unsafe" - - "golang.org/x/exp/constraints" -) - -// Equal reports whether two slices are equal: the same length and all -// elements equal. If the lengths are different, Equal returns false. -// Otherwise, the elements are compared in increasing index order, and the -// comparison stops at the first unequal pair. -// Floating point NaNs are not considered equal. -func Equal[S ~[]E, E comparable](s1, s2 S) bool { - if len(s1) != len(s2) { - return false - } - for i := range s1 { - if s1[i] != s2[i] { - return false - } - } - return true -} - -// EqualFunc reports whether two slices are equal using an equality -// function on each pair of elements. If the lengths are different, -// EqualFunc returns false. Otherwise, the elements are compared in -// increasing index order, and the comparison stops at the first index -// for which eq returns false. -func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool { - if len(s1) != len(s2) { - return false - } - for i, v1 := range s1 { - v2 := s2[i] - if !eq(v1, v2) { - return false - } - } - return true -} - -// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair -// of elements. The elements are compared sequentially, starting at index 0, -// until one element is not equal to the other. -// The result of comparing the first non-matching elements is returned. -// If both slices are equal until one of them ends, the shorter slice is -// considered less than the longer one. -// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2. 
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmpCompare(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 -} - -// CompareFunc is like [Compare] but uses a custom comparison function on each -// pair of elements. -// The result is the first non-zero result of cmp; if cmp always -// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2), -// and +1 if len(s1) > len(s2). -func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int { - for i, v1 := range s1 { - if i >= len(s2) { - return +1 - } - v2 := s2[i] - if c := cmp(v1, v2); c != 0 { - return c - } - } - if len(s1) < len(s2) { - return -1 - } - return 0 -} - -// Index returns the index of the first occurrence of v in s, -// or -1 if not present. -func Index[S ~[]E, E comparable](s S, v E) int { - for i := range s { - if v == s[i] { - return i - } - } - return -1 -} - -// IndexFunc returns the first index i satisfying f(s[i]), -// or -1 if none do. -func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { - for i := range s { - if f(s[i]) { - return i - } - } - return -1 -} - -// Contains reports whether v is present in s. -func Contains[S ~[]E, E comparable](s S, v E) bool { - return Index(s, v) >= 0 -} - -// ContainsFunc reports whether at least one -// element e of s satisfies f(e). -func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { - return IndexFunc(s, f) >= 0 -} - -// Insert inserts the values v... into s at index i, -// returning the modified slice. -// The elements at s[i:] are shifted up to make room. -// In the returned slice r, r[i] == v[0], -// and r[i+len(v)] == value originally at r[i]. -// Insert panics if i is out of range. -// This function is O(len(s) + len(v)). -func Insert[S ~[]E, E any](s S, i int, v ...E) S { - m := len(v) - if m == 0 { - return s - } - n := len(s) - if i == n { - return append(s, v...) - } - if n+m > cap(s) { - // Use append rather than make so that we bump the size of - // the slice up to the next storage class. - // This is what Grow does but we don't call Grow because - // that might copy the values twice. - s2 := append(s[:i], make(S, n+m-i)...) - copy(s2[i:], v) - copy(s2[i+m:], s[i:]) - return s2 - } - s = s[:n+m] - - // before: - // s: aaaaaaaabbbbccccccccdddd - // ^ ^ ^ ^ - // i i+m n n+m - // after: - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // - // a are the values that don't move in s. - // v are the values copied in from v. - // b and c are the values from s that are shifted up in index. - // d are the values that get overwritten, never to be seen again. - - if !overlaps(v, s[i+m:]) { - // Easy case - v does not overlap either the c or d regions. - // (It might be in some of a or b, or elsewhere entirely.) - // The data we copy up doesn't write to v at all, so just do it. - - copy(s[i+m:], s[i:]) - - // Now we have - // s: aaaaaaaabbbbbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // Note the b values are duplicated. - - copy(s[i:], v) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s - } - - // The hard case - v overlaps c or d. We can't just shift up - // the data because we'd move or clobber the values we're trying - // to insert. - // So instead, write v on top of d, then rotate. 
- copy(s[n:], v) - - // Now we have - // s: aaaaaaaabbbbccccccccvvvv - // ^ ^ ^ ^ - // i i+m n n+m - - rotateRight(s[i:], m) - - // Now we have - // s: aaaaaaaavvvvbbbbcccccccc - // ^ ^ ^ ^ - // i i+m n n+m - // That's the result we want. - return s -} - -// clearSlice sets all elements up to the length of s to the zero value of E. -// We may use the builtin clear func instead, and remove clearSlice, when upgrading -// to Go 1.21+. -func clearSlice[S ~[]E, E any](s S) { - var zero E - for i := range s { - s[i] = zero - } -} - -// Delete removes the elements s[i:j] from s, returning the modified slice. -// Delete panics if j > len(s) or s[i:j] is not a valid slice of s. -// Delete is O(len(s)-i), so if many items must be deleted, it is better to -// make a single call deleting them all together than to delete one at a time. -// Delete zeroes the elements s[len(s)-(j-i):len(s)]. -func Delete[S ~[]E, E any](s S, i, j int) S { - _ = s[i:j:len(s)] // bounds check - - if i == j { - return s - } - - oldlen := len(s) - s = append(s[:i], s[j:]...) - clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC - return s -} - -// DeleteFunc removes any elements from s for which del returns true, -// returning the modified slice. -// DeleteFunc zeroes the elements between the new length and the original length. -func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { - i := IndexFunc(s, del) - if i == -1 { - return s - } - // Don't start copying elements until we find one to delete. - for j := i + 1; j < len(s); j++ { - if v := s[j]; !del(v) { - s[i] = v - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Replace replaces the elements s[i:j] by the given v, and returns the -// modified slice. Replace panics if s[i:j] is not a valid slice of s. -// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length. -func Replace[S ~[]E, E any](s S, i, j int, v ...E) S { - _ = s[i:j] // verify that i:j is a valid subslice - - if i == j { - return Insert(s, i, v...) - } - if j == len(s) { - return append(s[:i], v...) - } - - tot := len(s[:i]) + len(v) + len(s[j:]) - if tot > cap(s) { - // Too big to fit, allocate and copy over. - s2 := append(s[:i], make(S, tot-i)...) // See Insert - copy(s2[i:], v) - copy(s2[i+len(v):], s[j:]) - return s2 - } - - r := s[:tot] - - if i+len(v) <= j { - // Easy, as v fits in the deleted portion. - copy(r[i:], v) - if i+len(v) != j { - copy(r[i+len(v):], s[j:]) - } - clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC - return r - } - - // We are expanding (v is bigger than j-i). - // The situation is something like this: - // (example has i=4,j=8,len(s)=16,len(v)=6) - // s: aaaaxxxxbbbbbbbbyy - // ^ ^ ^ ^ - // i j len(s) tot - // a: prefix of s - // x: deleted range - // b: more of s - // y: area to expand into - - if !overlaps(r[i+len(v):], v) { - // Easy, as v is not clobbered by the first copy. - copy(r[i+len(v):], s[j:]) - copy(r[i:], v) - return r - } - - // This is a situation where we don't have a single place to which - // we can copy v. Parts of it need to go to two different places. - // We want to copy the prefix of v into y and the suffix into x, then - // rotate |y| spots to the right. - // - // v[2:] v[:2] - // | | - // s: aaaavvvvbbbbbbbbvv - // ^ ^ ^ ^ - // i j len(s) tot - // - // If either of those two destinations don't alias v, then we're good. 
- y := len(v) - (j - i) // length of y portion - - if !overlaps(r[i:j], v) { - copy(r[i:j], v[y:]) - copy(r[len(s):], v[:y]) - rotateRight(r[i:], y) - return r - } - if !overlaps(r[len(s):], v) { - copy(r[len(s):], v[:y]) - copy(r[i:j], v[y:]) - rotateRight(r[i:], y) - return r - } - - // Now we know that v overlaps both x and y. - // That means that the entirety of b is *inside* v. - // So we don't need to preserve b at all; instead we - // can copy v first, then copy the b part of v out of - // v to the right destination. - k := startIdx(v, s[j:]) - copy(r[i:], v) - copy(r[i+len(v):], r[i+k:]) - return r -} - -// Clone returns a copy of the slice. -// The elements are copied using assignment, so this is a shallow clone. -func Clone[S ~[]E, E any](s S) S { - // Preserve nil in case it matters. - if s == nil { - return nil - } - return append(S([]E{}), s...) -} - -// Compact replaces consecutive runs of equal elements with a single copy. -// This is like the uniq command found on Unix. -// Compact modifies the contents of the slice s and returns the modified slice, -// which may have a smaller length. -// Compact zeroes the elements between the new length and the original length. -func Compact[S ~[]E, E comparable](s S) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if s[k] != s[k-1] { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// CompactFunc is like [Compact] but uses an equality function to compare elements. -// For runs of elements that compare equal, CompactFunc keeps the first one. -// CompactFunc zeroes the elements between the new length and the original length. -func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S { - if len(s) < 2 { - return s - } - i := 1 - for k := 1; k < len(s); k++ { - if !eq(s[k], s[k-1]) { - if i != k { - s[i] = s[k] - } - i++ - } - } - clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC - return s[:i] -} - -// Grow increases the slice's capacity, if necessary, to guarantee space for -// another n elements. After Grow(n), at least n elements can be appended -// to the slice without another allocation. If n is negative or too large to -// allocate the memory, Grow panics. -func Grow[S ~[]E, E any](s S, n int) S { - if n < 0 { - panic("cannot be negative") - } - if n -= cap(s) - len(s); n > 0 { - // TODO(https://go.dev/issue/53888): Make using []E instead of S - // to workaround a compiler bug where the runtime.growslice optimization - // does not take effect. Revert when the compiler is fixed. - s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)] - } - return s -} - -// Clip removes unused capacity from the slice, returning s[:len(s):len(s)]. -func Clip[S ~[]E, E any](s S) S { - return s[:len(s):len(s)] -} - -// Rotation algorithm explanation: -// -// rotate left by 2 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join first parts -// 89234567 01 -// recursively rotate first left part by 2 -// 23456789 01 -// join at the end -// 2345678901 -// -// rotate left by 8 -// start with -// 0123456789 -// split up like this -// 01 234567 89 -// swap first 2 and last 2 -// 89 234567 01 -// join last parts -// 89 23456701 -// recursively rotate second part left by 6 -// 89 01234567 -// join at the end -// 8901234567 - -// TODO: There are other rotate algorithms. -// This algorithm has the desirable property that it moves each element exactly twice. 
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes. -// The follow-cycles algorithm can be 1-write but it is not very cache friendly. - -// rotateLeft rotates b left by n spaces. -// s_final[i] = s_orig[i+r], wrapping around. -func rotateLeft[E any](s []E, r int) { - for r != 0 && r != len(s) { - if r*2 <= len(s) { - swap(s[:r], s[len(s)-r:]) - s = s[:len(s)-r] - } else { - swap(s[:len(s)-r], s[r:]) - s, r = s[len(s)-r:], r*2-len(s) - } - } -} -func rotateRight[E any](s []E, r int) { - rotateLeft(s, len(s)-r) -} - -// swap swaps the contents of x and y. x and y must be equal length and disjoint. -func swap[E any](x, y []E) { - for i := 0; i < len(x); i++ { - x[i], y[i] = y[i], x[i] - } -} - -// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap. -func overlaps[E any](a, b []E) bool { - if len(a) == 0 || len(b) == 0 { - return false - } - elemSize := unsafe.Sizeof(a[0]) - if elemSize == 0 { - return false - } - // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445. - // Also see crypto/internal/alias/alias.go:AnyOverlap - return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) && - uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1) -} - -// startIdx returns the index in haystack where the needle starts. -// prerequisite: the needle must be aliased entirely inside the haystack. -func startIdx[E any](haystack, needle []E) int { - p := &needle[0] - for i := range haystack { - if p == &haystack[i] { - return i - } - } - // TODO: what if the overlap is by a non-integral number of Es? - panic("needle not found") -} - -// Reverse reverses the elements of the slice in place. -func Reverse[S ~[]E, E any](s S) { - for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 { - s[i], s[j] = s[j], s[i] - } -} diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go deleted file mode 100644 index f58bbc7b..00000000 --- a/vendor/golang.org/x/exp/slices/sort.go +++ /dev/null @@ -1,197 +0,0 @@ -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp - -package slices - -import ( - "math/bits" - - "golang.org/x/exp/constraints" -) - -// Sort sorts a slice of any ordered type in ascending order. -// When sorting floating-point numbers, NaNs are ordered before other values. -func Sort[S ~[]E, E constraints.Ordered](x S) { - n := len(x) - pdqsortOrdered(x, 0, n, bits.Len(uint(n))) -} - -// SortFunc sorts the slice x in ascending order as determined by the cmp -// function. This sort is not guaranteed to be stable. -// cmp(a, b) should return a negative number when a < b, a positive number when -// a > b and zero when a == b or when a is not comparable to b in the sense -// of the formal definition of Strict Weak Ordering. -// -// SortFunc requires that cmp is a strict weak ordering. -// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings. -// To indicate 'uncomparable', return 0 from the function. -func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - n := len(x) - pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp) -} - -// SortStableFunc sorts the slice x while keeping the original order of equal -// elements, using cmp to compare elements in the same way as [SortFunc]. 
-func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) { - stableCmpFunc(x, len(x), cmp) -} - -// IsSorted reports whether x is sorted in ascending order. -func IsSorted[S ~[]E, E constraints.Ordered](x S) bool { - for i := len(x) - 1; i > 0; i-- { - if cmpLess(x[i], x[i-1]) { - return false - } - } - return true -} - -// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the -// comparison function as defined by [SortFunc]. -func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool { - for i := len(x) - 1; i > 0; i-- { - if cmp(x[i], x[i-1]) < 0 { - return false - } - } - return true -} - -// Min returns the minimal value in x. It panics if x is empty. -// For floating-point numbers, Min propagates NaNs (any NaN value in x -// forces the output to be NaN). -func Min[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Min: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = min(m, x[i]) - } - return m -} - -// MinFunc returns the minimal value in x, using cmp to compare elements. -// It panics if x is empty. If there is more than one minimal element -// according to the cmp function, MinFunc returns the first one. -func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MinFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) < 0 { - m = x[i] - } - } - return m -} - -// Max returns the maximal value in x. It panics if x is empty. -// For floating-point E, Max propagates NaNs (any NaN value in x -// forces the output to be NaN). -func Max[S ~[]E, E constraints.Ordered](x S) E { - if len(x) < 1 { - panic("slices.Max: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - m = max(m, x[i]) - } - return m -} - -// MaxFunc returns the maximal value in x, using cmp to compare elements. -// It panics if x is empty. If there is more than one maximal element -// according to the cmp function, MaxFunc returns the first one. -func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E { - if len(x) < 1 { - panic("slices.MaxFunc: empty list") - } - m := x[0] - for i := 1; i < len(x); i++ { - if cmp(x[i], m) > 0 { - m = x[i] - } - } - return m -} - -// BinarySearch searches for target in a sorted slice and returns the position -// where target is found, or the position where target would appear in the -// sort order; it also returns a bool saying whether the target is really found -// in the slice. The slice must be sorted in increasing order. -func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) { - // Inlining is faster than calling BinarySearchFunc with a lambda. - n := len(x) - // Define x[-1] < target and x[n] >= target. - // Invariant: x[i-1] < target, x[j] >= target. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmpLess(x[h], target) { - i = h + 1 // preserves x[i-1] < target - } else { - j = h // preserves x[j] >= target - } - } - // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i. - return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target))) -} - -// BinarySearchFunc works like [BinarySearch], but uses a custom comparison -// function. The slice must be sorted in increasing order, where "increasing" -// is defined by cmp. cmp should return 0 if the slice element matches -// the target, a negative number if the slice element precedes the target, -// or a positive number if the slice element follows the target. 
-// cmp must implement the same ordering as the slice, such that if -// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice. -func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) { - n := len(x) - // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 . - // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0. - i, j := 0, n - for i < j { - h := int(uint(i+j) >> 1) // avoid overflow when computing h - // i ≤ h < j - if cmp(x[h], target) < 0 { - i = h + 1 // preserves cmp(x[i - 1], target) < 0 - } else { - j = h // preserves cmp(x[j], target) >= 0 - } - } - // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i. - return i, i < n && cmp(x[i], target) == 0 -} - -type sortedHint int // hint for pdqsort when choosing the pivot - -const ( - unknownHint sortedHint = iota - increasingHint - decreasingHint -) - -// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf -type xorshift uint64 - -func (r *xorshift) Next() uint64 { - *r ^= *r << 13 - *r ^= *r >> 17 - *r ^= *r << 5 - return uint64(*r) -} - -func nextPowerOfTwo(length int) uint { - return 1 << bits.Len(uint(length)) -} - -// isNaN reports whether x is a NaN without requiring the math package. -// This will always return false if T is not floating-point. -func isNaN[T constraints.Ordered](x T) bool { - return x != x -} diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go deleted file mode 100644 index 06f2c7a2..00000000 --- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go +++ /dev/null @@ -1,479 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -// insertionSortCmpFunc sorts data[a:b] using insertion sort. -func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownCmpFunc implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. -func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) { - child++ - } - if !(cmp(data[first+root], data[first+child]) < 0) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownCmpFunc(data, i, hi, first, cmp) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownCmpFunc(data, lo, i, first, cmp) - } -} - -// pdqsortCmpFunc sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. 
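
The generated *CmpFunc and *Ordered variants in this and the following vendored files back the exported API shown above (Sort, SortFunc, BinarySearch, BinarySearchFunc, and so on). For reference, a minimal usage sketch of that API; it uses the standard library slices and cmp packages, which provide the same functions and signatures as this vendored x/exp/slices copy. The user type and values are illustrative only.

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type user struct {
	name string
	age  int
}

func main() {
	users := []user{{"carol", 35}, {"alice", 30}, {"bob", 25}}

	// SortFunc: cmp returns a negative, zero, or positive number, as documented above.
	slices.SortFunc(users, func(a, b user) int { return cmp.Compare(a.age, b.age) })
	fmt.Println(users) // [{bob 25} {alice 30} {carol 35}]

	// BinarySearchFunc on the now-sorted slice, comparing an element against a target age.
	i, found := slices.BinarySearchFunc(users, 30, func(u user, age int) int {
		return cmp.Compare(u.age, age)
	})
	fmt.Println(i, found) // 1 true
}
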
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortCmpFunc(data, a, b, cmp) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortCmpFunc(data, a, b, cmp) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsCmpFunc(data, a, b, cmp) - limit-- - } - - pivot, hint := choosePivotCmpFunc(data, a, b, cmp) - if hint == decreasingHint { - reverseRangeCmpFunc(data, a, b, cmp) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortCmpFunc(data, a, b, cmp) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) { - mid := partitionEqualCmpFunc(data, a, b, pivot, cmp) - a = mid - continue - } - - mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortCmpFunc(data, a, mid, limit, cmp) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortCmpFunc(data, mid+1, b, limit, cmp) - b = mid - } - } -} - -// partitionCmpFunc does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && (cmp(data[i], data[a]) < 0) { - i++ - } - for i <= j && !(cmp(data[j], data[a]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !(cmp(data[a], data[i]) < 0) { - i++ - } - for i <= j && (cmp(data[a], data[j]) < 0) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !(cmp(data[i], data[i-1]) < 0) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !(cmp(data[j], data[j-1]) < 0) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotCmpFunc chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentCmpFunc(data, i, &swaps, cmp) - j = medianAdjacentCmpFunc(data, j, &swaps, cmp) - k = medianAdjacentCmpFunc(data, k, &swaps, cmp) - } - // Find the median among i, j, k and stores it into j. - j = medianCmpFunc(data, i, j, k, &swaps, cmp) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) { - if cmp(data[b], data[a]) < 0 { - *swaps++ - return b, a - } - return a, b -} - -// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int { - a, b = order2CmpFunc(data, a, b, swaps, cmp) - b, c = order2CmpFunc(data, b, c, swaps, cmp) - a, b = order2CmpFunc(data, a, b, swaps, cmp) - return b -} - -// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int { - return medianCmpFunc(data, a-1, a, a+1, swaps, cmp) -} - -func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortCmpFunc(data, a, b, cmp) - a = b - b += blockSize - } - insertionSortCmpFunc(data, a, n, cmp) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeCmpFunc(data, a, a+blockSize, b, cmp) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeCmpFunc(data, a, m, n, cmp) - } - blockSize *= 2 - } -} - -// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
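
The block-swap rotate referenced in the symMerge comment above (implemented as swapRangeCmpFunc and rotateCmpFunc below) exchanges two adjacent blocks in place. A standalone sketch specialized to []int, with illustrative values:

package main

import "fmt"

// swapRange and rotate mirror the swapRangeCmpFunc/rotateCmpFunc pair below.
func swapRange(data []int, a, b, n int) {
	for i := 0; i < n; i++ {
		data[a+i], data[b+i] = data[b+i], data[a+i]
	}
}

// rotate exchanges the adjacent blocks data[a:m] and data[m:b] in place,
// turning 'x u v y' into 'x v u y'.
func rotate(data []int, a, m, b int) {
	i, j := m-a, b-m
	for i != j {
		if i > j {
			swapRange(data, m-i, m, j)
			i -= j
		} else {
			swapRange(data, m-i, m+j-i, i)
			j -= i
		}
	}
	swapRange(data, m-i, m, i) // i == j
}

func main() {
	// u = [1 2 3], v = [40 50], surrounded by untouched 9s.
	data := []int{9, 1, 2, 3, 40, 50, 9}
	rotate(data, 1, 4, 6)
	fmt.Println(data) // [9 40 50 1 2 3 9]
}
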
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmp(data[h], data[a]) < 0 { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !(cmp(data[m], data[h]) < 0) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !(cmp(data[p-c], data[c]) < 0) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateCmpFunc(data, start, m, end, cmp) - } - if a < start && start < mid { - symMergeCmpFunc(data, a, start, mid, cmp) - } - if mid < end && end < b { - symMergeCmpFunc(data, mid, end, b, cmp) - } -} - -// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeCmpFunc(data, m-i, m, j, cmp) - i -= j - } else { - swapRangeCmpFunc(data, m-i, m+j-i, i, cmp) - j -= i - } - } - // i == j - swapRangeCmpFunc(data, m-i, m, i, cmp) -} diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go deleted file mode 100644 index 99b47c39..00000000 --- a/vendor/golang.org/x/exp/slices/zsortordered.go +++ /dev/null @@ -1,481 +0,0 @@ -// Code generated by gen_sort_variants.go; DO NOT EDIT. - -// Copyright 2022 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package slices - -import "golang.org/x/exp/constraints" - -// insertionSortOrdered sorts data[a:b] using insertion sort. -func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) { - for i := a + 1; i < b; i++ { - for j := i; j > a && cmpLess(data[j], data[j-1]); j-- { - data[j], data[j-1] = data[j-1], data[j] - } - } -} - -// siftDownOrdered implements the heap property on data[lo:hi]. -// first is an offset into the array where the root of the heap lies. 
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) { - root := lo - for { - child := 2*root + 1 - if child >= hi { - break - } - if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) { - child++ - } - if !cmpLess(data[first+root], data[first+child]) { - return - } - data[first+root], data[first+child] = data[first+child], data[first+root] - root = child - } -} - -func heapSortOrdered[E constraints.Ordered](data []E, a, b int) { - first := a - lo := 0 - hi := b - a - - // Build heap with greatest element at top. - for i := (hi - 1) / 2; i >= 0; i-- { - siftDownOrdered(data, i, hi, first) - } - - // Pop elements, largest first, into end of data. - for i := hi - 1; i >= 0; i-- { - data[first], data[first+i] = data[first+i], data[first] - siftDownOrdered(data, lo, i, first) - } -} - -// pdqsortOrdered sorts data[a:b]. -// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort. -// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf -// C++ implementation: https://github.com/orlp/pdqsort -// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/ -// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort. -func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) { - const maxInsertion = 12 - - var ( - wasBalanced = true // whether the last partitioning was reasonably balanced - wasPartitioned = true // whether the slice was already partitioned - ) - - for { - length := b - a - - if length <= maxInsertion { - insertionSortOrdered(data, a, b) - return - } - - // Fall back to heapsort if too many bad choices were made. - if limit == 0 { - heapSortOrdered(data, a, b) - return - } - - // If the last partitioning was imbalanced, we need to breaking patterns. - if !wasBalanced { - breakPatternsOrdered(data, a, b) - limit-- - } - - pivot, hint := choosePivotOrdered(data, a, b) - if hint == decreasingHint { - reverseRangeOrdered(data, a, b) - // The chosen pivot was pivot-a elements after the start of the array. - // After reversing it is pivot-a elements before the end of the array. - // The idea came from Rust's implementation. - pivot = (b - 1) - (pivot - a) - hint = increasingHint - } - - // The slice is likely already sorted. - if wasBalanced && wasPartitioned && hint == increasingHint { - if partialInsertionSortOrdered(data, a, b) { - return - } - } - - // Probably the slice contains many duplicate elements, partition the slice into - // elements equal to and elements greater than the pivot. - if a > 0 && !cmpLess(data[a-1], data[pivot]) { - mid := partitionEqualOrdered(data, a, b, pivot) - a = mid - continue - } - - mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot) - wasPartitioned = alreadyPartitioned - - leftLen, rightLen := mid-a, b-mid - balanceThreshold := length / 8 - if leftLen < rightLen { - wasBalanced = leftLen >= balanceThreshold - pdqsortOrdered(data, a, mid, limit) - a = mid + 1 - } else { - wasBalanced = rightLen >= balanceThreshold - pdqsortOrdered(data, mid+1, b, limit) - b = mid - } - } -} - -// partitionOrdered does one quicksort partition. -// Let p = data[pivot] -// Moves elements in data[a:b] around, so that data[i]
<p and data[j]>
=p for inewpivot. -// On return, data[newpivot] = p -func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - data[j], data[a] = data[a], data[j] - return j, true - } - data[i], data[j] = data[j], data[i] - i++ - j-- - - for { - for i <= j && cmpLess(data[i], data[a]) { - i++ - } - for i <= j && !cmpLess(data[j], data[a]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - data[j], data[a] = data[a], data[j] - return j, false -} - -// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot]. -// It assumed that data[a:b] does not contain elements smaller than the data[pivot]. -func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) { - data[a], data[pivot] = data[pivot], data[a] - i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned - - for { - for i <= j && !cmpLess(data[a], data[i]) { - i++ - } - for i <= j && cmpLess(data[a], data[j]) { - j-- - } - if i > j { - break - } - data[i], data[j] = data[j], data[i] - i++ - j-- - } - return i -} - -// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end. -func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool { - const ( - maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted - shortestShifting = 50 // don't shift any elements on short arrays - ) - i := a + 1 - for j := 0; j < maxSteps; j++ { - for i < b && !cmpLess(data[i], data[i-1]) { - i++ - } - - if i == b { - return true - } - - if b-a < shortestShifting { - return false - } - - data[i], data[i-1] = data[i-1], data[i] - - // Shift the smaller one to the left. - if i-a >= 2 { - for j := i - 1; j >= 1; j-- { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - // Shift the greater one to the right. - if b-i >= 2 { - for j := i + 1; j < b; j++ { - if !cmpLess(data[j], data[j-1]) { - break - } - data[j], data[j-1] = data[j-1], data[j] - } - } - } - return false -} - -// breakPatternsOrdered scatters some elements around in an attempt to break some patterns -// that might cause imbalanced partitions in quicksort. -func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) { - length := b - a - if length >= 8 { - random := xorshift(length) - modulus := nextPowerOfTwo(length) - - for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ { - other := int(uint(random.Next()) & (modulus - 1)) - if other >= length { - other -= length - } - data[idx], data[a+other] = data[a+other], data[idx] - } - } -} - -// choosePivotOrdered chooses a pivot in data[a:b]. -// -// [0,8): chooses a static pivot. -// [8,shortestNinther): uses the simple median-of-three method. -// [shortestNinther,∞): uses the Tukey ninther method. 
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) { - const ( - shortestNinther = 50 - maxSwaps = 4 * 3 - ) - - l := b - a - - var ( - swaps int - i = a + l/4*1 - j = a + l/4*2 - k = a + l/4*3 - ) - - if l >= 8 { - if l >= shortestNinther { - // Tukey ninther method, the idea came from Rust's implementation. - i = medianAdjacentOrdered(data, i, &swaps) - j = medianAdjacentOrdered(data, j, &swaps) - k = medianAdjacentOrdered(data, k, &swaps) - } - // Find the median among i, j, k and stores it into j. - j = medianOrdered(data, i, j, k, &swaps) - } - - switch swaps { - case 0: - return j, increasingHint - case maxSwaps: - return j, decreasingHint - default: - return j, unknownHint - } -} - -// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a. -func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) { - if cmpLess(data[b], data[a]) { - *swaps++ - return b, a - } - return a, b -} - -// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c. -func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int { - a, b = order2Ordered(data, a, b, swaps) - b, c = order2Ordered(data, b, c, swaps) - a, b = order2Ordered(data, a, b, swaps) - return b -} - -// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a. -func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int { - return medianOrdered(data, a-1, a, a+1, swaps) -} - -func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) { - i := a - j := b - 1 - for i < j { - data[i], data[j] = data[j], data[i] - i++ - j-- - } -} - -func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) { - for i := 0; i < n; i++ { - data[a+i], data[b+i] = data[b+i], data[a+i] - } -} - -func stableOrdered[E constraints.Ordered](data []E, n int) { - blockSize := 20 // must be > 0 - a, b := 0, blockSize - for b <= n { - insertionSortOrdered(data, a, b) - a = b - b += blockSize - } - insertionSortOrdered(data, a, n) - - for blockSize < n { - a, b = 0, 2*blockSize - for b <= n { - symMergeOrdered(data, a, a+blockSize, b) - a = b - b += 2 * blockSize - } - if m := a + blockSize; m < n { - symMergeOrdered(data, a, m, n) - } - blockSize *= 2 - } -} - -// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using -// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum -// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz -// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in -// Computer Science, pages 714-723. Springer, 2004. -// -// Let M = m-a and N = b-n. Wolog M < N. -// The recursion depth is bound by ceil(log(N+M)). -// The algorithm needs O(M*log(N/M + 1)) calls to data.Less. -// The algorithm needs O((M+N)*log(M)) calls to data.Swap. -// -// The paper gives O((M+N)*log(M)) as the number of assignments assuming a -// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation -// in the paper carries through for Swap operations, especially as the block -// swapping rotate uses only O(M+N) Swaps. -// -// symMerge assumes non-degenerate arguments: a < m && m < b. -// Having the caller check this condition eliminates many leaf recursion calls, -// which improves performance. 
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) { - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[a] into data[m:b] - // if data[a:m] only contains one element. - if m-a == 1 { - // Use binary search to find the lowest index i - // such that data[i] >= data[a] for m <= i < b. - // Exit the search loop with i == b in case no such index exists. - i := m - j := b - for i < j { - h := int(uint(i+j) >> 1) - if cmpLess(data[h], data[a]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[a] reaches the position before i. - for k := a; k < i-1; k++ { - data[k], data[k+1] = data[k+1], data[k] - } - return - } - - // Avoid unnecessary recursions of symMerge - // by direct insertion of data[m] into data[a:m] - // if data[m:b] only contains one element. - if b-m == 1 { - // Use binary search to find the lowest index i - // such that data[i] > data[m] for a <= i < m. - // Exit the search loop with i == m in case no such index exists. - i := a - j := m - for i < j { - h := int(uint(i+j) >> 1) - if !cmpLess(data[m], data[h]) { - i = h + 1 - } else { - j = h - } - } - // Swap values until data[m] reaches the position i. - for k := m; k > i; k-- { - data[k], data[k-1] = data[k-1], data[k] - } - return - } - - mid := int(uint(a+b) >> 1) - n := mid + m - var start, r int - if m > mid { - start = n - b - r = mid - } else { - start = a - r = m - } - p := n - 1 - - for start < r { - c := int(uint(start+r) >> 1) - if !cmpLess(data[p-c], data[c]) { - start = c + 1 - } else { - r = c - } - } - - end := n - start - if start < m && m < end { - rotateOrdered(data, start, m, end) - } - if a < start && start < mid { - symMergeOrdered(data, a, start, mid) - } - if mid < end && end < b { - symMergeOrdered(data, mid, end, b) - } -} - -// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data: -// Data of the form 'x u v y' is changed to 'x v u y'. -// rotate performs at most b-a many calls to data.Swap, -// and it assumes non-degenerate arguments: a < m && m < b. -func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) { - i := m - a - j := b - m - - for i != j { - if i > j { - swapRangeOrdered(data, m-i, m, j) - i -= j - } else { - swapRangeOrdered(data, m-i, m+j-i, i) - j -= i - } - } - // i == j - swapRangeOrdered(data, m-i, m, i) -} diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go index 93a798ab..794b2e32 100644 --- a/vendor/golang.org/x/time/rate/rate.go +++ b/vendor/golang.org/x/time/rate/rate.go @@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int { // TokensAt returns the number of tokens available at time t. 
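
The x/time/rate hunks below change only internals: advance now returns just the token count, and durationFromTokens caps its result to avoid int64 overflow. The exported limiter API is untouched; a brief usage sketch of it, with illustrative rate, burst, and timeout values:

package main

import (
	"context"
	"fmt"
	"time"

	"golang.org/x/time/rate"
)

func main() {
	// 5 tokens per second with a burst of 2.
	lim := rate.NewLimiter(rate.Limit(5), 2)

	fmt.Println(lim.Allow())  // true: a burst token is available immediately
	fmt.Println(lim.Tokens()) // remaining tokens at this instant

	// Wait blocks until a token is available or the context is done.
	ctx, cancel := context.WithTimeout(context.Background(), time.Second)
	defer cancel()
	if err := lim.Wait(ctx); err != nil {
		fmt.Println("rate limit wait failed:", err)
	}
}
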
func (lim *Limiter) TokensAt(t time.Time) float64 { lim.mu.Lock() - _, tokens := lim.advance(t) // does not mutate lim + tokens := lim.advance(t) // does not mutate lim lim.mu.Unlock() return tokens } @@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) { return } // advance time to now - t, tokens := r.lim.advance(t) + tokens := r.lim.advance(t) // calculate new number of tokens tokens += restoreTokens if burst := float64(r.lim.burst); tokens > burst { @@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) { lim.mu.Lock() defer lim.mu.Unlock() - t, tokens := lim.advance(t) + tokens := lim.advance(t) lim.last = t lim.tokens = tokens @@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) } } - t, tokens := lim.advance(t) + tokens := lim.advance(t) // Calculate the remaining number of tokens resulting from the request. tokens -= float64(n) @@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration) return r } -// advance calculates and returns an updated state for lim resulting from the passage of time. +// advance calculates and returns an updated number of tokens for lim +// resulting from the passage of time. // lim is not changed. // advance requires that lim.mu is held. -func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { +func (lim *Limiter) advance(t time.Time) (newTokens float64) { last := lim.last if t.Before(last) { last = t @@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) { if burst := float64(lim.burst); tokens > burst { tokens = burst } - return t, tokens + return tokens } // durationFromTokens is a unit conversion function from the number of tokens to the duration @@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration { if limit <= 0 { return InfDuration } - seconds := tokens / float64(limit) - return time.Duration(float64(time.Second) * seconds) + + duration := (tokens / float64(limit)) * float64(time.Second) + + // Cap the duration to the maximum representable int64 value, to avoid overflow. 
+ if duration > float64(math.MaxInt64) { + return InfDuration + } + + return time.Duration(duration) } // tokensFromDuration is a unit conversion function from a time duration to the number of tokens diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go index 870271ed..0458b4f9 100644 --- a/vendor/golang.org/x/tools/go/packages/golist.go +++ b/vendor/golang.org/x/tools/go/packages/golist.go @@ -322,6 +322,7 @@ type jsonPackage struct { ImportPath string Dir string Name string + Target string Export string GoFiles []string CompiledGoFiles []string @@ -506,6 +507,7 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse Name: p.Name, ID: p.ImportPath, Dir: p.Dir, + Target: p.Target, GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles), CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles), OtherFiles: absJoin(p.Dir, otherFiles(p)...), @@ -811,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string { if cfg.Mode&NeedEmbedPatterns != 0 { addFields("EmbedPatterns") } + if cfg.Mode&NeedTarget != 0 { + addFields("Target") + } return "-json=" + strings.Join(fields, ",") } diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go index 969da4c2..69eec9f4 100644 --- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go +++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go @@ -27,6 +27,7 @@ var modes = [...]struct { {NeedModule, "NeedModule"}, {NeedEmbedFiles, "NeedEmbedFiles"}, {NeedEmbedPatterns, "NeedEmbedPatterns"}, + {NeedTarget, "NeedTarget"}, } func (mode LoadMode) String() string { diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go index 9dedf977..c3a59b8e 100644 --- a/vendor/golang.org/x/tools/go/packages/packages.go +++ b/vendor/golang.org/x/tools/go/packages/packages.go @@ -59,10 +59,10 @@ import ( // // Unfortunately there are a number of open bugs related to // interactions among the LoadMode bits: -// - https://github.com/golang/go/issues/56633 -// - https://github.com/golang/go/issues/56677 -// - https://github.com/golang/go/issues/58726 -// - https://github.com/golang/go/issues/63517 +// - https://go.dev/issue/56633 +// - https://go.dev/issue/56677 +// - https://go.dev/issue/58726 +// - https://go.dev/issue/63517 type LoadMode int const ( @@ -118,6 +118,9 @@ const ( // NeedEmbedPatterns adds EmbedPatterns. NeedEmbedPatterns + // NeedTarget adds Target. + NeedTarget + // Be sure to update loadmode_string.go when adding new items! ) @@ -479,6 +482,10 @@ type Package struct { // information for the package as provided by the build system. ExportFile string + // Target is the absolute install path of the .a file, for libraries, + // and of the executable file, for binaries. + Target string + // Imports maps import paths appearing in the package's Go source files // to corresponding loaded Packages. Imports map[string]*Package diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go index 8d824f71..43261147 100644 --- a/vendor/golang.org/x/tools/go/types/typeutil/map.go +++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go @@ -2,30 +2,35 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// Package typeutil defines various utilities for types, such as Map, -// a mapping from types.Type to any values. 
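
The go/packages hunks above add a Target field to Package and a NeedTarget LoadMode bit that requests it from `go list`. A short sketch of how a caller might use it; the "fmt" pattern and the error handling are illustrative only:

package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/go/packages"
)

func main() {
	cfg := &packages.Config{
		// NeedTarget asks go list for the Target field added above.
		Mode: packages.NeedName | packages.NeedFiles | packages.NeedTarget,
	}
	pkgs, err := packages.Load(cfg, "fmt")
	if err != nil {
		log.Fatal(err)
	}
	for _, p := range pkgs {
		// Target is the install path of the .a file (libraries) or executable (binaries).
		fmt.Println(p.PkgPath, p.Target)
	}
}
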
-package typeutil // import "golang.org/x/tools/go/types/typeutil" +// Package typeutil defines various utilities for types, such as [Map], +// a hash table that maps [types.Type] to any value. +package typeutil import ( "bytes" "fmt" "go/types" - "reflect" + "hash/maphash" + "unsafe" "golang.org/x/tools/internal/typeparams" ) // Map is a hash-table-based mapping from types (types.Type) to -// arbitrary any values. The concrete types that implement +// arbitrary values. The concrete types that implement // the Type interface are pointers. Since they are not canonicalized, // == cannot be used to check for equivalence, and thus we cannot // simply use a Go map. // // Just as with map[K]V, a nil *Map is a valid empty map. // -// Not thread-safe. +// Read-only map operations ([Map.At], [Map.Len], and so on) may +// safely be called concurrently. +// +// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420 +// and 69559, if the latter proposals for a generic hash-map type and +// a types.Hash function are accepted. type Map struct { - hasher Hasher // shared by many Maps table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused length int // number of map entries } @@ -36,35 +41,17 @@ type entry struct { value any } -// SetHasher sets the hasher used by Map. +// SetHasher has no effect. // -// All Hashers are functionally equivalent but contain internal state -// used to cache the results of hashing previously seen types. -// -// A single Hasher created by MakeHasher() may be shared among many -// Maps. This is recommended if the instances have many keys in -// common, as it will amortize the cost of hash computation. -// -// A Hasher may grow without bound as new types are seen. Even when a -// type is deleted from the map, the Hasher never shrinks, since other -// types in the map may reference the deleted type indirectly. -// -// Hashers are not thread-safe, and read-only operations such as -// Map.Lookup require updates to the hasher, so a full Mutex lock (not a -// read-lock) is require around all Map operations if a shared -// hasher is accessed from multiple threads. -// -// If SetHasher is not called, the Map will create a private hasher at -// the first call to Insert. -func (m *Map) SetHasher(hasher Hasher) { - m.hasher = hasher -} +// It is a relic of an optimization that is no longer profitable. Do +// not use [Hasher], [MakeHasher], or [SetHasher] in new code. +func (m *Map) SetHasher(Hasher) {} // Delete removes the entry with the given key, if any. // It returns true if the entry was found. func (m *Map) Delete(key types.Type) bool { if m != nil && m.table != nil { - hash := m.hasher.Hash(key) + hash := hash(key) bucket := m.table[hash] for i, e := range bucket { if e.key != nil && types.Identical(key, e.key) { @@ -83,7 +70,7 @@ func (m *Map) Delete(key types.Type) bool { // The result is nil if the entry is not present. func (m *Map) At(key types.Type) any { if m != nil && m.table != nil { - for _, e := range m.table[m.hasher.Hash(key)] { + for _, e := range m.table[hash(key)] { if e.key != nil && types.Identical(key, e.key) { return e.value } @@ -96,7 +83,7 @@ func (m *Map) At(key types.Type) any { // and returns the previous entry, if any. 
func (m *Map) Set(key types.Type, value any) (prev any) { if m.table != nil { - hash := m.hasher.Hash(key) + hash := hash(key) bucket := m.table[hash] var hole *entry for i, e := range bucket { @@ -115,10 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) { m.table[hash] = append(bucket, entry{key, value}) } } else { - if m.hasher.memo == nil { - m.hasher = MakeHasher() - } - hash := m.hasher.Hash(key) + hash := hash(key) m.table = map[uint32][]entry{hash: {entry{key, value}}} } @@ -195,53 +179,35 @@ func (m *Map) KeysString() string { return m.toString(false) } -//////////////////////////////////////////////////////////////////////// -// Hasher +// -- Hasher -- -// A Hasher maps each type to its hash value. -// For efficiency, a hasher uses memoization; thus its memory -// footprint grows monotonically over time. -// Hashers are not thread-safe. -// Hashers have reference semantics. -// Call MakeHasher to create a Hasher. -type Hasher struct { - memo map[types.Type]uint32 - - // ptrMap records pointer identity. - ptrMap map[any]uint32 - - // sigTParams holds type parameters from the signature being hashed. - // Signatures are considered identical modulo renaming of type parameters, so - // within the scope of a signature type the identity of the signature's type - // parameters is just their index. - // - // Since the language does not currently support referring to uninstantiated - // generic types or functions, and instantiated signatures do not have type - // parameter lists, we should never encounter a second non-empty type - // parameter list when hashing a generic signature. - sigTParams *types.TypeParamList +// hash returns the hash of type t. +// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted. +func hash(t types.Type) uint32 { + return theHasher.Hash(t) } -// MakeHasher returns a new Hasher instance. -func MakeHasher() Hasher { - return Hasher{ - memo: make(map[types.Type]uint32), - ptrMap: make(map[any]uint32), - sigTParams: nil, - } -} +// A Hasher provides a [Hasher.Hash] method to map a type to its hash value. +// Hashers are stateless, and all are equivalent. +type Hasher struct{} + +var theHasher Hasher + +// MakeHasher returns Hasher{}. +// Hashers are stateless; all are equivalent. +func MakeHasher() Hasher { return theHasher } // Hash computes a hash value for the given type t such that // Identical(t, t') => Hash(t) == Hash(t'). func (h Hasher) Hash(t types.Type) uint32 { - hash, ok := h.memo[t] - if !ok { - hash = h.hashFor(t) - h.memo[t] = hash - } - return hash + return hasher{inGenericSig: false}.hash(t) } +// hasher holds the state of a single Hash traversal: whether we are +// inside the signature of a generic function; this is used to +// optimize [hasher.hashTypeParam]. +type hasher struct{ inGenericSig bool } + // hashString computes the Fowler–Noll–Vo hash of s. func hashString(s string) uint32 { var h uint32 @@ -252,21 +218,21 @@ func hashString(s string) uint32 { return h } -// hashFor computes the hash of t. -func (h Hasher) hashFor(t types.Type) uint32 { +// hash computes the hash of t. +func (h hasher) hash(t types.Type) uint32 { // See Identical for rationale. 
switch t := t.(type) { case *types.Basic: return uint32(t.Kind()) case *types.Alias: - return h.Hash(types.Unalias(t)) + return h.hash(types.Unalias(t)) case *types.Array: - return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem()) + return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem()) case *types.Slice: - return 9049 + 2*h.Hash(t.Elem()) + return 9049 + 2*h.hash(t.Elem()) case *types.Struct: var hash uint32 = 9059 @@ -277,12 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 { } hash += hashString(t.Tag(i)) hash += hashString(f.Name()) // (ignore f.Pkg) - hash += h.Hash(f.Type()) + hash += h.hash(f.Type()) } return hash case *types.Pointer: - return 9067 + 2*h.Hash(t.Elem()) + return 9067 + 2*h.hash(t.Elem()) case *types.Signature: var hash uint32 = 9091 @@ -290,33 +256,14 @@ func (h Hasher) hashFor(t types.Type) uint32 { hash *= 8863 } - // Use a separate hasher for types inside of the signature, where type - // parameter identity is modified to be (index, constraint). We must use a - // new memo for this hasher as type identity may be affected by this - // masking. For example, in func[T any](*T), the identity of *T depends on - // whether we are mapping the argument in isolation, or recursively as part - // of hashing the signature. - // - // We should never encounter a generic signature while hashing another - // generic signature, but defensively set sigTParams only if h.mask is - // unset. tparams := t.TypeParams() - if h.sigTParams == nil && tparams.Len() != 0 { - h = Hasher{ - // There may be something more efficient than discarding the existing - // memo, but it would require detecting whether types are 'tainted' by - // references to type parameters. - memo: make(map[types.Type]uint32), - // Re-using ptrMap ensures that pointer identity is preserved in this - // hasher. - ptrMap: h.ptrMap, - sigTParams: tparams, - } - } + if n := tparams.Len(); n > 0 { + h.inGenericSig = true // affects constraints, params, and results - for i := 0; i < tparams.Len(); i++ { - tparam := tparams.At(i) - hash += 7 * h.Hash(tparam.Constraint()) + for i := range n { + tparam := tparams.At(i) + hash += 7 * h.hash(tparam.Constraint()) + } } return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results()) @@ -350,17 +297,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { return hash case *types.Map: - return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem()) + return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem()) case *types.Chan: - return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem()) + return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem()) case *types.Named: - hash := h.hashPtr(t.Obj()) + hash := h.hashTypeName(t.Obj()) targs := t.TypeArgs() for i := 0; i < targs.Len(); i++ { targ := targs.At(i) - hash += 2 * h.Hash(targ) + hash += 2 * h.hash(targ) } return hash @@ -374,17 +321,17 @@ func (h Hasher) hashFor(t types.Type) uint32 { panic(fmt.Sprintf("%T: %v", t, t)) } -func (h Hasher) hashTuple(tuple *types.Tuple) uint32 { +func (h hasher) hashTuple(tuple *types.Tuple) uint32 { // See go/types.identicalTypes for rationale. n := tuple.Len() hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { - hash += 3 * h.Hash(tuple.At(i).Type()) + for i := range n { + hash += 3 * h.hash(tuple.At(i).Type()) } return hash } -func (h Hasher) hashUnion(t *types.Union) uint32 { +func (h hasher) hashUnion(t *types.Union) uint32 { // Hash type restrictions. terms, err := typeparams.UnionTermSet(t) // if err != nil t has invalid type restrictions. 
Fall back on a non-zero @@ -395,11 +342,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 { return h.hashTermSet(terms) } -func (h Hasher) hashTermSet(terms []*types.Term) uint32 { +func (h hasher) hashTermSet(terms []*types.Term) uint32 { hash := 9157 + 2*uint32(len(terms)) for _, term := range terms { // term order is not significant. - termHash := h.Hash(term.Type()) + termHash := h.hash(term.Type()) if term.Tilde() { termHash *= 9161 } @@ -408,36 +355,42 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 { return hash } -// hashTypeParam returns a hash of the type parameter t, with a hash value -// depending on whether t is contained in h.sigTParams. -// -// If h.sigTParams is set and contains t, then we are in the process of hashing -// a signature, and the hash value of t must depend only on t's index and -// constraint: signatures are considered identical modulo type parameter -// renaming. To avoid infinite recursion, we only hash the type parameter -// index, and rely on types.Identical to handle signatures where constraints -// are not identical. -// -// Otherwise the hash of t depends only on t's pointer identity. -func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 { - if h.sigTParams != nil { - i := t.Index() - if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) { - return 9173 + 3*uint32(i) - } +// hashTypeParam returns the hash of a type parameter. +func (h hasher) hashTypeParam(t *types.TypeParam) uint32 { + // Within the signature of a generic function, TypeParams are + // identical if they have the same index and constraint, so we + // hash them based on index. + // + // When we are outside a generic function, free TypeParams are + // identical iff they are the same object, so we can use a + // more discriminating hash consistent with object identity. + // This optimization saves [Map] about 4% when hashing all the + // types.Info.Types in the forward closure of net/http. + if !h.inGenericSig { + // Optimization: outside a generic function signature, + // use a more discrimating hash consistent with object identity. + return h.hashTypeName(t.Obj()) } - return h.hashPtr(t.Obj()) + return 9173 + 3*uint32(t.Index()) } -// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that -// pointers values are not dependent on the GC. -func (h Hasher) hashPtr(ptr any) uint32 { - if hash, ok := h.ptrMap[ptr]; ok { - return hash - } - hash := uint32(reflect.ValueOf(ptr).Pointer()) - h.ptrMap[ptr] = hash - return hash +var theSeed = maphash.MakeSeed() + +// hashTypeName hashes the pointer of tname. +func (hasher) hashTypeName(tname *types.TypeName) uint32 { + // Since types.Identical uses == to compare TypeNames, + // the Hash function uses maphash.Comparable. + // TODO(adonovan): or will, when it becomes available in go1.24. + // In the meantime we use the pointer's numeric value. + // + // hash := maphash.Comparable(theSeed, tname) + // + // (Another approach would be to hash the name and package + // path, and whether or not it is a package-level typename. It + // is rare for a package to define multiple local types with + // the same name.) + hash := uintptr(unsafe.Pointer(tname)) + return uint32(hash ^ (hash >> 32)) } // shallowHash computes a hash of t without looking at any of its @@ -454,7 +407,7 @@ func (h Hasher) hashPtr(ptr any) uint32 { // include m itself; there is no mention of the named type X that // might help us break the cycle. // (See comment in go/types.identical, case *Interface, for more.) 
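
The Hasher rewrite above only affects Map's internals; the observable Map behavior is easiest to see in a small usage sketch (not part of the vendored file). Keys are compared with types.Identical rather than pointer equality, which is the whole reason this hash table exists:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil"
)

func main() {
	var m typeutil.Map // the zero value is a usable empty map

	a := types.NewSlice(types.Typ[types.Int])
	b := types.NewSlice(types.Typ[types.Int]) // different pointer, identical type

	m.Set(a, "a []int")

	// Lookup hashes b and then uses types.Identical, so b finds a's entry
	// even though a != b as pointers.
	fmt.Println(m.At(b)) // a []int
	fmt.Println(m.Len()) // 1
}
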
-func (h Hasher) shallowHash(t types.Type) uint32 { +func (h hasher) shallowHash(t types.Type) uint32 { // t is the type of an interface method (Signature), // its params or results (Tuples), or their immediate // elements (mostly Slice, Pointer, Basic, Named), @@ -475,7 +428,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 { case *types.Tuple: n := t.Len() hash := 9137 + 2*uint32(n) - for i := 0; i < n; i++ { + for i := range n { hash += 53471161 * h.shallowHash(t.At(i).Type()) } return hash @@ -508,10 +461,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 { return 9127 case *types.Named: - return h.hashPtr(t.Obj()) + return h.hashTypeName(t.Obj()) case *types.TypeParam: - return h.hashPtr(t.Obj()) + return h.hashTypeParam(t) } panic(fmt.Sprintf("shallowHash: %T: %v", t, t)) } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go index 6f5d8a21..5662a311 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go @@ -2,52 +2,183 @@ // Use of this source code is governed by a BSD-style // license that can be found in the LICENSE file. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go. - -// This file implements FindExportData. +// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go. +// This file also additionally implements FindExportData for gcexportdata.NewReader. package gcimporter import ( "bufio" + "bytes" + "errors" "fmt" + "go/build" "io" - "strconv" + "os" + "os/exec" + "path/filepath" "strings" + "sync" ) -func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) { - // See $GOROOT/include/ar.h. - hdr := make([]byte, 16+12+6+6+8+10+2) - _, err = io.ReadFull(r, hdr) - if err != nil { - return - } - // leave for debugging - if false { - fmt.Printf("header: %s", hdr) - } - s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10])) - length, err := strconv.Atoi(s) - size = int64(length) - if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' { - err = fmt.Errorf("invalid archive header") - return - } - name = strings.TrimSpace(string(hdr[:16])) - return -} - // FindExportData positions the reader r at the beginning of the // export data section of an underlying cmd/compile created archive // file by reading from it. The reader must be positioned at the // start of the file before calling this function. -// The size result is the length of the export data in bytes. +// This returns the length of the export data in bytes. // // This function is needed by [gcexportdata.Read], which must // accept inputs produced by the last two releases of cmd/compile, // plus tip. func FindExportData(r *bufio.Reader) (size int64, err error) { + arsize, err := FindPackageDefinition(r) + if err != nil { + return + } + size = int64(arsize) + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + size -= int64(len(objapi)) + for _, h := range headers { + size -= int64(len(h)) + } + + // Check for the binary export data section header "$$B\n". 
+ // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading + line, err := r.ReadSlice('\n') + if err != nil { + return + } + hdr := string(line) + if hdr != "$$B\n" { + err = fmt.Errorf("unknown export data header: %q", hdr) + return + } + size -= int64(len(hdr)) + + // For files with a binary export data header "$$B\n", + // these are always terminated by an end-of-section marker "\n$$\n". + // So the last bytes must always be this constant. + // + // The end-of-section marker is not a part of the export data itself. + // Do not include these in size. + // + // It would be nice to have sanity check that the final bytes after + // the export data are indeed the end-of-section marker. The split + // of gcexportdata.NewReader and gcexportdata.Read make checking this + // ugly so gcimporter gives up enforcing this. The compiler and go/types + // importer do enforce this, which seems good enough. + const endofsection = "\n$$\n" + size -= int64(len(endofsection)) + + if size < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + return + } + + return +} + +// ReadUnified reads the contents of the unified export data from a reader r +// that contains the contents of a GC-created archive file. +// +// On success, the reader will be positioned after the end-of-section marker "\n$$\n". +// +// Supported GC-created archive files have 4 layers of nesting: +// - An archive file containing a package definition file. +// - The package definition file contains headers followed by a data section. +// Headers are lines (≤ 4kb) that do not start with "$$". +// - The data section starts with "$$B\n" followed by export data followed +// by an end of section marker "\n$$\n". (The section start "$$\n" is no +// longer supported.) +// - The export data starts with a format byte ('u') followed by the in +// the given format. (See ReadExportDataHeader for older formats.) +// +// Putting this together, the bytes in a GC-created archive files are expected +// to look like the following. +// See cmd/internal/archive for more details on ar file headers. +// +// | \n | ar file signature +// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size. +// | go object <...>\n | objabi header +// | \n | other headers such as build id +// | $$B\n | binary format marker +// | u\n | unified export +// | $$\n | end-of-section marker +// | [optional padding] | padding byte (0x0A) if size is odd +// | [ar file header] | other ar files +// | [ar file data] | +func ReadUnified(r *bufio.Reader) (data []byte, err error) { + // We historically guaranteed headers at the default buffer size (4096) work. + // This ensures we can use ReadSlice throughout. + const minBufferSize = 4096 + r = bufio.NewReaderSize(r, minBufferSize) + + size, err := FindPackageDefinition(r) + if err != nil { + return + } + n := size + + objapi, headers, err := ReadObjectHeaders(r) + if err != nil { + return + } + n -= len(objapi) + for _, h := range headers { + n -= len(h) + } + + hdrlen, err := ReadExportDataHeader(r) + if err != nil { + return + } + n -= hdrlen + + // size also includes the end of section marker. Remove that many bytes from the end. + const marker = "\n$$\n" + n -= len(marker) + + if n < 0 { + err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n) + return + } + + // Read n bytes from buf. 
+ data = make([]byte, n) + _, err = io.ReadFull(r, data) + if err != nil { + return + } + + // Check for marker at the end. + var suffix [len(marker)]byte + _, err = io.ReadFull(r, suffix[:]) + if err != nil { + return + } + if s := string(suffix[:]); s != marker { + err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker) + return + } + + return +} + +// FindPackageDefinition positions the reader r at the beginning of a package +// definition file ("__.PKGDEF") within a GC-created archive by reading +// from it, and returns the size of the package definition file in the archive. +// +// The reader must be positioned at the start of the archive file before calling +// this function, and "__.PKGDEF" is assumed to be the first file in the archive. +// +// See cmd/internal/archive for details on the archive format. +func FindPackageDefinition(r *bufio.Reader) (size int, err error) { + // Uses ReadSlice to limit risk of malformed inputs. + // Read first line to make sure this is an object file. line, err := r.ReadSlice('\n') if err != nil { @@ -61,56 +192,230 @@ func FindExportData(r *bufio.Reader) (size int64, err error) { return } - // Archive file. Scan to __.PKGDEF. - var name string - if name, size, err = readGopackHeader(r); err != nil { - return - } - arsize := size - - // First entry should be __.PKGDEF. - if name != "__.PKGDEF" { - err = fmt.Errorf("go archive is missing __.PKGDEF") - return - } - - // Read first line of __.PKGDEF data, so that line - // is once again the first line of the input. - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - - // Now at __.PKGDEF in archive or still at beginning of file. - // Either way, line should begin with "go object ". - if !strings.HasPrefix(string(line), "go object ") { - err = fmt.Errorf("not a Go object file") - return - } - - // Skip over object headers to get to the export data section header "$$B\n". - // Object headers are lines that do not start with '$'. - for line[0] != '$' { - if line, err = r.ReadSlice('\n'); err != nil { - err = fmt.Errorf("can't find export data (%v)", err) - return - } - size -= int64(len(line)) - } - - // Check for the binary export data section header "$$B\n". - hdr := string(line) - if hdr != "$$B\n" { - err = fmt.Errorf("unknown export data header: %q", hdr) - return - } - // TODO(taking): Remove end-of-section marker "\n$$\n" from size. - - if size < 0 { - err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size) + // package export block should be first + size = readArchiveHeader(r, "__.PKGDEF") + if size <= 0 { + err = fmt.Errorf("not a package file") return } return } + +// ReadObjectHeaders reads object headers from the reader. Object headers are +// lines that do not start with an end-of-section marker "$$". The first header +// is the objabi header. On success, the reader will be positioned at the beginning +// of the end-of-section marker. +// +// It returns an error if any header does not fit in r.Size() bytes. +func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) { + // line is a temporary buffer for headers. + // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs. 
+ var line []byte + + // objapi header should be the first line + if line, err = r.ReadSlice('\n'); err != nil { + err = fmt.Errorf("can't find export data (%v)", err) + return + } + objapi = string(line) + + // objapi header begins with "go object ". + if !strings.HasPrefix(objapi, "go object ") { + err = fmt.Errorf("not a go object file: %s", objapi) + return + } + + // process remaining object header lines + for { + // check for an end of section marker "$$" + line, err = r.Peek(2) + if err != nil { + return + } + if string(line) == "$$" { + return // stop + } + + // read next header + line, err = r.ReadSlice('\n') + if err != nil { + return + } + headers = append(headers, string(line)) + } +} + +// ReadExportDataHeader reads the export data header and format from r. +// It returns the number of bytes read, or an error if the format is no longer +// supported or it failed to read. +// +// The only currently supported format is binary export data in the +// unified export format. +func ReadExportDataHeader(r *bufio.Reader) (n int, err error) { + // Read export data header. + line, err := r.ReadSlice('\n') + if err != nil { + return + } + + hdr := string(line) + switch hdr { + case "$$\n": + err = fmt.Errorf("old textual export format no longer supported (recompile package)") + return + + case "$$B\n": + var format byte + format, err = r.ReadByte() + if err != nil { + return + } + // The unified export format starts with a 'u'. + switch format { + case 'u': + default: + // Older no longer supported export formats include: + // indexed export format which started with an 'i'; and + // the older binary export format which started with a 'c', + // 'd', or 'v' (from "version"). + err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format) + return + } + + default: + err = fmt.Errorf("unknown export data header: %q", hdr) + return + } + + n = len(hdr) + 1 // + 1 is for 'u' + return +} + +// FindPkg returns the filename and unique package id for an import +// path based on package information provided by build.Import (using +// the build.Default build.Context). A relative srcDir is interpreted +// relative to the current working directory. +// +// FindPkg is only used in tests within x/tools. +func FindPkg(path, srcDir string) (filename, id string, err error) { + // TODO(taking): Move internal/exportdata.FindPkg into its own file, + // and then this copy into a _test package. + if path == "" { + return "", "", errors.New("path is empty") + } + + var noext string + switch { + default: + // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" + // Don't require the source files to be present. 
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 + srcDir = abs + } + var bp *build.Package + bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary) + if bp.PkgObj == "" { + if bp.Goroot && bp.Dir != "" { + filename, err = lookupGorootExport(bp.Dir) + if err == nil { + _, err = os.Stat(filename) + } + if err == nil { + return filename, bp.ImportPath, nil + } + } + goto notfound + } else { + noext = strings.TrimSuffix(bp.PkgObj, ".a") + } + id = bp.ImportPath + + case build.IsLocalImport(path): + // "./x" -> "/this/directory/x.ext", "/this/directory/x" + noext = filepath.Join(srcDir, path) + id = noext + + case filepath.IsAbs(path): + // for completeness only - go/build.Import + // does not support absolute imports + // "/x" -> "/x.ext", "/x" + noext = path + id = path + } + + if false { // for debugging + if path != id { + fmt.Printf("%s -> %s\n", path, id) + } + } + + // try extensions + for _, ext := range pkgExts { + filename = noext + ext + f, statErr := os.Stat(filename) + if statErr == nil && !f.IsDir() { + return filename, id, nil + } + if err == nil { + err = statErr + } + } + +notfound: + if err == nil { + return "", path, fmt.Errorf("can't find import: %q", path) + } + return "", path, fmt.Errorf("can't find import: %q: %w", path, err) +} + +var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension + +var exportMap sync.Map // package dir → func() (string, error) + +// lookupGorootExport returns the location of the export data +// (normally found in the build cache, but located in GOROOT/pkg +// in prior Go releases) for the package located in pkgDir. +// +// (We use the package's directory instead of its import path +// mainly to simplify handling of the packages in src/vendor +// and cmd/vendor.) +// +// lookupGorootExport is only used in tests within x/tools. 
+func lookupGorootExport(pkgDir string) (string, error) { + f, ok := exportMap.Load(pkgDir) + if !ok { + var ( + listOnce sync.Once + exportPath string + err error + ) + f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) { + listOnce.Do(func() { + cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir) + cmd.Dir = build.Default.GOROOT + cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT) + var output []byte + output, err = cmd.Output() + if err != nil { + if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 { + err = errors.New(string(ee.Stderr)) + } + return + } + + exports := strings.Split(string(bytes.TrimSpace(output)), "\n") + if len(exports) != 1 { + err = fmt.Errorf("go list reported %d exports; expected 1", len(exports)) + return + } + + exportPath = exports[0] + }) + + return exportPath, err + }) + } + + return f.(func() (string, error))() +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go index dbbca860..3dbd21d1 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go @@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter" import ( "bufio" - "bytes" "fmt" - "go/build" "go/token" "go/types" "io" "os" - "os/exec" - "path/filepath" - "strings" - "sync" ) const ( @@ -45,127 +39,14 @@ const ( trace = false ) -var exportMap sync.Map // package dir → func() (string, bool) - -// lookupGorootExport returns the location of the export data -// (normally found in the build cache, but located in GOROOT/pkg -// in prior Go releases) for the package located in pkgDir. -// -// (We use the package's directory instead of its import path -// mainly to simplify handling of the packages in src/vendor -// and cmd/vendor.) -func lookupGorootExport(pkgDir string) (string, bool) { - f, ok := exportMap.Load(pkgDir) - if !ok { - var ( - listOnce sync.Once - exportPath string - ) - f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) { - listOnce.Do(func() { - cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir) - cmd.Dir = build.Default.GOROOT - var output []byte - output, err := cmd.Output() - if err != nil { - return - } - - exports := strings.Split(string(bytes.TrimSpace(output)), "\n") - if len(exports) != 1 { - return - } - - exportPath = exports[0] - }) - - return exportPath, exportPath != "" - }) - } - - return f.(func() (string, bool))() -} - -var pkgExts = [...]string{".a", ".o"} - -// FindPkg returns the filename and unique package id for an import -// path based on package information provided by build.Import (using -// the build.Default build.Context). A relative srcDir is interpreted -// relative to the current working directory. -// If no file was found, an empty filename is returned. -func FindPkg(path, srcDir string) (filename, id string) { - if path == "" { - return - } - - var noext string - switch { - default: - // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x" - // Don't require the source files to be present. 
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282 - srcDir = abs - } - bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary) - if bp.PkgObj == "" { - var ok bool - if bp.Goroot && bp.Dir != "" { - filename, ok = lookupGorootExport(bp.Dir) - } - if !ok { - id = path // make sure we have an id to print in error message - return - } - } else { - noext = strings.TrimSuffix(bp.PkgObj, ".a") - id = bp.ImportPath - } - - case build.IsLocalImport(path): - // "./x" -> "/this/directory/x.ext", "/this/directory/x" - noext = filepath.Join(srcDir, path) - id = noext - - case filepath.IsAbs(path): - // for completeness only - go/build.Import - // does not support absolute imports - // "/x" -> "/x.ext", "/x" - noext = path - id = path - } - - if false { // for debugging - if path != id { - fmt.Printf("%s -> %s\n", path, id) - } - } - - if filename != "" { - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - // try extensions - for _, ext := range pkgExts { - filename = noext + ext - if f, err := os.Stat(filename); err == nil && !f.IsDir() { - return - } - } - - filename = "" // not found - return -} - // Import imports a gc-generated package given its import path and srcDir, adds // the corresponding package object to the packages map, and returns the object. // The packages map must contain all packages already imported. // -// TODO(taking): Import is only used in tests. Move to gcimporter_test. -func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { +// Import is only used in tests. +func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) { var rc io.ReadCloser - var filename, id string + var id string if lookup != nil { // With custom lookup specified, assume that caller has // converted path to a canonical import path for use in the map. @@ -184,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } rc = f } else { - filename, id = FindPkg(path, srcDir) + var filename string + filename, id, err = FindPkg(path, srcDir) if filename == "" { if path == "unsafe" { return types.Unsafe, nil } - return nil, fmt.Errorf("can't find import: %q", id) + return nil, err } // no need to re-import if the package was imported completely before @@ -212,54 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func } defer rc.Close() - var size int64 buf := bufio.NewReader(rc) - if size, err = FindExportData(buf); err != nil { - return - } - - var data []byte - data, err = io.ReadAll(buf) + data, err := ReadUnified(buf) if err != nil { + err = fmt.Errorf("import %q: %v", path, err) return } - if len(data) == 0 { - return nil, fmt.Errorf("no data to load a package from for path %s", id) - } - // TODO(gri): allow clients of go/importer to provide a FileSet. - // Or, define a new standard go/types/gcexportdata package. - fset := token.NewFileSet() + // unified: emitted by cmd/compile since go1.20. + _, pkg, err = UImportData(fset, packages, data, id) - // Select appropriate importer. - switch data[0] { - case 'v', 'c', 'd': - // binary: emitted by cmd/compile till go1.10; obsolete. - return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0]) - - case 'i': - // indexed: emitted by cmd/compile till go1.19; - // now used only for serializing go/types. 
- // See https://github.com/golang/go/issues/69491. - _, pkg, err := IImportData(fset, packages, data[1:], id) - return pkg, err - - case 'u': - // unified: emitted by cmd/compile since go1.20. - _, pkg, err := UImportData(fset, packages, data[1:size], id) - return pkg, err - - default: - l := len(data) - if l > 10 { - l = 10 - } - return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id) - } + return } - -type byPath []*types.Package - -func (a byPath) Len() int { return len(a) } -func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } -func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go index e260c0e8..12943927 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go @@ -5,8 +5,6 @@ // Indexed package import. // See iexport.go for the export data format. -// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go. - package gcimporter import ( @@ -673,7 +671,9 @@ func (r *importReader) obj(name string) { case varTag: typ := r.typ() - r.declare(types.NewVar(pos, r.currPkg, name, typ)) + v := types.NewVar(pos, r.currPkg, name, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + r.declare(v) default: errorf("unexpected tag: %v", tag) @@ -1111,3 +1111,9 @@ func (r *importReader) byte() byte { } return x } + +type byPath []*types.Package + +func (a byPath) Len() int { return len(a) } +func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() } diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go new file mode 100644 index 00000000..4af810dc --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go @@ -0,0 +1,30 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package gcimporter + +import ( + "bufio" + "io" + "strconv" + "strings" +) + +// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader. 
+func readArchiveHeader(b *bufio.Reader, name string) int { + // architecture-independent object file output + const HeaderSize = 60 + + var buf [HeaderSize]byte + if _, err := io.ReadFull(b, buf[:]); err != nil { + return -1 + } + aname := strings.Trim(string(buf[0:16]), " ") + if !strings.HasPrefix(aname, name) { + return -1 + } + asize := strings.Trim(string(buf[48:58]), " ") + i, _ := strconv.Atoi(asize) + return i +} diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go index 1db40861..522287d1 100644 --- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go +++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go @@ -11,10 +11,10 @@ import ( "go/token" "go/types" "sort" - "strings" "golang.org/x/tools/internal/aliases" "golang.org/x/tools/internal/pkgbits" + "golang.org/x/tools/internal/typesinternal" ) // A pkgReader holds the shared state for reading a unified IR package @@ -71,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data [] } s := string(data) - s = s[:strings.LastIndex(s, "\n$$\n")] input := pkgbits.NewPkgDecoder(path, s) pkg = readUnifiedPackage(fset, nil, imports, input) return @@ -266,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package { func (r *reader) doPkg() *types.Package { path := r.String() switch path { - case "": + // cmd/compile emits path="main" for main packages because + // that's the linker symbol prefix it used; but we need + // the package's path as it would be reported by go list, + // hence "main" below. + // See test at go/packages.TestMainPackagePathInModeTypes. + case "", "main": path = r.p.PkgPath() case "builtin": return nil // universe @@ -569,6 +573,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { sig := fn.Type().(*types.Signature) recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named) + typesinternal.SetVarKind(recv, typesinternal.RecvVar) methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic())) } @@ -616,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) { case pkgbits.ObjVar: pos := r.pos() typ := r.typ() - declare(types.NewVar(pos, objPkg, objName, typ)) + v := types.NewVar(pos, objPkg, objName, typ) + typesinternal.SetVarKind(v, typesinternal.PackageVar) + declare(v) } } diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go index e333efc8..7ea90134 100644 --- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go @@ -28,7 +28,7 @@ import ( "golang.org/x/tools/internal/event/label" ) -// An Runner will run go command invocations and serialize +// A Runner will run go command invocations and serialize // them if it sees a concurrency error. type Runner struct { // once guards the runner initialization. @@ -179,7 +179,7 @@ type Invocation struct { CleanEnv bool Env []string WorkingDir string - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) } // Postcondition: both error results have same nilness. @@ -388,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { case err := <-resChan: return err case <-timer.C: - HandleHangingGoCommand(startTime, cmd) + // HandleHangingGoCommand terminates this process. 
+ // Pass off resChan in case we can collect the command error. + handleHangingGoCommand(startTime, cmd, resChan) case <-ctx.Done(): } } else { @@ -413,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { } // Didn't shut down in response to interrupt. Kill it hard. - // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT - // on certain platforms, such as unix. if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug { log.Printf("error killing the Go command: %v", err) } @@ -422,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) { return <-resChan } -func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) { +// handleHangingGoCommand outputs debugging information to help diagnose the +// cause of a hanging Go command, and then exits with log.Fatalf. +func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) { switch runtime.GOOS { - case "linux", "darwin", "freebsd", "netbsd": + case "linux", "darwin", "freebsd", "netbsd", "openbsd": fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND -The gopls test runner has detected a hanging go command. In order to debug -this, the output of ps and lsof/fstat is printed below. + The gopls test runner has detected a hanging go command. In order to debug + this, the output of ps and lsof/fstat is printed below. -See golang/go#54461 for more details.`) + See golang/go#54461 for more details.`) fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:") fmt.Fprintln(os.Stderr, "-------------------------") @@ -438,7 +440,7 @@ See golang/go#54461 for more details.`) psCmd.Stdout = os.Stderr psCmd.Stderr = os.Stderr if err := psCmd.Run(); err != nil { - panic(fmt.Sprintf("running ps: %v", err)) + log.Printf("Handling hanging Go command: running ps: %v", err) } listFiles := "lsof" @@ -452,10 +454,24 @@ See golang/go#54461 for more details.`) listFilesCmd.Stdout = os.Stderr listFilesCmd.Stderr = os.Stderr if err := listFilesCmd.Run(); err != nil { - panic(fmt.Sprintf("running %s: %v", listFiles, err)) + log.Printf("Handling hanging Go command: running %s: %v", listFiles, err) + } + // Try to extract information about the slow go process by issuing a SIGQUIT. + if err := cmd.Process.Signal(sigStuckProcess); err == nil { + select { + case err := <-resChan: + stderr := "not a bytes.Buffer" + if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil { + stderr = buf.String() + } + log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr) + case <-time.After(5 * time.Second): + } + } else { + log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err) } } - panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)) + log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid) } func cmdDebugStr(cmd *exec.Cmd) string { diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go new file mode 100644 index 00000000..469c648e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +//go:build !unix + +package gocommand + +import "os" + +// sigStuckProcess is the signal to send to kill a hanging subprocess. +// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill. +var sigStuckProcess = os.Kill diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go new file mode 100644 index 00000000..169d37c8 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go @@ -0,0 +1,13 @@ +// Copyright 2025 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +//go:build unix + +package gocommand + +import "syscall" + +// Sigstuckprocess is the signal to send to kill a hanging subprocess. +// Send SIGQUIT to get a stack trace. +var sigStuckProcess = syscall.SIGQUIT diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go index 5ae57697..bf6b0aad 100644 --- a/vendor/golang.org/x/tools/internal/imports/fix.go +++ b/vendor/golang.org/x/tools/internal/imports/fix.go @@ -780,7 +780,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } // Try the assumed package name first, then a simpler path match @@ -815,7 +815,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix, return true }, dirFound: func(pkg *pkg) bool { - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } return strings.HasPrefix(pkg.importPathShort, searchPrefix) @@ -927,7 +927,7 @@ type ProcessEnv struct { WorkingDir string // If Logf is non-nil, debug logging is enabled through this function. - Logf func(format string, args ...interface{}) + Logf func(format string, args ...any) // If set, ModCache holds a shared cache of directory info to use across // multiple ProcessEnvs. @@ -1132,6 +1132,9 @@ func addStdlibCandidates(pass *pass, refs References) error { // but we have no way of figuring out what the user is using // TODO: investigate using the toolchain version to disambiguate in the stdlib add("math/rand/v2") + // math/rand has an overlapping API + // TestIssue66407 fails without this + add("math/rand") continue } for importPath := range stdlib.PackageSymbols { @@ -1736,7 +1739,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m // searching for "client.New") func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { // Check "internal" and "vendor" visibility: - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } @@ -1759,9 +1762,9 @@ func pkgIsCandidate(filename string, refs References, pkg *pkg) bool { return false } -// canUse reports whether the package in dir is usable from filename, +// CanUse reports whether the package in dir is usable from filename, // respecting the Go "internal" and "vendor" visibility rules. -func canUse(filename, dir string) bool { +func CanUse(filename, dir string) bool { // Fast path check, before any allocations. 
If it doesn't contain vendor // or internal, it's not tricky: // Note that this can false-negative on directories like "notinternal", diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go index d14abaa3..ec996c3c 100644 --- a/vendor/golang.org/x/tools/internal/imports/source_env.go +++ b/vendor/golang.org/x/tools/internal/imports/source_env.go @@ -67,7 +67,7 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin // same package name. Don't try to import ourselves. return false } - if !canUse(filename, pkg.dir) { + if !CanUse(filename, pkg.dir) { return false } mu.Lock() diff --git a/vendor/golang.org/x/tools/internal/modindex/index.go b/vendor/golang.org/x/tools/internal/modindex/index.go index 27b6dd83..9665356c 100644 --- a/vendor/golang.org/x/tools/internal/modindex/index.go +++ b/vendor/golang.org/x/tools/internal/modindex/index.go @@ -17,6 +17,7 @@ import ( "path/filepath" "strconv" "strings" + "testing" "time" ) @@ -85,6 +86,28 @@ type Entry struct { Names []string // exported names and information } +// IndexDir is where the module index is stored. +var IndexDir string + +// Set IndexDir +func init() { + var dir string + var err error + if testing.Testing() { + dir = os.TempDir() + } else { + dir, err = os.UserCacheDir() + // shouldn't happen, but TempDir is better than + // creating ./go/imports + if err != nil { + dir = os.TempDir() + } + } + dir = filepath.Join(dir, "go", "imports") + os.MkdirAll(dir, 0777) + IndexDir = dir +} + // ReadIndex reads the latest version of the on-disk index // for the cache directory cd. // It returns (nil, nil) if there is no index, but returns @@ -95,10 +118,7 @@ func ReadIndex(cachedir string) (*Index, error) { return nil, err } cd := Abspath(cachedir) - dir, err := IndexDir() - if err != nil { - return nil, err - } + dir := IndexDir base := indexNameBase(cd) iname := filepath.Join(dir, base) buf, err := os.ReadFile(iname) @@ -185,12 +205,8 @@ func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) { // write the index as a text file func writeIndex(cachedir Abspath, ix *Index) error { - dir, err := IndexDir() - if err != nil { - return err - } ipat := fmt.Sprintf("index-%d-*", CurrentVersion) - fd, err := os.CreateTemp(dir, ipat) + fd, err := os.CreateTemp(IndexDir, ipat) if err != nil { return err // can this happen? } @@ -201,7 +217,7 @@ func writeIndex(cachedir Abspath, ix *Index) error { content := fd.Name() content = filepath.Base(content) base := indexNameBase(cachedir) - nm := filepath.Join(dir, base) + nm := filepath.Join(IndexDir, base) err = os.WriteFile(nm, []byte(content), 0666) if err != nil { return err @@ -241,18 +257,6 @@ func writeIndexToFile(x *Index, fd *os.File) error { return nil } -// tests can override this -var IndexDir = indexDir - -// IndexDir computes the directory containing the index -func indexDir() (string, error) { - dir, err := os.UserCacheDir() - if err != nil { - return "", fmt.Errorf("cannot open UserCacheDir, %w", err) - } - return filepath.Join(dir, "go", "imports"), nil -} - // return the base name of the file containing the name of the current index func indexNameBase(cachedir Abspath) string { // crc64 is a way to convert path names into 16 hex digits. 
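For context on the modindex changes above and the lookup.go hunk that follows: IndexDir is now a package-level variable populated at init time, ReadIndex takes the module cache directory, and Lookup/LookupAll return Candidate values that now carry a Deprecated flag. The following is a minimal sketch of how that API might be exercised, assuming only the signatures visible in this diff; modindex is internal to x/tools, so the import is illustrative and would only compile inside that module, and the cache path shown is hypothetical.

// Sketch only — not part of the vendored patch. Assumes the modindex API
// shown in the surrounding diff.
package main

import (
	"fmt"
	"log"

	"golang.org/x/tools/internal/modindex" // internal package; illustrative import
)

func main() {
	// IndexDir is now a package variable, set by init():
	// os.UserCacheDir()/go/imports normally, os.TempDir()/go/imports under tests.
	fmt.Println("index stored in:", modindex.IndexDir)

	ix, err := modindex.ReadIndex("/path/to/GOMODCACHE") // hypothetical module cache dir
	if err != nil {
		log.Fatal(err)
	}
	if ix == nil {
		return // ReadIndex returns (nil, nil) when no index has been written yet
	}

	// LookupAll (added in lookup.go below) keeps only import paths that
	// provide every requested name; Deprecated is the new per-candidate flag.
	for imp, cands := range ix.LookupAll("rand", "Int", "Prime") {
		for _, c := range cands {
			fmt.Printf("%s: kind=%v deprecated=%v\n", imp, c.Type, c.Deprecated)
		}
	}
}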
diff --git a/vendor/golang.org/x/tools/internal/modindex/lookup.go b/vendor/golang.org/x/tools/internal/modindex/lookup.go index 29d4e3d7..5499c5c6 100644 --- a/vendor/golang.org/x/tools/internal/modindex/lookup.go +++ b/vendor/golang.org/x/tools/internal/modindex/lookup.go @@ -16,6 +16,7 @@ type Candidate struct { Dir string ImportPath string Type LexType + Deprecated bool // information for Funcs Results int16 // how many results Sig []Field // arg names and types @@ -34,6 +35,36 @@ const ( Func ) +// LookupAll only returns those Candidates whose import path +// finds all the nms. +func (ix *Index) LookupAll(pkg string, names ...string) map[string][]Candidate { + // this can be made faster when benchmarks show that it needs to be + names = uniquify(names) + byImpPath := make(map[string][]Candidate) + for _, nm := range names { + cands := ix.Lookup(pkg, nm, false) + for _, c := range cands { + byImpPath[c.ImportPath] = append(byImpPath[c.ImportPath], c) + } + } + for k, v := range byImpPath { + if len(v) != len(names) { + delete(byImpPath, k) + } + } + return byImpPath +} + +// remove duplicates +func uniquify(in []string) []string { + if len(in) == 0 { + return in + } + in = slices.Clone(in) + slices.Sort(in) + return slices.Compact(in) +} + // Lookup finds all the symbols in the index with the given PkgName and name. // If prefix is true, it finds all of these with name as a prefix. func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate { @@ -79,8 +110,9 @@ func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate { Dir: string(e.Dir), ImportPath: e.ImportPath, Type: asLexType(flds[1][0]), + Deprecated: len(flds[1]) > 1 && flds[1][1] == 'D', } - if flds[1] == "F" { + if px.Type == Func { n, err := strconv.Atoi(flds[2]) if err != nil { continue // should never happen @@ -111,6 +143,7 @@ func toFields(sig []string) []Field { } // benchmarks show this is measurably better than strings.Split +// split into first 4 fields separated by single space func fastSplit(x string) []string { ans := make([]string, 0, 4) nxt := 0 diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go index 2e285ed9..b918529d 100644 --- a/vendor/golang.org/x/tools/internal/modindex/symbols.go +++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go @@ -12,6 +12,7 @@ import ( "go/types" "os" "path/filepath" + "runtime" "slices" "strings" @@ -19,29 +20,30 @@ import ( ) // The name of a symbol contains information about the symbol: -// T for types -// C for consts -// V for vars +// T for types, TD if the type is deprecated +// C for consts, CD if the const is deprecated +// V for vars, VD if the var is deprecated // and for funcs: F ( )* // any spaces in are replaced by $s so that the fields -// of the name are space separated +// of the name are space separated. F is replaced by FD if the func +// is deprecated. type symbol struct { pkg string // name of the symbols's package name string // declared name - kind string // T, C, V, or F + kind string // T, C, V, or F, follwed by D if deprecated sig string // signature information, for F } // find the symbols for the best directories func getSymbols(cd Abspath, dirs map[string][]*directory) { var g errgroup.Group - g.SetLimit(-1) // maybe throttle this some day + g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2)) for _, vv := range dirs { // throttling some day? 
d := vv[0] g.Go(func() error { thedir := filepath.Join(string(cd), string(d.path)) - mode := parser.SkipObjectResolution + mode := parser.SkipObjectResolution | parser.ParseComments fi, err := os.ReadDir(thedir) if err != nil { @@ -84,6 +86,9 @@ func getFileExports(f *ast.File) []symbol { // generic functions just like non-generic ones. sig := dtype.Params kind := "F" + if isDeprecated(decl.Doc) { + kind += "D" + } result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())} for _, x := range sig.List { // This code creates a string representing the type. @@ -107,7 +112,7 @@ func getFileExports(f *ast.File) []symbol { // print struct tags. So for this to happen the type of a formal parameter // has to be a explict struct, e.g. foo(x struct{a int "$"}) and ExprString // would have to show the struct tag. Even testing for this case seems - // a waste of effort, but let's not ignore such pathologies + // a waste of effort, but let's remember the possibility if strings.Contains(tp, "$") { continue } @@ -127,12 +132,16 @@ func getFileExports(f *ast.File) []symbol { ans = append(ans, *s) } case *ast.GenDecl: + depr := isDeprecated(decl.Doc) switch decl.Tok { case token.CONST, token.VAR: tp := "V" if decl.Tok == token.CONST { tp = "C" } + if depr { + tp += "D" + } for _, sp := range decl.Specs { for _, x := range sp.(*ast.ValueSpec).Names { if s := newsym(pkg, x.Name, tp, ""); s != nil { @@ -141,8 +150,12 @@ func getFileExports(f *ast.File) []symbol { } } case token.TYPE: + tp := "T" + if depr { + tp += "D" + } for _, sp := range decl.Specs { - if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil { + if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil { ans = append(ans, *s) } } @@ -160,6 +173,22 @@ func newsym(pkg, name, kind, sig string) *symbol { return &sym } +func isDeprecated(doc *ast.CommentGroup) bool { + if doc == nil { + return false + } + // go.dev/wiki/Deprecated Paragraph starting 'Deprecated:' + // This code fails for /* Deprecated: */, but it's the code from + // gopls/internal/analysis/deprecated + lines := strings.Split(doc.Text(), "\n\n") + for _, line := range lines { + if strings.HasPrefix(line, "Deprecated:") { + return true + } + } + return false +} + // return the package name and the value for the symbols. // if there are multiple packages, choose one arbitrarily // the returned slice is sorted lexicographically diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go index 66e69b43..78460591 100644 --- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go +++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go @@ -5,7 +5,7 @@ // Package packagesinternal exposes internal-only fields from go/packages. 
package packagesinternal -var GetDepsErrors = func(p interface{}) []*PackageError { return nil } +var GetDepsErrors = func(p any) []*PackageError { return nil } type PackageError struct { ImportStack []string // shortest path from package named on command line to this one @@ -16,5 +16,5 @@ type PackageError struct { var TypecheckCgo int var DepsErrors int // must be set as a LoadMode to call GetDepsErrors -var SetModFlag = func(config interface{}, value string) {} +var SetModFlag = func(config any, value string) {} var SetModFile = func(config interface{}, value string) {} diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go index cdaac9ab..9f0b871f 100644 --- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go +++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go @@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{ {"ErrTooLarge", Var, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"MinRead", Const, 0}, {"NewBuffer", Func, 0}, @@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{ {"NewCTR", Func, 0}, {"NewGCM", Func, 2}, {"NewGCMWithNonceSize", Func, 5}, + {"NewGCMWithRandomNonce", Func, 24}, {"NewGCMWithTagSize", Func, 11}, {"NewOFB", Func, 0}, {"Stream", Type, 0}, @@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{ {"Unmarshal", Func, 0}, {"UnmarshalCompressed", Func, 15}, }, + "crypto/fips140": { + {"Enabled", Func, 24}, + }, + "crypto/hkdf": { + {"Expand", Func, 24}, + {"Extract", Func, 24}, + {"Key", Func, 24}, + }, "crypto/hmac": { {"Equal", Func, 1}, {"New", Func, 0}, @@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{ {"Size", Const, 0}, {"Sum", Func, 2}, }, + "crypto/mlkem": { + {"(*DecapsulationKey1024).Bytes", Method, 24}, + {"(*DecapsulationKey1024).Decapsulate", Method, 24}, + {"(*DecapsulationKey1024).EncapsulationKey", Method, 24}, + {"(*DecapsulationKey768).Bytes", Method, 24}, + {"(*DecapsulationKey768).Decapsulate", Method, 24}, + {"(*DecapsulationKey768).EncapsulationKey", Method, 24}, + {"(*EncapsulationKey1024).Bytes", Method, 24}, + {"(*EncapsulationKey1024).Encapsulate", Method, 24}, + {"(*EncapsulationKey768).Bytes", Method, 24}, + {"(*EncapsulationKey768).Encapsulate", Method, 24}, + {"CiphertextSize1024", Const, 24}, + {"CiphertextSize768", Const, 24}, + {"DecapsulationKey1024", Type, 24}, + {"DecapsulationKey768", Type, 24}, + {"EncapsulationKey1024", Type, 24}, + {"EncapsulationKey768", Type, 24}, + {"EncapsulationKeySize1024", Const, 24}, + {"EncapsulationKeySize768", Const, 24}, + {"GenerateKey1024", Func, 24}, + {"GenerateKey768", Func, 24}, + {"NewDecapsulationKey1024", Func, 24}, + {"NewDecapsulationKey768", Func, 24}, + {"NewEncapsulationKey1024", Func, 24}, + {"NewEncapsulationKey768", Func, 24}, + {"SeedSize", Const, 24}, + {"SharedKeySize", Const, 24}, + }, + "crypto/pbkdf2": { + {"Key", Func, 24}, + }, "crypto/rand": { {"Int", Func, 0}, {"Prime", 
Func, 0}, {"Read", Func, 0}, {"Reader", Var, 0}, + {"Text", Func, 24}, }, "crypto/rc4": { {"(*Cipher).Reset", Method, 0}, @@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{ {"Sum224", Func, 2}, {"Sum256", Func, 2}, }, + "crypto/sha3": { + {"(*SHA3).AppendBinary", Method, 24}, + {"(*SHA3).BlockSize", Method, 24}, + {"(*SHA3).MarshalBinary", Method, 24}, + {"(*SHA3).Reset", Method, 24}, + {"(*SHA3).Size", Method, 24}, + {"(*SHA3).Sum", Method, 24}, + {"(*SHA3).UnmarshalBinary", Method, 24}, + {"(*SHA3).Write", Method, 24}, + {"(*SHAKE).AppendBinary", Method, 24}, + {"(*SHAKE).BlockSize", Method, 24}, + {"(*SHAKE).MarshalBinary", Method, 24}, + {"(*SHAKE).Read", Method, 24}, + {"(*SHAKE).Reset", Method, 24}, + {"(*SHAKE).UnmarshalBinary", Method, 24}, + {"(*SHAKE).Write", Method, 24}, + {"New224", Func, 24}, + {"New256", Func, 24}, + {"New384", Func, 24}, + {"New512", Func, 24}, + {"NewCSHAKE128", Func, 24}, + {"NewCSHAKE256", Func, 24}, + {"NewSHAKE128", Func, 24}, + {"NewSHAKE256", Func, 24}, + {"SHA3", Type, 24}, + {"SHAKE", Type, 24}, + {"Sum224", Func, 24}, + {"Sum256", Func, 24}, + {"Sum384", Func, 24}, + {"Sum512", Func, 24}, + {"SumSHAKE128", Func, 24}, + {"SumSHAKE256", Func, 24}, + }, "crypto/sha512": { {"BlockSize", Const, 0}, {"New", Func, 0}, @@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{ {"ConstantTimeEq", Func, 0}, {"ConstantTimeLessOrEq", Func, 2}, {"ConstantTimeSelect", Func, 0}, + {"WithDataIndependentTiming", Func, 24}, {"XORBytes", Func, 20}, }, "crypto/tls": { @@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{ {"ClientHelloInfo", Type, 4}, {"ClientHelloInfo.CipherSuites", Field, 4}, {"ClientHelloInfo.Conn", Field, 8}, + {"ClientHelloInfo.Extensions", Field, 24}, {"ClientHelloInfo.ServerName", Field, 4}, {"ClientHelloInfo.SignatureSchemes", Field, 8}, {"ClientHelloInfo.SupportedCurves", Field, 4}, @@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{ {"Config.CurvePreferences", Field, 3}, {"Config.DynamicRecordSizingDisabled", Field, 7}, {"Config.EncryptedClientHelloConfigList", Field, 23}, + {"Config.EncryptedClientHelloKeys", Field, 24}, {"Config.EncryptedClientHelloRejectionVerify", Field, 23}, {"Config.GetCertificate", Field, 4}, {"Config.GetClientCertificate", Field, 8}, @@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{ {"ECHRejectionError", Type, 23}, {"ECHRejectionError.RetryConfigList", Field, 23}, {"Ed25519", Const, 13}, + {"EncryptedClientHelloKey", Type, 24}, + {"EncryptedClientHelloKey.Config", Field, 24}, + {"EncryptedClientHelloKey.PrivateKey", Field, 24}, + {"EncryptedClientHelloKey.SendAsRetry", Field, 24}, {"InsecureCipherSuites", Func, 14}, {"Listen", Func, 0}, {"LoadX509KeyPair", Func, 0}, @@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{ {"VersionTLS12", Const, 2}, {"VersionTLS13", Const, 12}, {"X25519", Const, 8}, + {"X25519MLKEM768", Const, 24}, {"X509KeyPair", Func, 0}, }, "crypto/x509": { @@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{ {"(ConstraintViolationError).Error", Method, 0}, {"(HostnameError).Error", Method, 0}, {"(InsecureAlgorithmError).Error", Method, 6}, + {"(OID).AppendBinary", Method, 24}, + {"(OID).AppendText", Method, 24}, {"(OID).Equal", Method, 22}, {"(OID).EqualASN1OID", Method, 22}, {"(OID).MarshalBinary", Method, 23}, @@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.Extensions", Field, 2}, {"Certificate.ExtraExtensions", Field, 2}, {"Certificate.IPAddresses", Field, 1}, + {"Certificate.InhibitAnyPolicy", 
Field, 24}, + {"Certificate.InhibitAnyPolicyZero", Field, 24}, + {"Certificate.InhibitPolicyMapping", Field, 24}, + {"Certificate.InhibitPolicyMappingZero", Field, 24}, {"Certificate.IsCA", Field, 0}, {"Certificate.Issuer", Field, 0}, {"Certificate.IssuingCertificateURL", Field, 2}, @@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.PermittedURIDomains", Field, 10}, {"Certificate.Policies", Field, 22}, {"Certificate.PolicyIdentifiers", Field, 0}, + {"Certificate.PolicyMappings", Field, 24}, {"Certificate.PublicKey", Field, 0}, {"Certificate.PublicKeyAlgorithm", Field, 0}, {"Certificate.Raw", Field, 0}, @@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{ {"Certificate.RawSubject", Field, 0}, {"Certificate.RawSubjectPublicKeyInfo", Field, 0}, {"Certificate.RawTBSCertificate", Field, 0}, + {"Certificate.RequireExplicitPolicy", Field, 24}, + {"Certificate.RequireExplicitPolicyZero", Field, 24}, {"Certificate.SerialNumber", Field, 0}, {"Certificate.Signature", Field, 0}, {"Certificate.SignatureAlgorithm", Field, 0}, @@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{ {"NameConstraintsWithoutSANs", Const, 10}, {"NameMismatch", Const, 8}, {"NewCertPool", Func, 0}, + {"NoValidChains", Const, 24}, {"NotAuthorizedToSign", Const, 0}, {"OID", Type, 22}, {"OIDFromInts", Func, 22}, @@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{ {"ParsePKCS8PrivateKey", Func, 0}, {"ParsePKIXPublicKey", Func, 0}, {"ParseRevocationList", Func, 19}, + {"PolicyMapping", Type, 24}, + {"PolicyMapping.IssuerDomainPolicy", Field, 24}, + {"PolicyMapping.SubjectDomainPolicy", Field, 24}, {"PublicKeyAlgorithm", Type, 0}, {"PureEd25519", Const, 13}, {"RSA", Const, 0}, @@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{ {"UnknownPublicKeyAlgorithm", Const, 0}, {"UnknownSignatureAlgorithm", Const, 0}, {"VerifyOptions", Type, 0}, + {"VerifyOptions.CertificatePolicies", Field, 24}, {"VerifyOptions.CurrentTime", Field, 0}, {"VerifyOptions.DNSName", Field, 0}, {"VerifyOptions.Intermediates", Field, 0}, @@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*File).DynString", Method, 1}, {"(*File).DynValue", Method, 21}, {"(*File).DynamicSymbols", Method, 4}, + {"(*File).DynamicVersionNeeds", Method, 24}, + {"(*File).DynamicVersions", Method, 24}, {"(*File).ImportedLibraries", Method, 0}, {"(*File).ImportedSymbols", Method, 0}, {"(*File).Section", Method, 0}, @@ -2240,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{ {"DynFlag", Type, 0}, {"DynFlag1", Type, 21}, {"DynTag", Type, 0}, + {"DynamicVersion", Type, 24}, + {"DynamicVersion.Deps", Field, 24}, + {"DynamicVersion.Flags", Field, 24}, + {"DynamicVersion.Index", Field, 24}, + {"DynamicVersion.Name", Field, 24}, + {"DynamicVersionDep", Type, 24}, + {"DynamicVersionDep.Dep", Field, 24}, + {"DynamicVersionDep.Flags", Field, 24}, + {"DynamicVersionDep.Index", Field, 24}, + {"DynamicVersionFlag", Type, 24}, + {"DynamicVersionNeed", Type, 24}, + {"DynamicVersionNeed.Name", Field, 24}, + {"DynamicVersionNeed.Needs", Field, 24}, {"EI_ABIVERSION", Const, 0}, {"EI_CLASS", Const, 0}, {"EI_DATA", Const, 0}, @@ -3726,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{ {"Symbol.Size", Field, 0}, {"Symbol.Value", Field, 0}, {"Symbol.Version", Field, 13}, + {"Symbol.VersionIndex", Field, 24}, + {"Symbol.VersionScope", Field, 24}, + {"SymbolVersionScope", Type, 24}, {"Type", Type, 0}, + {"VER_FLG_BASE", Const, 24}, + {"VER_FLG_INFO", Const, 24}, + {"VER_FLG_WEAK", Const, 24}, {"Version", Type, 0}, + 
{"VersionScopeGlobal", Const, 24}, + {"VersionScopeHidden", Const, 24}, + {"VersionScopeLocal", Const, 24}, + {"VersionScopeNone", Const, 24}, + {"VersionScopeSpecific", Const, 24}, }, "debug/gosym": { {"(*DecodingError).Error", Method, 0}, @@ -4453,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{ {"FS", Type, 16}, }, "encoding": { + {"BinaryAppender", Type, 24}, {"BinaryMarshaler", Type, 2}, {"BinaryUnmarshaler", Type, 2}, + {"TextAppender", Type, 24}, {"TextMarshaler", Type, 2}, {"TextUnmarshaler", Type, 2}, }, @@ -5984,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{ {"(*Interface).Complete", Method, 5}, {"(*Interface).Embedded", Method, 5}, {"(*Interface).EmbeddedType", Method, 11}, + {"(*Interface).EmbeddedTypes", Method, 24}, {"(*Interface).Empty", Method, 5}, {"(*Interface).ExplicitMethod", Method, 5}, + {"(*Interface).ExplicitMethods", Method, 24}, {"(*Interface).IsComparable", Method, 18}, {"(*Interface).IsImplicit", Method, 18}, {"(*Interface).IsMethodSet", Method, 18}, {"(*Interface).MarkImplicit", Method, 18}, {"(*Interface).Method", Method, 5}, + {"(*Interface).Methods", Method, 24}, {"(*Interface).NumEmbeddeds", Method, 5}, {"(*Interface).NumExplicitMethods", Method, 5}, {"(*Interface).NumMethods", Method, 5}, @@ -6011,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*MethodSet).At", Method, 5}, {"(*MethodSet).Len", Method, 5}, {"(*MethodSet).Lookup", Method, 5}, + {"(*MethodSet).Methods", Method, 24}, {"(*MethodSet).String", Method, 5}, {"(*Named).AddMethod", Method, 5}, {"(*Named).Method", Method, 5}, + {"(*Named).Methods", Method, 24}, {"(*Named).NumMethods", Method, 5}, {"(*Named).Obj", Method, 5}, {"(*Named).Origin", Method, 18}, @@ -6054,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Pointer).String", Method, 5}, {"(*Pointer).Underlying", Method, 5}, {"(*Scope).Child", Method, 5}, + {"(*Scope).Children", Method, 24}, {"(*Scope).Contains", Method, 5}, {"(*Scope).End", Method, 5}, {"(*Scope).Innermost", Method, 5}, @@ -6089,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*StdSizes).Offsetsof", Method, 5}, {"(*StdSizes).Sizeof", Method, 5}, {"(*Struct).Field", Method, 5}, + {"(*Struct).Fields", Method, 24}, {"(*Struct).NumFields", Method, 5}, {"(*Struct).String", Method, 5}, {"(*Struct).Tag", Method, 5}, @@ -6100,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{ {"(*Tuple).Len", Method, 5}, {"(*Tuple).String", Method, 5}, {"(*Tuple).Underlying", Method, 5}, + {"(*Tuple).Variables", Method, 24}, {"(*TypeList).At", Method, 18}, {"(*TypeList).Len", Method, 18}, + {"(*TypeList).Types", Method, 24}, {"(*TypeName).Exported", Method, 5}, {"(*TypeName).Id", Method, 5}, {"(*TypeName).IsAlias", Method, 9}, @@ -6119,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{ {"(*TypeParam).Underlying", Method, 18}, {"(*TypeParamList).At", Method, 18}, {"(*TypeParamList).Len", Method, 18}, + {"(*TypeParamList).TypeParams", Method, 24}, {"(*Union).Len", Method, 18}, {"(*Union).String", Method, 18}, {"(*Union).Term", Method, 18}, + {"(*Union).Terms", Method, 24}, {"(*Union).Underlying", Method, 18}, {"(*Var).Anonymous", Method, 5}, {"(*Var).Embedded", Method, 11}, @@ -6392,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{ {"(*Hash).WriteByte", Method, 14}, {"(*Hash).WriteString", Method, 14}, {"Bytes", Func, 19}, + {"Comparable", Func, 24}, {"Hash", Type, 14}, {"MakeSeed", Func, 14}, {"Seed", Type, 14}, {"String", Func, 19}, + {"WriteComparable", Func, 24}, }, "html": { {"EscapeString", Func, 0}, @@ -7082,6 +7224,7 @@ var 
PackageSymbols = map[string][]Symbol{ {"(*JSONHandler).WithGroup", Method, 21}, {"(*Level).UnmarshalJSON", Method, 21}, {"(*Level).UnmarshalText", Method, 21}, + {"(*LevelVar).AppendText", Method, 24}, {"(*LevelVar).Level", Method, 21}, {"(*LevelVar).MarshalText", Method, 21}, {"(*LevelVar).Set", Method, 21}, @@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{ {"(Attr).Equal", Method, 21}, {"(Attr).String", Method, 21}, {"(Kind).String", Method, 21}, + {"(Level).AppendText", Method, 24}, {"(Level).Level", Method, 21}, {"(Level).MarshalJSON", Method, 21}, {"(Level).MarshalText", Method, 21}, @@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{ {"Debug", Func, 21}, {"DebugContext", Func, 21}, {"Default", Func, 21}, + {"DiscardHandler", Var, 24}, {"Duration", Func, 21}, {"DurationValue", Func, 21}, {"Error", Func, 21}, @@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Float).Acc", Method, 5}, {"(*Float).Add", Method, 5}, {"(*Float).Append", Method, 5}, + {"(*Float).AppendText", Method, 24}, {"(*Float).Cmp", Method, 5}, {"(*Float).Copy", Method, 5}, {"(*Float).Float32", Method, 5}, @@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).And", Method, 0}, {"(*Int).AndNot", Method, 0}, {"(*Int).Append", Method, 6}, + {"(*Int).AppendText", Method, 24}, {"(*Int).Binomial", Method, 0}, {"(*Int).Bit", Method, 0}, {"(*Int).BitLen", Method, 0}, @@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Int).Xor", Method, 0}, {"(*Rat).Abs", Method, 0}, {"(*Rat).Add", Method, 0}, + {"(*Rat).AppendText", Method, 24}, {"(*Rat).Cmp", Method, 0}, {"(*Rat).Denom", Method, 0}, {"(*Rat).Float32", Method, 4}, @@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{ {"Zipf", Type, 0}, }, "math/rand/v2": { + {"(*ChaCha8).AppendBinary", Method, 24}, {"(*ChaCha8).MarshalBinary", Method, 22}, {"(*ChaCha8).Read", Method, 23}, {"(*ChaCha8).Seed", Method, 22}, {"(*ChaCha8).Uint64", Method, 22}, {"(*ChaCha8).UnmarshalBinary", Method, 22}, + {"(*PCG).AppendBinary", Method, 24}, {"(*PCG).MarshalBinary", Method, 22}, {"(*PCG).Seed", Method, 22}, {"(*PCG).Uint64", Method, 22}, @@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*UnixListener).SyscallConn", Method, 10}, {"(Flags).String", Method, 0}, {"(HardwareAddr).String", Method, 0}, + {"(IP).AppendText", Method, 24}, {"(IP).DefaultMask", Method, 0}, {"(IP).Equal", Method, 0}, {"(IP).IsGlobalUnicast", Method, 0}, @@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*MaxBytesError).Error", Method, 19}, {"(*ProtocolError).Error", Method, 0}, {"(*ProtocolError).Is", Method, 21}, + {"(*Protocols).SetHTTP1", Method, 24}, + {"(*Protocols).SetHTTP2", Method, 24}, + {"(*Protocols).SetUnencryptedHTTP2", Method, 24}, {"(*Request).AddCookie", Method, 0}, {"(*Request).BasicAuth", Method, 4}, {"(*Request).Clone", Method, 13}, @@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{ {"(Header).Values", Method, 14}, {"(Header).Write", Method, 0}, {"(Header).WriteSubset", Method, 0}, + {"(Protocols).HTTP1", Method, 24}, + {"(Protocols).HTTP2", Method, 24}, + {"(Protocols).String", Method, 24}, + {"(Protocols).UnencryptedHTTP2", Method, 24}, {"AllowQuerySemicolons", Func, 17}, {"CanonicalHeaderKey", Func, 0}, {"Client", Type, 0}, @@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{ {"FileSystem", Type, 0}, {"Flusher", Type, 0}, {"Get", Func, 0}, + {"HTTP2Config", Type, 24}, + {"HTTP2Config.CountError", Field, 24}, + {"HTTP2Config.MaxConcurrentStreams", Field, 24}, 
+ {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24}, + {"HTTP2Config.MaxReadFrameSize", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24}, + {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24}, + {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24}, + {"HTTP2Config.PingTimeout", Field, 24}, + {"HTTP2Config.SendPingTimeout", Field, 24}, + {"HTTP2Config.WriteByteTimeout", Field, 24}, {"Handle", Func, 0}, {"HandleFunc", Func, 0}, {"Handler", Type, 0}, @@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{ {"PostForm", Func, 0}, {"ProtocolError", Type, 0}, {"ProtocolError.ErrorString", Field, 0}, + {"Protocols", Type, 24}, {"ProxyFromEnvironment", Func, 0}, {"ProxyURL", Func, 0}, {"PushOptions", Type, 8}, @@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{ {"Server.ConnState", Field, 3}, {"Server.DisableGeneralOptionsHandler", Field, 20}, {"Server.ErrorLog", Field, 3}, + {"Server.HTTP2", Field, 24}, {"Server.Handler", Field, 0}, {"Server.IdleTimeout", Field, 8}, {"Server.MaxHeaderBytes", Field, 0}, + {"Server.Protocols", Field, 24}, {"Server.ReadHeaderTimeout", Field, 8}, {"Server.ReadTimeout", Field, 0}, {"Server.TLSConfig", Field, 0}, @@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{ {"Transport.ExpectContinueTimeout", Field, 6}, {"Transport.ForceAttemptHTTP2", Field, 13}, {"Transport.GetProxyConnectHeader", Field, 16}, + {"Transport.HTTP2", Field, 24}, {"Transport.IdleConnTimeout", Field, 7}, {"Transport.MaxConnsPerHost", Field, 11}, {"Transport.MaxIdleConns", Field, 7}, {"Transport.MaxIdleConnsPerHost", Field, 0}, {"Transport.MaxResponseHeaderBytes", Field, 7}, {"Transport.OnProxyConnectResponse", Field, 20}, + {"Transport.Protocols", Field, 24}, {"Transport.Proxy", Field, 0}, {"Transport.ProxyConnectHeader", Field, 8}, {"Transport.ReadBufferSize", Field, 13}, @@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*AddrPort).UnmarshalText", Method, 18}, {"(*Prefix).UnmarshalBinary", Method, 18}, {"(*Prefix).UnmarshalText", Method, 18}, + {"(Addr).AppendBinary", Method, 24}, + {"(Addr).AppendText", Method, 24}, {"(Addr).AppendTo", Method, 18}, {"(Addr).As16", Method, 18}, {"(Addr).As4", Method, 18}, @@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{ {"(Addr).WithZone", Method, 18}, {"(Addr).Zone", Method, 18}, {"(AddrPort).Addr", Method, 18}, + {"(AddrPort).AppendBinary", Method, 24}, + {"(AddrPort).AppendText", Method, 24}, {"(AddrPort).AppendTo", Method, 18}, {"(AddrPort).Compare", Method, 22}, {"(AddrPort).IsValid", Method, 18}, @@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{ {"(AddrPort).Port", Method, 18}, {"(AddrPort).String", Method, 18}, {"(Prefix).Addr", Method, 18}, + {"(Prefix).AppendBinary", Method, 24}, + {"(Prefix).AppendText", Method, 24}, {"(Prefix).AppendTo", Method, 18}, {"(Prefix).Bits", Method, 18}, {"(Prefix).Contains", Method, 18}, @@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*Error).Temporary", Method, 6}, {"(*Error).Timeout", Method, 6}, {"(*Error).Unwrap", Method, 13}, + {"(*URL).AppendBinary", Method, 24}, {"(*URL).EscapedFragment", Method, 15}, {"(*URL).EscapedPath", Method, 5}, {"(*URL).Hostname", Method, 8}, @@ -8967,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{ {"(*ProcessState).SysUsage", Method, 0}, {"(*ProcessState).SystemTime", Method, 0}, {"(*ProcessState).UserTime", Method, 0}, + {"(*Root).Close", Method, 24}, + {"(*Root).Create", Method, 24}, + {"(*Root).FS", 
Method, 24}, + {"(*Root).Lstat", Method, 24}, + {"(*Root).Mkdir", Method, 24}, + {"(*Root).Name", Method, 24}, + {"(*Root).Open", Method, 24}, + {"(*Root).OpenFile", Method, 24}, + {"(*Root).OpenRoot", Method, 24}, + {"(*Root).Remove", Method, 24}, + {"(*Root).Stat", Method, 24}, {"(*SyscallError).Error", Method, 0}, {"(*SyscallError).Timeout", Method, 10}, {"(*SyscallError).Unwrap", Method, 13}, @@ -9060,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{ {"O_WRONLY", Const, 0}, {"Open", Func, 0}, {"OpenFile", Func, 0}, + {"OpenInRoot", Func, 24}, + {"OpenRoot", Func, 24}, {"PathError", Type, 0}, {"PathError.Err", Field, 0}, {"PathError.Op", Field, 0}, @@ -9081,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{ {"Remove", Func, 0}, {"RemoveAll", Func, 0}, {"Rename", Func, 0}, + {"Root", Type, 24}, {"SEEK_CUR", Const, 0}, {"SEEK_END", Const, 0}, {"SEEK_SET", Const, 0}, @@ -9422,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{ {"Zero", Func, 0}, }, "regexp": { + {"(*Regexp).AppendText", Method, 24}, {"(*Regexp).Copy", Method, 6}, {"(*Regexp).Expand", Method, 0}, {"(*Regexp).ExpandString", Method, 0}, @@ -9602,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{ {"(*StackRecord).Stack", Method, 0}, {"(*TypeAssertionError).Error", Method, 0}, {"(*TypeAssertionError).RuntimeError", Method, 0}, + {"(Cleanup).Stop", Method, 24}, + {"AddCleanup", Func, 24}, {"BlockProfile", Func, 1}, {"BlockProfileRecord", Type, 1}, {"BlockProfileRecord.Count", Field, 1}, @@ -9612,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{ {"Caller", Func, 0}, {"Callers", Func, 0}, {"CallersFrames", Func, 7}, + {"Cleanup", Type, 24}, {"Compiler", Const, 0}, {"Error", Type, 0}, {"Frame", Type, 7}, @@ -9974,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{ {"EqualFold", Func, 0}, {"Fields", Func, 0}, {"FieldsFunc", Func, 0}, + {"FieldsFuncSeq", Func, 24}, + {"FieldsSeq", Func, 24}, {"HasPrefix", Func, 0}, {"HasSuffix", Func, 0}, {"Index", Func, 0}, @@ -9986,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{ {"LastIndexAny", Func, 0}, {"LastIndexByte", Func, 5}, {"LastIndexFunc", Func, 0}, + {"Lines", Func, 24}, {"Map", Func, 0}, {"NewReader", Func, 0}, {"NewReplacer", Func, 0}, @@ -9997,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{ {"Split", Func, 0}, {"SplitAfter", Func, 0}, {"SplitAfterN", Func, 0}, + {"SplitAfterSeq", Func, 24}, {"SplitN", Func, 0}, + {"SplitSeq", Func, 24}, {"Title", Func, 0}, {"ToLower", Func, 0}, {"ToLowerSpecial", Func, 0}, @@ -16413,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{ {"ValueOf", Func, 0}, }, "testing": { + {"(*B).Chdir", Method, 24}, {"(*B).Cleanup", Method, 14}, + {"(*B).Context", Method, 24}, {"(*B).Elapsed", Method, 20}, {"(*B).Error", Method, 0}, {"(*B).Errorf", Method, 0}, @@ -16425,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).Helper", Method, 9}, {"(*B).Log", Method, 0}, {"(*B).Logf", Method, 0}, + {"(*B).Loop", Method, 24}, {"(*B).Name", Method, 8}, {"(*B).ReportAllocs", Method, 1}, {"(*B).ReportMetric", Method, 13}, @@ -16442,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*B).StopTimer", Method, 0}, {"(*B).TempDir", Method, 15}, {"(*F).Add", Method, 18}, + {"(*F).Chdir", Method, 24}, {"(*F).Cleanup", Method, 18}, + {"(*F).Context", Method, 24}, {"(*F).Error", Method, 18}, {"(*F).Errorf", Method, 18}, {"(*F).Fail", Method, 18}, @@ -16463,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{ {"(*F).TempDir", Method, 18}, {"(*M).Run", Method, 4}, {"(*PB).Next", Method, 3}, + {"(*T).Chdir", Method, 24}, 
{"(*T).Cleanup", Method, 14}, + {"(*T).Context", Method, 24}, {"(*T).Deadline", Method, 15}, {"(*T).Error", Method, 0}, {"(*T).Errorf", Method, 0}, @@ -16954,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{ {"(Time).Add", Method, 0}, {"(Time).AddDate", Method, 0}, {"(Time).After", Method, 0}, + {"(Time).AppendBinary", Method, 24}, {"(Time).AppendFormat", Method, 5}, + {"(Time).AppendText", Method, 24}, {"(Time).Before", Method, 0}, {"(Time).Clock", Method, 0}, {"(Time).Compare", Method, 20}, @@ -17428,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{ {"String", Func, 0}, {"StringData", Func, 0}, }, + "weak": { + {"(Pointer).Value", Method, 24}, + {"Make", Func, 24}, + {"Pointer", Type, 24}, + }, } diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go index 0b84acc5..cdae2b8e 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/common.go +++ b/vendor/golang.org/x/tools/internal/typeparams/common.go @@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool { _, ok := types.Unalias(t).(*types.TypeParam) return ok } - -// GenericAssignableTo is a generalization of types.AssignableTo that -// implements the following rule for uninstantiated generic types: -// -// If V and T are generic named types, then V is considered assignable to T if, -// for every possible instantiation of V[A_1, ..., A_N], the instantiation -// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N]. -// -// If T has structural constraints, they must be satisfied by V. -// -// For example, consider the following type declarations: -// -// type Interface[T any] interface { -// Accept(T) -// } -// -// type Container[T any] struct { -// Element T -// } -// -// func (c Container[T]) Accept(t T) { c.Element = t } -// -// In this case, GenericAssignableTo reports that instantiations of Container -// are assignable to the corresponding instantiation of Interface. -func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool { - V = types.Unalias(V) - T = types.Unalias(T) - - // If V and T are not both named, or do not have matching non-empty type - // parameter lists, fall back on types.AssignableTo. - - VN, Vnamed := V.(*types.Named) - TN, Tnamed := T.(*types.Named) - if !Vnamed || !Tnamed { - return types.AssignableTo(V, T) - } - - vtparams := VN.TypeParams() - ttparams := TN.TypeParams() - if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 { - return types.AssignableTo(V, T) - } - - // V and T have the same (non-zero) number of type params. Instantiate both - // with the type parameters of V. This must always succeed for V, and will - // succeed for T if and only if the type set of each type parameter of V is a - // subset of the type set of the corresponding type parameter of T, meaning - // that every instantiation of V corresponds to a valid instantiation of T. - - // Minor optimization: ensure we share a context across the two - // instantiations below. 
- if ctxt == nil { - ctxt = types.NewContext() - } - - var targs []types.Type - for i := 0; i < vtparams.Len(); i++ { - targs = append(targs, vtparams.At(i)) - } - - vinst, err := types.Instantiate(ctxt, V, targs, true) - if err != nil { - panic("type parameters should satisfy their own constraints") - } - - tinst, err := types.Instantiate(ctxt, T, targs, true) - if err != nil { - return false - } - - return types.AssignableTo(vinst, tinst) -} diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go index 6e83c6fb..27a2b179 100644 --- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go +++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go @@ -109,8 +109,13 @@ func CoreType(T types.Type) types.Type { // // NormalTerms makes no guarantees about the order of terms, except that it // is deterministic. -func NormalTerms(typ types.Type) ([]*types.Term, error) { - switch typ := typ.Underlying().(type) { +func NormalTerms(T types.Type) ([]*types.Term, error) { + // typeSetOf(T) == typeSetOf(Unalias(T)) + typ := types.Unalias(T) + if named, ok := typ.(*types.Named); ok { + typ = named.Underlying() + } + switch typ := typ.(type) { case *types.TypeParam: return StructuralTerms(typ) case *types.Union: @@ -118,7 +123,7 @@ func NormalTerms(typ types.Type) ([]*types.Term, error) { case *types.Interface: return InterfaceTermSet(typ) default: - return []*types.Term{types.NewTerm(false, typ)}, nil + return []*types.Term{types.NewTerm(false, T)}, nil } } diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go index 131caab2..235a6def 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go @@ -966,7 +966,7 @@ const ( // var _ = string(x) InvalidConversion - // InvalidUntypedConversion occurs when an there is no valid implicit + // InvalidUntypedConversion occurs when there is no valid implicit // conversion from an untyped value satisfying the type constraints of the // context in which it is used. // diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go new file mode 100644 index 00000000..b64f714e --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go @@ -0,0 +1,46 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +import ( + "go/ast" + "go/types" + "strconv" +) + +// FileQualifier returns a [types.Qualifier] function that qualifies +// imported symbols appropriately based on the import environment of a given +// file. +// If the same package is imported multiple times, the last appearance is +// recorded. +func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier { + // Construct mapping of import paths to their defined names. + // It is only necessary to look at renaming imports. + imports := make(map[string]string) + for _, imp := range f.Imports { + if imp.Name != nil && imp.Name.Name != "_" { + path, _ := strconv.Unquote(imp.Path.Value) + imports[path] = imp.Name.Name + } + } + + // Define qualifier to replace full package paths with names of the imports. 
+ return func(p *types.Package) string { + if p == nil || p == pkg { + return "" + } + + if name, ok := imports[p.Path()]; ok { + if name == "." { + return "" + } else { + return name + } + } + + // If there is no local renaming, fall back to the package name. + return p.Name() + } +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go index ba6f4f4e..8352ea76 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go @@ -11,6 +11,9 @@ import ( // ReceiverNamed returns the named type (if any) associated with the // type of recv, which may be of the form N or *N, or aliases thereof. // It also reports whether a Pointer was present. +// +// The named result may be nil if recv is from a method on an +// anonymous interface or struct types or in ill-typed code. func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) { t := recv.Type() if ptr, ok := types.Unalias(t).(*types.Pointer); ok { diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go index df3ea521..34534879 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/types.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go @@ -82,6 +82,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier { type NamedOrAlias interface { types.Type Obj() *types.TypeName + // TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22. } // TypeParams is a light shim around t.TypeParams(). @@ -119,3 +120,8 @@ func Origin(t NamedOrAlias) NamedOrAlias { } return t } + +// IsPackageLevel reports whether obj is a package-level symbol. +func IsPackageLevel(obj types.Object) bool { + return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope() +} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go new file mode 100644 index 00000000..e5da0495 --- /dev/null +++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go @@ -0,0 +1,40 @@ +// Copyright 2024 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package typesinternal + +// TODO(adonovan): when CL 645115 lands, define the go1.25 version of +// this API that actually does something. + +import "go/types" + +type VarKind uint8 + +const ( + _ VarKind = iota // (not meaningful) + PackageVar // a package-level variable + LocalVar // a local variable + RecvVar // a method receiver variable + ParamVar // a function parameter variable + ResultVar // a function result variable + FieldVar // a struct field +) + +func (kind VarKind) String() string { + return [...]string{ + 0: "VarKind(0)", + PackageVar: "PackageVar", + LocalVar: "LocalVar", + RecvVar: "RecvVar", + ParamVar: "ParamVar", + ResultVar: "ResultVar", + FieldVar: "FieldVar", + }[kind] +} + +// GetVarKind returns an invalid VarKind. +func GetVarKind(v *types.Var) VarKind { return 0 } + +// SetVarKind has no effect. 
+func SetVarKind(v *types.Var, kind VarKind) {} diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go index 10669806..d272949c 100644 --- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go +++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go @@ -9,62 +9,97 @@ import ( "go/ast" "go/token" "go/types" - "strconv" "strings" ) -// ZeroString returns the string representation of the "zero" value of the type t. -// This string can be used on the right-hand side of an assignment where the -// left-hand side has that explicit type. -// Exception: This does not apply to tuples. Their string representation is -// informational only and cannot be used in an assignment. +// ZeroString returns the string representation of the zero value for any type t. +// The boolean result indicates whether the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroString may return a partially correct +// string representation. The caller should use the returned isValid boolean +// to determine the validity of the expression. +// // When assigning to a wider type (such as 'any'), it's the caller's // responsibility to handle any necessary type conversions. +// +// This string can be used on the right-hand side of an assignment where the +// left-hand side has that explicit type. +// References to named types are qualified by an appropriate (optional) +// qualifier function. +// Exception: This does not apply to tuples. Their string representation is +// informational only and cannot be used in an assignment. +// // See [ZeroExpr] for a variant that returns an [ast.Expr]. -func ZeroString(t types.Type, qf types.Qualifier) string { +func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) { switch t := t.(type) { case *types.Basic: switch { case t.Info()&types.IsBoolean != 0: - return "false" + return "false", true case t.Info()&types.IsNumeric != 0: - return "0" + return "0", true case t.Info()&types.IsString != 0: - return `""` + return `""`, true case t.Kind() == types.UnsafePointer: fallthrough case t.Kind() == types.UntypedNil: - return "nil" + return "nil", true + case t.Kind() == types.Invalid: + return "invalid", false default: - panic(fmt.Sprint("ZeroString for unexpected type:", t)) + panic(fmt.Sprintf("ZeroString for unexpected type %v", t)) } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return "nil" + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return "nil", true - case *types.Named, *types.Alias: + case *types.Interface: + if !t.IsMethodSet() { + return "invalid", false + } + return "nil", true + + case *types.Named: switch under := t.Underlying().(type) { case *types.Struct, *types.Array: - return types.TypeString(t, qf) + "{}" + return types.TypeString(t, qual) + "{}", true default: - return ZeroString(under, qf) + return ZeroString(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return types.TypeString(t, qual) + "{}", true + default: + // A type parameter can have alias but alias type's underlying type + // can never be a type parameter. + // Use types.Unalias to preserve the info of type parameter instead + // of call Underlying() going right through and get the underlying + // type of the type parameter which is always an interface. 
+ return ZeroString(types.Unalias(t), qual) } case *types.Array, *types.Struct: - return types.TypeString(t, qf) + "{}" + return types.TypeString(t, qual) + "{}", true case *types.TypeParam: // Assumes func new is not shadowed. - return "*new(" + types.TypeString(t, qf) + ")" + return "*new(" + types.TypeString(t, qual) + ")", true case *types.Tuple: // Tuples are not normal values. // We are currently format as "(t[0], ..., t[n])". Could be something else. + isValid := true components := make([]string, t.Len()) for i := 0; i < t.Len(); i++ { - components[i] = ZeroString(t.At(i).Type(), qf) + comp, ok := ZeroString(t.At(i).Type(), qual) + + components[i] = comp + isValid = isValid && ok } - return "(" + strings.Join(components, ", ") + ")" + return "(" + strings.Join(components, ", ") + ")", isValid case *types.Union: // Variables of these types cannot be created, so it makes @@ -76,45 +111,72 @@ func ZeroString(t types.Type, qf types.Qualifier) string { } } -// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t. -// ZeroExpr is defined for types that are suitable for variables. -// It may panic for other types such as Tuple or Union. +// ZeroExpr returns the ast.Expr representation of the zero value for any type t. +// The boolean result indicates whether the type is or contains an invalid type +// or a non-basic (constraint) interface type. +// +// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr +// representation. The caller should use the returned isValid boolean to determine +// the validity of the expression. +// +// This function is designed for types suitable for variables and should not be +// used with Tuple or Union types.References to named types are qualified by an +// appropriate (optional) qualifier function. +// // See [ZeroString] for a variant that returns a string. 
-func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { +func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) { + switch t := t.(type) { case *types.Basic: switch { case t.Info()&types.IsBoolean != 0: - return &ast.Ident{Name: "false"} + return &ast.Ident{Name: "false"}, true case t.Info()&types.IsNumeric != 0: - return &ast.BasicLit{Kind: token.INT, Value: "0"} + return &ast.BasicLit{Kind: token.INT, Value: "0"}, true case t.Info()&types.IsString != 0: - return &ast.BasicLit{Kind: token.STRING, Value: `""`} + return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true case t.Kind() == types.UnsafePointer: fallthrough case t.Kind() == types.UntypedNil: - return ast.NewIdent("nil") + return ast.NewIdent("nil"), true + case t.Kind() == types.Invalid: + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false default: - panic(fmt.Sprint("ZeroExpr for unexpected type:", t)) + panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t)) } - case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature: - return ast.NewIdent("nil") + case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature: + return ast.NewIdent("nil"), true - case *types.Named, *types.Alias: + case *types.Interface: + if !t.IsMethodSet() { + return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false + } + return ast.NewIdent("nil"), true + + case *types.Named: switch under := t.Underlying().(type) { case *types.Struct, *types.Array: return &ast.CompositeLit{ - Type: TypeExpr(f, pkg, typ), - } + Type: TypeExpr(t, qual), + }, true default: - return ZeroExpr(f, pkg, under) + return ZeroExpr(under, qual) + } + + case *types.Alias: + switch t.Underlying().(type) { + case *types.Struct, *types.Array: + return &ast.CompositeLit{ + Type: TypeExpr(t, qual), + }, true + default: + return ZeroExpr(types.Unalias(t), qual) } case *types.Array, *types.Struct: return &ast.CompositeLit{ - Type: TypeExpr(f, pkg, typ), - } + Type: TypeExpr(t, qual), + }, true case *types.TypeParam: return &ast.StarExpr{ // *new(T) @@ -125,7 +187,7 @@ func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { ast.NewIdent(t.Obj().Name()), }, }, - } + }, true case *types.Tuple: // Unlike ZeroString, there is no ast.Expr can express tuple by @@ -157,16 +219,14 @@ func IsZeroExpr(expr ast.Expr) bool { } // TypeExpr returns syntax for the specified type. References to named types -// from packages other than pkg are qualified by an appropriate package name, as -// defined by the import environment of file. +// are qualified by an appropriate (optional) qualifier function. // It may panic for types such as Tuple or Union. -func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { - switch t := typ.(type) { +func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr { + switch t := t.(type) { case *types.Basic: switch t.Kind() { case types.UnsafePointer: - // TODO(hxjiang): replace the implementation with types.Qualifier. 
- return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")} + return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")} default: return ast.NewIdent(t.Name()) } @@ -174,7 +234,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { case *types.Pointer: return &ast.UnaryExpr{ Op: token.MUL, - X: TypeExpr(f, pkg, t.Elem()), + X: TypeExpr(t.Elem(), qual), } case *types.Array: @@ -183,18 +243,18 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { Kind: token.INT, Value: fmt.Sprintf("%d", t.Len()), }, - Elt: TypeExpr(f, pkg, t.Elem()), + Elt: TypeExpr(t.Elem(), qual), } case *types.Slice: return &ast.ArrayType{ - Elt: TypeExpr(f, pkg, t.Elem()), + Elt: TypeExpr(t.Elem(), qual), } case *types.Map: return &ast.MapType{ - Key: TypeExpr(f, pkg, t.Key()), - Value: TypeExpr(f, pkg, t.Elem()), + Key: TypeExpr(t.Key(), qual), + Value: TypeExpr(t.Elem(), qual), } case *types.Chan: @@ -204,14 +264,14 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { } return &ast.ChanType{ Dir: dir, - Value: TypeExpr(f, pkg, t.Elem()), + Value: TypeExpr(t.Elem(), qual), } case *types.Signature: var params []*ast.Field for i := 0; i < t.Params().Len(); i++ { params = append(params, &ast.Field{ - Type: TypeExpr(f, pkg, t.Params().At(i).Type()), + Type: TypeExpr(t.Params().At(i).Type(), qual), Names: []*ast.Ident{ { Name: t.Params().At(i).Name(), @@ -226,7 +286,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { var returns []*ast.Field for i := 0; i < t.Results().Len(); i++ { returns = append(returns, &ast.Field{ - Type: TypeExpr(f, pkg, t.Results().At(i).Type()), + Type: TypeExpr(t.Results().At(i).Type(), qual), }) } return &ast.FuncType{ @@ -238,23 +298,9 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { }, } - case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam} - switch t.Obj().Pkg() { - case pkg, nil: - return ast.NewIdent(t.Obj().Name()) - } - pkgName := t.Obj().Pkg().Name() - - // TODO(hxjiang): replace the implementation with types.Qualifier. - // If the file already imports the package under another name, use that. - for _, cand := range f.Imports { - if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() { - if cand.Name != nil && cand.Name.Name != "" { - pkgName = cand.Name.Name - } - } - } - if pkgName == "." { + case *types.TypeParam: + pkgName := qual(t.Obj().Pkg()) + if pkgName == "" || t.Obj().Pkg() == nil { return ast.NewIdent(t.Obj().Name()) } return &ast.SelectorExpr{ @@ -262,6 +308,36 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { Sel: ast.NewIdent(t.Obj().Name()), } + // types.TypeParam also implements interface NamedOrAlias. To differentiate, + // case TypeParam need to be present before case NamedOrAlias. + // TODO(hxjiang): remove this comment once TypeArgs() is added to interface + // NamedOrAlias. + case NamedOrAlias: + var expr ast.Expr = ast.NewIdent(t.Obj().Name()) + if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" { + expr = &ast.SelectorExpr{ + X: ast.NewIdent(pkgName), + Sel: expr.(*ast.Ident), + } + } + + // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to + // typesinternal.NamedOrAlias. 
+ if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok { + if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 { + var indices []ast.Expr + for i := range typeArgs.Len() { + indices = append(indices, TypeExpr(typeArgs.At(i), qual)) + } + expr = &ast.IndexListExpr{ + X: expr, + Indices: indices, + } + } + } + + return expr + case *types.Struct: return ast.NewIdent(t.String()) @@ -269,9 +345,43 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr { return ast.NewIdent(t.String()) case *types.Union: - // TODO(hxjiang): handle the union through syntax (~A | ... | ~Z). - // Remove nil check when calling typesinternal.TypeExpr. - return nil + if t.Len() == 0 { + panic("Union type should have at least one term") + } + // Same as go/ast, the return expression will put last term in the + // Y field at topmost level of BinaryExpr. + // For union of type "float32 | float64 | int64", the structure looks + // similar to: + // { + // X: { + // X: float32, + // Op: | + // Y: float64, + // } + // Op: |, + // Y: int64, + // } + var union ast.Expr + for i := range t.Len() { + term := t.Term(i) + termExpr := TypeExpr(term.Type(), qual) + if term.Tilde() { + termExpr = &ast.UnaryExpr{ + Op: token.TILDE, + X: termExpr, + } + } + if i == 0 { + union = termExpr + } else { + union = &ast.BinaryExpr{ + X: union, + Op: token.OR, + Y: termExpr, + } + } + } + return union case *types.Tuple: panic("invalid input type types.Tuple") diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go index 0d7823b3..d88162ff 100644 --- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go +++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go @@ -70,12 +70,14 @@ func CreatePatch(a, b []byte) ([]Operation, error) { } var aI interface{} var bI interface{} - err := json.Unmarshal(a, &aI) - if err != nil { + aDec := json.NewDecoder(bytes.NewReader(a)) + aDec.UseNumber() + if err := aDec.Decode(&aI); err != nil { return nil, errBadJSONDoc } - err = json.Unmarshal(b, &bI) - if err != nil { + bDec := json.NewDecoder(bytes.NewReader(b)) + bDec.UseNumber() + if err := bDec.Decode(&bI); err != nil { return nil, errBadJSONDoc } return handleValues(aI, bI, "", []Operation{}) @@ -94,6 +96,11 @@ func matchesValue(av, bv interface{}) bool { if ok && bt == at { return true } + case json.Number: + bt, ok := bv.(json.Number) + if ok && bt == at { + return true + } case float64: bt, ok := bv.(float64) if ok && bt == at { @@ -212,7 +219,7 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation, if err != nil { return nil, err } - case string, float64, bool: + case string, float64, bool, json.Number: if !matchesValue(av, bv) { patch = append(patch, NewOperation("replace", p, bv)) } diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go index 24bc98ac..b5380505 100644 --- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go +++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go @@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro } else if xtErr != nil && xtErr != protoregistry.NotFound { return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr) } - if flags.ProtoLegacy { - if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() { - fd = nil // reset since the weak reference is not linked in - } - } // Handle unknown fields. 
if fd == nil { diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go index 7e87c760..669133d0 100644 --- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go +++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go @@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0)) // The type is the underlying field type (e.g., a repeated field may be // represented by []T, but the Go type passed in is just T). // A list of enum value descriptors must be provided for enum fields. -// This does not populate the Enum or Message (except for weak message). +// This does not populate the Enum or Message. // // This function is a best effort attempt; parsing errors are ignored. func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor { @@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri } case s == "packed": f.L1.EditionFeatures.IsPacked = true - case strings.HasPrefix(s, "weak="): - f.L1.IsWeak = true - f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):])) case strings.HasPrefix(s, "def="): // The default tag is special in that everything afterwards is the // default regardless of the presence of commas. @@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string { // the exact same semantics from the previous generator. tag = append(tag, "json="+jsonName) } - if fd.IsWeak() { - tag = append(tag, "weak="+string(fd.Message().FullName())) - } // The previous implementation does not tag extension fields as proto3, // even when the field is defined in a proto3 file. Match that behavior // for consistency. 
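The gomodules.xyz/jsonpatch/v2 hunk above switches CreatePatch to a json.Decoder with UseNumber, so numbers are compared as json.Number rather than float64 and integers beyond 2^53 no longer collapse while diffing. A minimal sketch of the effect, separate from the vendored patch itself (module import and values are illustrative):

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "gomodules.xyz/jsonpatch/v2"
)

func main() {
	// 9007199254740993 (2^53+1) is not representable as a float64; with the old
	// float64 decoding both documents parsed to the same number and no operation
	// was emitted. With UseNumber the values stay distinct json.Number strings.
	before := []byte(`{"id": 9007199254740992}`)
	after := []byte(`{"id": 9007199254740993}`)

	ops, err := jsonpatch.CreatePatch(before, after)
	if err != nil {
		panic(err)
	}
	out, _ := json.Marshal(ops)
	fmt.Println(string(out)) // expect a single "replace" operation at /id
}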
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go index 378b826f..688aabe4 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go @@ -19,7 +19,6 @@ import ( "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/internal/strs" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // Edition is an Enum for proto2.Edition @@ -275,7 +274,6 @@ type ( Kind protoreflect.Kind StringName stringName IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto - IsWeak bool // promoted from google.protobuf.FieldOptions IsLazy bool // promoted from google.protobuf.FieldOptions Default defaultValue ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields @@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool { return fd.L1.EditionFeatures.IsPacked } func (fd *Field) IsExtension() bool { return false } -func (fd *Field) IsWeak() bool { return fd.L1.IsWeak } +func (fd *Field) IsWeak() bool { return false } func (fd *Field) IsLazy() bool { return fd.L1.IsLazy } func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() } func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() } @@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor { return fd.L1.Enum } func (fd *Field) Message() protoreflect.MessageDescriptor { - if fd.L1.IsWeak { - if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil { - return d.(protoreflect.MessageDescriptor) - } - } return fd.L1.Message } func (fd *Field) IsMapEntry() bool { diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go index 67a51b32..d4c94458 100644 --- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go +++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go @@ -32,11 +32,6 @@ func (file *File) resolveMessages() { for j := range md.L2.Fields.List { fd := &md.L2.Fields.List[j] - // Weak fields are resolved upon actual use. - if fd.L1.IsWeak { - continue - } - // Resolve message field dependency. switch fd.L1.Kind { case protoreflect.EnumKind: @@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) { switch num { case genid.FileDescriptorProto_PublicDependency_field_number: fd.L2.Imports[v].IsPublic = true - case genid.FileDescriptorProto_WeakDependency_field_number: - fd.L2.Imports[v].IsWeak = true } case protowire.BytesType: v, m := protowire.ConsumeBytes(b) @@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) { switch num { case genid.FieldOptions_Packed_field_number: fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v) - case genid.FieldOptions_Weak_field_number: - fd.L1.IsWeak = protowire.DecodeBool(v) case genid.FieldOptions_Lazy_field_number: fd.L1.IsLazy = protowire.DecodeBool(v) case FieldOptions_EnforceUTF8: diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go index ba83fea4..e1b4130b 100644 --- a/vendor/google.golang.org/protobuf/internal/filetype/build.go +++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go @@ -63,7 +63,7 @@ type Builder struct { // message declarations in "flattened ordering". 
// // Dependencies are Go types for enums or messages referenced by - // message fields (excluding weak fields), for parent extended messages of + // message fields, for parent extended messages of // extension fields, for enums or messages referenced by extension fields, // and for input and output messages referenced by service methods. // Dependencies must come after declarations, but the ordering of diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go index 58372dd3..a06ccabc 100644 --- a/vendor/google.golang.org/protobuf/internal/flags/flags.go +++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go @@ -6,7 +6,7 @@ package flags // ProtoLegacy specifies whether to enable support for legacy functionality -// such as MessageSets, weak fields, and various other obscure behavior +// such as MessageSets, and various other obscure behavior // that is necessary to maintain backwards compatibility with proto1 or // the pre-release variants of proto2 and proto3. // diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go index 693d2e9e..99bb95ba 100644 --- a/vendor/google.golang.org/protobuf/internal/genid/goname.go +++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go @@ -11,15 +11,10 @@ const ( SizeCache_goname = "sizeCache" SizeCacheA_goname = "XXX_sizecache" - WeakFields_goname = "weakFields" - WeakFieldsA_goname = "XXX_weak" - UnknownFields_goname = "unknownFields" UnknownFieldsA_goname = "XXX_unrecognized" ExtensionFields_goname = "extensionFields" ExtensionFieldsA_goname = "XXX_InternalExtensions" ExtensionFieldsB_goname = "XXX_extensions" - - WeakFieldPrefix_goname = "XXX_weak_" ) diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go index 7c1f66c8..d14d7d93 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go @@ -5,15 +5,12 @@ package impl import ( - "fmt" "reflect" - "sync" "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/errors" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" "google.golang.org/protobuf/runtime/protoiface" ) @@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si } } -func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs { - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - }) - } - - return pointerCoderFuncs{ - size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int { - m, ok := p.WeakFields().get(f.num) - if !ok { - return 0 - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return sizeMessage(m, f.tagsize, opts) - }, - marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) { - m, ok := p.WeakFields().get(f.num) - if !ok { - return b, nil - } - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - return appendMessage(b, m, f.wiretag, opts) - }, - unmarshal: 
func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) { - fs := p.WeakFields() - m, ok := fs.get(f.num) - if !ok { - lazyInit() - if messageType == nil { - return unmarshalOutput{}, errUnknown - } - m = messageType.New().Interface() - fs.set(f.num, m) - } - return consumeMessage(b, m, wtyp, opts) - }, - isInit: func(p pointer, f *coderFieldInfo) error { - m, ok := p.WeakFields().get(f.num) - if !ok { - return nil - } - return proto.CheckInitialized(m) - }, - merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { - sm, ok := src.WeakFields().get(f.num) - if !ok { - return - } - dm, ok := dst.WeakFields().get(f.num) - if !ok { - lazyInit() - if messageType == nil { - panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName())) - } - dm = messageType.New().Interface() - dst.WeakFields().set(f.num, dm) - } - opts.Merge(dm, sm) - }, - } -} - func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs { if mi := getMessageInfo(ft); mi != nil { funcs := pointerCoderFuncs{ diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go index fb35f0ba..229c6980 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go @@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO return 0 } n := 0 - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey() keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts) @@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o if opts.Deterministic() { return appendMapDeterministic(b, mapv, mapi, f, opts) } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { var err error b = protowire.AppendVarint(b, f.wiretag) @@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { if !mi.needsInitCheck { return nil } - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := pointerOfValue(iter.Value()) if err := mi.checkInitializedPointer(val); err != nil { @@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error { } } } else { - iter := mapRange(mapv) + iter := mapv.MapRange() for iter.Next() { val := mapi.conv.valConv.PBValueOf(iter.Value()) if err := mapi.valFuncs.isInit(val); err != nil { @@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), iter.Value()) } @@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...))) } @@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) { if dstm.IsNil() { dstm.Set(reflect.MakeMap(f.ft)) } - iter := mapRange(srcm) + iter := srcm.MapRange() for iter.Next() { val := reflect.New(f.ft.Elem().Elem()) if f.mi != nil { diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go 
b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go deleted file mode 100644 index 4b15493f..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go +++ /dev/null @@ -1,38 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build !go1.12 -// +build !go1.12 - -package impl - -import "reflect" - -type mapIter struct { - v reflect.Value - keys []reflect.Value -} - -// mapRange provides a less-efficient equivalent to -// the Go 1.12 reflect.Value.MapRange method. -func mapRange(v reflect.Value) *mapIter { - return &mapIter{v: v} -} - -func (i *mapIter) Next() bool { - if i.keys == nil { - i.keys = i.v.MapKeys() - } else { - i.keys = i.keys[1:] - } - return len(i.keys) > 0 -} - -func (i *mapIter) Key() reflect.Value { - return i.keys[0] -} - -func (i *mapIter) Value() reflect.Value { - return i.v.MapIndex(i.keys[0]) -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go deleted file mode 100644 index 0b31b66e..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go +++ /dev/null @@ -1,12 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -//go:build go1.12 -// +build go1.12 - -package impl - -import "reflect" - -func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() } diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go index 2f7b363e..f78b57b0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go @@ -118,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) { }, } case isOneof: - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &preallocFields[i] diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go index 88c16ae5..41c1f74e 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go @@ -45,19 +45,16 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf var childMessage *MessageInfo switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): - fieldOffset = offsetOf(fs, mi.Exporter) - case fd.IsWeak(): - fieldOffset = si.weakOffset - funcs = makeWeakMessageFieldCoder(fd) + fieldOffset = offsetOf(fs) case fd.Message() != nil && !fd.IsMap(): - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) if fd.IsList() { childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft) } else { childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft) } default: - fieldOffset = offsetOf(fs, mi.Exporter) + fieldOffset = offsetOf(fs) childMessage, funcs = fieldCoder(fd, ft) } cf := &coderFieldInfo{ diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go index 304244a6..e4580b3a 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go +++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go @@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value { return v } func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) { - iter := mapRange(ms.v) + iter := ms.v.MapRange() for iter.Next() { k := ms.keyConv.PBValueOf(iter.Key()).MapKey() v := ms.valConv.PBValueOf(iter.Value()) diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go index e8fb6c35..c7de31e2 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/lazy.go +++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go @@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ fmi := f.validation.mi if fmi == nil { fd := mi.Desc.Fields().ByNumber(f.num) - if fd == nil || !fd.IsWeak() { + if fd == nil { return out, ValidationUnknown } messageName := fd.Message().FullName() diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go index bf0b6049..a51dffbe 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go @@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey, fd.L0.Parent = md fd.L0.Index = n - if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked { + if fd.L1.EditionFeatures.IsPacked { fd.L1.Options = func() protoreflect.ProtoMessage { opts := descopts.Field.ProtoReflect().New() - if fd.L1.IsWeak { - opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true)) - } if fd.L1.EditionFeatures.IsPacked { opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked)) } diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go index fa10a0f5..d50423dc 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message.go @@ -14,7 +14,6 @@ import ( "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) // MessageInfo provides protobuf related functionality for a given Go type @@ -120,7 +119,6 @@ type ( var ( sizecacheType = reflect.TypeOf(SizeCache(0)) - weakFieldsType = reflect.TypeOf(WeakFields(nil)) unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil)) unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil)) extensionFieldsType = reflect.TypeOf(ExtensionFields(nil)) @@ -129,8 +127,6 @@ var ( type structInfo struct { sizecacheOffset offset sizecacheType reflect.Type - weakOffset offset - weakType reflect.Type unknownOffset offset unknownType reflect.Type extensionOffset offset @@ -148,7 +144,6 @@ type structInfo struct { func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo { si := structInfo{ sizecacheOffset: invalidOffset, - weakOffset: invalidOffset, unknownOffset: invalidOffset, extensionOffset: invalidOffset, lazyOffset: invalidOffset, @@ -165,28 +160,23 @@ fieldLoop: switch f := t.Field(i); f.Name { case 
genid.SizeCache_goname, genid.SizeCacheA_goname: if f.Type == sizecacheType { - si.sizecacheOffset = offsetOf(f, mi.Exporter) + si.sizecacheOffset = offsetOf(f) si.sizecacheType = f.Type } - case genid.WeakFields_goname, genid.WeakFieldsA_goname: - if f.Type == weakFieldsType { - si.weakOffset = offsetOf(f, mi.Exporter) - si.weakType = f.Type - } case genid.UnknownFields_goname, genid.UnknownFieldsA_goname: if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType { - si.unknownOffset = offsetOf(f, mi.Exporter) + si.unknownOffset = offsetOf(f) si.unknownType = f.Type } case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname: if f.Type == extensionFieldsType { - si.extensionOffset = offsetOf(f, mi.Exporter) + si.extensionOffset = offsetOf(f) si.extensionType = f.Type } case "lazyFields", "XXX_lazyUnmarshalInfo": - si.lazyOffset = offsetOf(f, mi.Exporter) + si.lazyOffset = offsetOf(f) case "XXX_presence": - si.presenceOffset = offsetOf(f, mi.Exporter) + si.presenceOffset = offsetOf(f) default: for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") { if len(s) > 0 && strings.Trim(s, "0123456789") == "" { @@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType { mi.init() fd := mi.Desc.Fields().Get(i) switch { - case fd.IsWeak(): - mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName()) - return mt case fd.IsMap(): return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]} default: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go index d407dd79..dd55e8e0 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go @@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool { usePresence, _ := usePresenceForField(si, fd) switch { - case fd.IsWeak(): - // Weak fields are no different for opaque. - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): // Oneofs are no different for opaque. fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()]) @@ -88,9 +85,7 @@ func opaqueInitHook(mi *MessageInfo) bool { mi.oneofs = map[protoreflect.Name]*oneofInfo{} for i := 0; i < mi.Desc.Oneofs().Len(); i++ { od := mi.Desc.Oneofs().Get(i) - if !od.IsSynthetic() { - mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter) - } + mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter) } mi.denseFields = make([]*fieldInfo, fds.Len()*2) @@ -119,12 +114,32 @@ func opaqueInitHook(mi *MessageInfo) bool { return true } +func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo { + oi := &oneofInfo{oneofDesc: od} + if od.IsSynthetic() { + fd := od.Fields().Get(0) + index, _ := presenceIndex(mi.Desc, fd) + oi.which = func(p pointer) protoreflect.FieldNumber { + if p.IsNil() { + return 0 + } + if !mi.present(p, index) { + return 0 + } + return od.Fields().Get(0).Number() + } + return oi + } + // Dispatch to non-opaque oneof implementation for non-synthetic oneofs. 
+ return makeOneofInfo(od, si, x) +} + func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type if ft.Kind() != reflect.Map { panic(fmt.Sprintf("invalid type: got %v, want map kind", ft)) } - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) conv := NewConverter(ft, fd) return fieldInfo{ fieldDesc: fd, @@ -178,7 +193,7 @@ func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd prot panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(reflect.PtrTo(ft), fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) return fieldInfo{ fieldDesc: fd, @@ -228,7 +243,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd pro panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() return fieldInfo{ @@ -321,7 +336,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft)) } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -393,7 +408,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref deref = true } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) var getter func(p pointer) protoreflect.Value if !nullable { @@ -462,7 +477,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, mi.Exporter) + fieldOffset := offsetOf(fs) index, _ := presenceIndex(mi.Desc, fd) fieldNumber := fd.Number() elemType := fs.Type.Elem() @@ -602,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) ( switch { case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic(): return false, false - case fd.IsWeak(): - return false, false case fd.IsMap(): return false, false case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind: diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go index 31c19b54..0d20132f 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go @@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) { fi = fieldInfoForMap(fd, fs, mi.Exporter) case fd.IsList(): fi = fieldInfoForList(fd, fs, mi.Exporter) - case fd.IsWeak(): - fi = fieldInfoForWeakMessage(fd, si.weakOffset) case fd.Message() != nil: fi = fieldInfoForMessage(fd, fs, mi.Exporter) default: @@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) { } case fd.Message() != nil: ft = fs.Type - if fd.IsWeak() { - ft = nil - } isMessage = true } if isMessage && ft != nil && ft.Kind() != reflect.Ptr { diff --git 
a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go index a7406462..68d4ae32 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go +++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go @@ -8,11 +8,8 @@ import ( "fmt" "math" "reflect" - "sync" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" ) type fieldInfo struct { @@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField, isMessage := fd.Message() != nil // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ // NOTE: The logic below intentionally assumes that oneof fields are // well-formatted. That is, the oneof interface never contains a @@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x conv := NewConverter(reflect.PtrTo(ft), fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -269,7 +266,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } conv := NewConverter(ft, fd) - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) // Generate specialized getter functions to avoid going through reflect.Value if nullable { @@ -332,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField, } } -func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo { - if !flags.ProtoLegacy { - panic("no support for proto1 weak fields") - } - - var once sync.Once - var messageType protoreflect.MessageType - lazyInit := func() { - once.Do(func() { - messageName := fd.Message().FullName() - messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName) - if messageType == nil { - panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName())) - } - }) - } - - num := fd.Number() - return fieldInfo{ - fieldDesc: fd, - has: func(p pointer) bool { - if p.IsNil() { - return false - } - _, ok := p.Apply(weakOffset).WeakFields().get(num) - return ok - }, - clear: func(p pointer) { - p.Apply(weakOffset).WeakFields().clear(num) - }, - get: func(p pointer) protoreflect.Value { - lazyInit() - if p.IsNil() { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - m, ok := p.Apply(weakOffset).WeakFields().get(num) - if !ok { - return protoreflect.ValueOfMessage(messageType.Zero()) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - set: func(p pointer, v protoreflect.Value) { - lazyInit() - m := v.Message() - if m.Descriptor() != messageType.Descriptor() { - if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want { - panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want)) - } - panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName())) - } - 
p.Apply(weakOffset).WeakFields().set(num, m.Interface()) - }, - mutable: func(p pointer) protoreflect.Value { - lazyInit() - fs := p.Apply(weakOffset).WeakFields() - m, ok := fs.get(num) - if !ok { - m = messageType.New().Interface() - fs.set(num, m) - } - return protoreflect.ValueOfMessage(m.ProtoReflect()) - }, - newMessage: func() protoreflect.Message { - lazyInit() - return messageType.New() - }, - newField: func() protoreflect.Value { - lazyInit() - return protoreflect.ValueOfMessage(messageType.New()) - }, - } -} - func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo { ft := fs.Type conv := NewConverter(ft, fd) // TODO: Implement unsafe fast path? - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) return fieldInfo{ fieldDesc: fd, has: func(p pointer) bool { @@ -419,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField } rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem() if fs.Type.Kind() != reflect.Ptr { - return !isZero(rv) + return !rv.IsZero() } return !rv.IsNil() }, @@ -466,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * oi := &oneofInfo{oneofDesc: od} if od.IsSynthetic() { fs := si.fieldsByNumber[od.Fields().Get(0).Number()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -479,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } } else { fs := si.oneofsByName[od.Name()] - fieldOffset := offsetOf(fs, x) + fieldOffset := offsetOf(fs) oi.which = func(p pointer) protoreflect.FieldNumber { if p.IsNil() { return 0 @@ -497,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) * } return oi } - -// isZero is identical to reflect.Value.IsZero. -// TODO: Remove this when Go1.13 is the minimally supported Go version. -func isZero(v reflect.Value) bool { - switch v.Kind() { - case reflect.Bool: - return !v.Bool() - case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: - return v.Int() == 0 - case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: - return v.Uint() == 0 - case reflect.Float32, reflect.Float64: - return math.Float64bits(v.Float()) == 0 - case reflect.Complex64, reflect.Complex128: - c := v.Complex() - return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0 - case reflect.Array: - for i := 0; i < v.Len(); i++ { - if !isZero(v.Index(i)) { - return false - } - } - return true - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer: - return v.IsNil() - case reflect.String: - return v.Len() == 0 - case reflect.Struct: - for i := 0; i < v.NumField(); i++ { - if !isZero(v.Field(i)) { - return false - } - } - return true - default: - panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()}) - } -} diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go index 041ebde2..62f8bf66 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go +++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go @@ -22,7 +22,7 @@ type Pointer unsafe.Pointer type offset uintptr // offsetOf returns a field offset for the struct field. 
-func offsetOf(f reflect.StructField, x exporter) offset { +func offsetOf(f reflect.StructField) offset { return offset(f.Offset) } @@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) } func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) } func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) } -func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) } func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) } func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo { return (**protolazy.XXX_lazyUnmarshalInfo)(p.p) diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go index b534a3d6..7b2995dd 100644 --- a/vendor/google.golang.org/protobuf/internal/impl/validate.go +++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go @@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat switch fd.Kind() { case protoreflect.MessageKind: vi.typ = validationTypeMessage - if !fd.IsWeak() { - vi.mi = getMessageInfo(ft) - } + vi.mi = getMessageInfo(ft) case protoreflect.GroupKind: vi.typ = validationTypeGroup vi.mi = getMessageInfo(ft) @@ -320,26 +318,6 @@ State: } if f != nil { vi = f.validation - if vi.typ == validationTypeMessage && vi.mi == nil { - // Probable weak field. - // - // TODO: Consider storing the results of this lookup somewhere - // rather than recomputing it on every validation. - fd := st.mi.Desc.Fields().ByNumber(num) - if fd == nil || !fd.IsWeak() { - break - } - messageName := fd.Message().FullName() - messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName) - switch err { - case nil: - vi.mi, _ = messageType.(*MessageInfo) - case protoregistry.NotFound: - vi.typ = validationTypeBytes - default: - return out, ValidationUnknown - } - } break } // Possible extension field. diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go deleted file mode 100644 index eb79a7ba..00000000 --- a/vendor/google.golang.org/protobuf/internal/impl/weak.go +++ /dev/null @@ -1,74 +0,0 @@ -// Copyright 2019 The Go Authors. All rights reserved. -// Use of this source code is governed by a BSD-style -// license that can be found in the LICENSE file. - -package impl - -import ( - "fmt" - - "google.golang.org/protobuf/reflect/protoreflect" - "google.golang.org/protobuf/reflect/protoregistry" -) - -// weakFields adds methods to the exported WeakFields type for internal use. -// -// The exported type is an alias to an unnamed type, so methods can't be -// defined directly on it. 
-type weakFields WeakFields - -func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) { - m, ok := w[int32(num)] - return m, ok -} - -func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) { - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} - -func (w *weakFields) clear(num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool { - _, ok := w[int32(num)] - return ok -} - -func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) { - delete(*w, int32(num)) -} - -func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage { - if m, ok := w[int32(num)]; ok { - return m - } - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - return mt.Zero().Interface() -} - -func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) { - if m != nil { - mt, _ := protoregistry.GlobalTypes.FindMessageByName(name) - if mt == nil { - panic(fmt.Sprintf("message %v for weak field is not linked in", name)) - } - if mt != m.ProtoReflect().Type() { - panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface())) - } - } - if m == nil || !m.ProtoReflect().IsValid() { - delete(*w, int32(num)) - return - } - if *w == nil { - *w = make(weakFields) - } - (*w)[int32(num)] = m -} diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go index 3018450d..01efc330 100644 --- a/vendor/google.golang.org/protobuf/internal/version/version.go +++ b/vendor/google.golang.org/protobuf/internal/version/version.go @@ -52,7 +52,7 @@ import ( const ( Major = 1 Minor = 36 - Patch = 1 + Patch = 5 PreRelease = "" ) diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go index a3b5e142..4cbf1aea 100644 --- a/vendor/google.golang.org/protobuf/proto/decode.go +++ b/vendor/google.golang.org/protobuf/proto/decode.go @@ -8,7 +8,6 @@ import ( "google.golang.org/protobuf/encoding/protowire" "google.golang.org/protobuf/internal/encoding/messageset" "google.golang.org/protobuf/internal/errors" - "google.golang.org/protobuf/internal/flags" "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/internal/pragma" "google.golang.org/protobuf/reflect/protoreflect" @@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message) var err error if fd == nil { err = errUnknown - } else if flags.ProtoLegacy { - if fd.IsWeak() && fd.Message().IsPlaceholder() { - err = errUnknown // weak referent is not linked in - } } // Parse the field value. 
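// Editor's aside (illustrative sketch, not part of the vendored patch): the
// change threaded through the hunks above replaces offsetOf(fs, mi.Exporter)
// with offsetOf(fs). As the pointer_unsafe.go hunk shows, the returned offset
// is simply reflect.StructField.Offset, later applied with unsafe pointer
// arithmetic, so the exporter argument had become dead weight. A minimal,
// self-contained sketch of that pattern, using a hypothetical struct:
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

type example struct {
	A int32
	B string
}

func main() {
	f, _ := reflect.TypeOf(example{}).FieldByName("B")
	off := f.Offset // roughly what offsetOf(f) now returns

	v := example{A: 1, B: "hello"}
	// Roughly mirrors pointer.Apply(off): step from the struct base to the field.
	bp := (*string)(unsafe.Add(unsafe.Pointer(&v), off))
	fmt.Println(*bp) // prints "hello"
}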
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go index 69a05050..823dbf3b 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go @@ -132,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot } f.L2.Imports[i].IsPublic = true } - for _, i := range fd.GetWeakDependency() { - if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak { - return nil, errors.New("invalid or duplicate weak import index: %d", i) - } - f.L2.Imports[i].IsWeak = true - } imps := importSet{f.Path(): true} for i, path := range fd.GetDependency() { imp := &f.L2.Imports[i] f, err := r.FindFileByPath(path) - if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) { + if err == protoregistry.NotFound && o.AllowUnresolvable { f = filedesc.PlaceholderFile(path) } else if err != nil { return nil, errors.New("could not resolve import %q: %v", path, err) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go index ebcb4a8a..9da34998 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go @@ -149,7 +149,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc if opts := fd.GetOptions(); opts != nil { opts = proto.Clone(opts).(*descriptorpb.FieldOptions) f.L1.Options = func() protoreflect.ProtoMessage { return opts } - f.L1.IsWeak = opts.GetWeak() f.L1.IsLazy = opts.GetLazy() if opts.Packed != nil { f.L1.EditionFeatures.IsPacked = opts.GetPacked() diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go index f3cebab2..ff692436 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go @@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc o.L1.Fields.List = append(o.L1.Fields.List, f) } - if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil { + if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil { return errors.New("message field %q cannot resolve type: %v", f.FullName(), err) } if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) { @@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) { for i, xd := range xds { x := &xs[i] - if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil { + if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil { return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err) } - if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil { + if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), 
partialName(xd.GetTypeName())); err != nil { return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err) } if xd.DefaultValue != nil { @@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc s := &ss[i] for j, md := range sd.GetMethod() { m := &s.L2.Methods.List[j] - m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false) + m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType())) if err != nil { return errors.New("service method %q cannot resolve input: %v", m.FullName(), err) } - m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false) + m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType())) if err != nil { return errors.New("service method %q cannot resolve output: %v", m.FullName(), err) } @@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc // findTarget finds an enum or message descriptor if k is an enum, message, // group, or unknown. If unknown, and the name could be resolved, the kind // returned kind is set based on the type of the resolved descriptor. -func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { +func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) { switch k { case protoreflect.EnumKind: - ed, err := r.findEnumDescriptor(scope, ref, isWeak) + ed, err := r.findEnumDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } return k, ed, nil, nil case protoreflect.MessageKind, protoreflect.GroupKind: - md, err := r.findMessageDescriptor(scope, ref, isWeak) + md, err := r.findMessageDescriptor(scope, ref) if err != nil { return 0, nil, nil, err } @@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, // Handle unspecified kinds (possible with parsers that operate // on a per-file basis without knowledge of dependencies). 
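// Editor's note (not part of the upstream change): with weak fields gone, the
// isWeak parameter disappears from findTarget, findEnumDescriptor and
// findMessageDescriptor, and placeholder descriptors are now produced only
// when the resolver was built with AllowUnresolvable, e.g. (hypothetical use)
//
//	fd, err := (protodesc.FileOptions{AllowUnresolvable: true}).New(fdp, resolver)
//
// Otherwise an unresolved reference surfaces as the "%q not found" error in
// the branches below.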
d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return 0, nil, nil, errors.New("%q not found", ref.FullName()) @@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName) } } -func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) { +func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderEnum(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) @@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa return ed, nil } -func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) { +func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) { d, err := r.findDescriptor(scope, ref) - if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) { + if err == protoregistry.NotFound && r.allowUnresolvable { return filedesc.PlaceholderMessage(ref.FullName()), nil } else if err == protoregistry.NotFound { return nil, errors.New("%q not found", ref.FullName()) diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go index 6de31c2e..c343d922 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go @@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName()) } } - if f.IsWeak() && !flags.ProtoLegacy { - return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName()) - } - if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) { - return errors.New("message field %q may only be weak for an optional message", f.FullName()) - } if f.IsPacked() && !isPackable(f) { return errors.New("message field %q is not packable", f.FullName()) } @@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds if f.Cardinality() != protoreflect.Optional { return errors.New("message field %q belongs in a oneof and must be optional", f.FullName()) } - if f.IsWeak() { - return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName()) - } } } @@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number()) } } - if xd.GetOptions().GetWeak() { - return errors.New("extension field %q cannot be a weak reference", x.FullName()) - } if x.IsPacked() && !isPackable(x) { return 
errors.New("extension field %q is not packable", x.FullName()) } diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go index bf0a0ccd..697a61b2 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go @@ -11,6 +11,7 @@ import ( "google.golang.org/protobuf/internal/editiondefaults" "google.golang.org/protobuf/internal/filedesc" + "google.golang.org/protobuf/internal/genid" "google.golang.org/protobuf/proto" "google.golang.org/protobuf/reflect/protoreflect" "google.golang.org/protobuf/types/descriptorpb" @@ -125,16 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW } - if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil { - if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil { - parentFS.GenerateLegacyUnmarshalJSON = *luje - } - if sep := goFeatures.StripEnumPrefix; sep != nil { - parentFS.StripEnumPrefix = int(*sep) - } - if al := goFeatures.ApiLevel; al != nil { - parentFS.APILevel = int(*al) - } + // We must not use proto.GetExtension(child, gofeaturespb.E_Go) + // because that only works for messages we generated, but not for + // dynamicpb messages. See golang/protobuf#1669. + // + // Further, we harden this code against adversarial inputs: a + // service which accepts descriptors from a possibly malicious + // source shouldn't crash. + goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor()) + if !goFeatures.IsValid() { + return parentFS + } + gf, ok := goFeatures.Interface().(protoreflect.Message) + if !ok { + return parentFS + } + // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures. 
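// Editor's aside (illustrative, not part of the upstream hunk): the added code
// below walks the features message through protoreflect field numbers instead
// of type-asserting to *gofeaturespb.GoFeatures, so the same path works when
// the message is a dynamicpb.Message (golang/protobuf#1669) and stays robust
// against malformed descriptors. The general shape of that pattern, with msg
// and the field number 1 as illustrative stand-ins:
//
//	v := msg.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
//	if fm, ok := v.Interface().(protoreflect.Message); ok {
//		if fd := fm.Descriptor().Fields().ByNumber(1); fd != nil && fm.Has(fd) {
//			_ = fm.Get(fd) // protoreflect.Value; inspect fd.Kind() before converting
//		}
//	}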
+ fields := gf.Descriptor().Fields() + + if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.BoolKind && + gf.Has(fd) { + parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool() + } + + if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.StripEnumPrefix = int(gf.Get(fd).Enum()) + } + + if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil && + !fd.IsList() && + fd.Kind() == protoreflect.EnumKind && + gf.Has(fd) { + parentFS.APILevel = int(gf.Get(fd).Enum()) } return parentFS diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go index a5de8d40..9b880aa8 100644 --- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go +++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go @@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD if imp.IsPublic { p.PublicDependency = append(p.PublicDependency, int32(i)) } - if imp.IsWeak { - p.WeakDependency = append(p.WeakDependency, int32(i)) - } } for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ { loc := locs.Get(i) diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go index cd8fadba..cd7fbc87 100644 --- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go +++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go @@ -68,7 +68,7 @@ type Descriptor interface { // dependency is not resolved, in which case only name information is known. // // Placeholder types may only be returned by the following accessors - // as a result of unresolved dependencies or weak imports: + // as a result of unresolved dependencies: // // ╔═══════════════════════════════════╤═════════════════════╗ // ║ Accessor │ Descriptor ║ @@ -168,11 +168,7 @@ type FileImport struct { // The current file and the imported file must be within proto package. IsPublic bool - // IsWeak reports whether this is a weak import, which does not impose - // a direct dependency on the target file. - // - // Weak imports are a legacy proto1 feature. Equivalent behavior is - // achieved using proto2 extension fields or proto3 Any messages. + // Deprecated: support for weak fields has been removed. IsWeak bool } @@ -325,9 +321,7 @@ type FieldDescriptor interface { // specified in the source .proto file. HasOptionalKeyword() bool - // IsWeak reports whether this is a weak field, which does not impose a - // direct dependency on the target type. - // If true, then Message returns a placeholder type. + // Deprecated: support for weak fields has been removed. IsWeak() bool // IsPacked reports whether repeated primitive numeric kinds should be diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go index a551e7ae..a5163376 100644 --- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go +++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go @@ -46,6 +46,7 @@ import ( protoimpl "google.golang.org/protobuf/runtime/protoimpl" reflect "reflect" sync "sync" + unsafe "unsafe" ) // The full set of known editions. 
@@ -4360,7 +4361,7 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor -var file_google_protobuf_descriptor_proto_rawDesc = []byte{ +var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{ 0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, @@ -5130,16 +5131,16 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{ 0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65, 0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e, -} +}) var ( file_google_protobuf_descriptor_proto_rawDescOnce sync.Once - file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc + file_google_protobuf_descriptor_proto_rawDescData []byte ) func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte { file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() { - file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData) + file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc))) }) return file_google_protobuf_descriptor_proto_rawDescData } @@ -5292,7 +5293,7 @@ func file_google_protobuf_descriptor_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)), NumEnums: 17, NumMessages: 33, NumExtensions: 0, @@ -5304,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() { MessageInfos: file_google_protobuf_descriptor_proto_msgTypes, }.Build() File_google_protobuf_descriptor_proto = out.File - file_google_protobuf_descriptor_proto_rawDesc = nil file_google_protobuf_descriptor_proto_goTypes = nil file_google_protobuf_descriptor_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go index e0b72eaf..28d24bad 100644 --- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go +++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go @@ -16,6 +16,7 @@ import ( descriptorpb "google.golang.org/protobuf/types/descriptorpb" reflect "reflect" sync "sync" + unsafe "unsafe" ) type GoFeatures_APILevel int32 @@ -227,7 +228,7 @@ var ( var File_google_protobuf_go_features_proto protoreflect.FileDescriptor -var file_google_protobuf_go_features_proto_rawDesc = []byte{ +var file_google_protobuf_go_features_proto_rawDesc = string([]byte{ 0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, @@ -283,16 +284,16 @@ var 
file_google_protobuf_go_features_proto_rawDesc = []byte{ 0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x70, 0x62, -} +}) var ( file_google_protobuf_go_features_proto_rawDescOnce sync.Once - file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc + file_google_protobuf_go_features_proto_rawDescData []byte ) func file_google_protobuf_go_features_proto_rawDescGZIP() []byte { file_google_protobuf_go_features_proto_rawDescOnce.Do(func() { - file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData) + file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc))) }) return file_google_protobuf_go_features_proto_rawDescData } @@ -326,7 +327,7 @@ func file_google_protobuf_go_features_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_go_features_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)), NumEnums: 2, NumMessages: 1, NumExtensions: 1, @@ -339,7 +340,6 @@ func file_google_protobuf_go_features_proto_init() { ExtensionInfos: file_google_protobuf_go_features_proto_extTypes, }.Build() File_google_protobuf_go_features_proto = out.File - file_google_protobuf_go_features_proto_rawDesc = nil file_google_protobuf_go_features_proto_goTypes = nil file_google_protobuf_go_features_proto_depIdxs = nil } diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go index 191552cc..497da66e 100644 --- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go +++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go @@ -122,6 +122,7 @@ import ( reflect "reflect" strings "strings" sync "sync" + unsafe "unsafe" ) // `Any` contains an arbitrary serialized protocol buffer message along with a @@ -411,7 +412,7 @@ func (x *Any) GetValue() []byte { var File_google_protobuf_any_proto protoreflect.FileDescriptor -var file_google_protobuf_any_proto_rawDesc = []byte{ +var file_google_protobuf_any_proto_rawDesc = string([]byte{ 0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03, @@ -427,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{ 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65, 0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x33, -} +}) var ( file_google_protobuf_any_proto_rawDescOnce sync.Once - file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc + file_google_protobuf_any_proto_rawDescData []byte ) func file_google_protobuf_any_proto_rawDescGZIP() []byte { file_google_protobuf_any_proto_rawDescOnce.Do(func() { - file_google_protobuf_any_proto_rawDescData = 
protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData) + file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc))) }) return file_google_protobuf_any_proto_rawDescData } @@ -462,7 +463,7 @@ func file_google_protobuf_any_proto_init() { out := protoimpl.TypeBuilder{ File: protoimpl.DescBuilder{ GoPackagePath: reflect.TypeOf(x{}).PkgPath(), - RawDescriptor: file_google_protobuf_any_proto_rawDesc, + RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)), NumEnums: 0, NumMessages: 1, NumExtensions: 0, @@ -473,7 +474,6 @@ func file_google_protobuf_any_proto_init() { MessageInfos: file_google_protobuf_any_proto_msgTypes, }.Build() File_google_protobuf_any_proto = out.File - file_google_protobuf_any_proto_rawDesc = nil file_google_protobuf_any_proto_goTypes = nil file_google_protobuf_any_proto_depIdxs = nil } diff --git a/vendor/modules.txt b/vendor/modules.txt index 44331d0b..75c0ca6a 100644 --- a/vendor/modules.txt +++ b/vendor/modules.txt @@ -1,7 +1,7 @@ # github.com/blang/semver v3.5.1+incompatible ## explicit github.com/blang/semver -# github.com/containernetworking/cni v1.2.3 +# github.com/containernetworking/cni v1.3.0 ## explicit; go 1.21 github.com/containernetworking/cni/pkg/ns github.com/containernetworking/cni/pkg/skel @@ -13,7 +13,7 @@ github.com/containernetworking/cni/pkg/types/create github.com/containernetworking/cni/pkg/types/internal github.com/containernetworking/cni/pkg/utils github.com/containernetworking/cni/pkg/version -# github.com/containernetworking/plugins v1.6.1 +# github.com/containernetworking/plugins v1.6.2 ## explicit; go 1.23 github.com/containernetworking/plugins/pkg/ns github.com/containernetworking/plugins/pkg/testutils @@ -26,15 +26,15 @@ github.com/emicklei/go-restful/v3 github.com/emicklei/go-restful/v3/log # github.com/evanphx/json-patch v5.6.0+incompatible ## explicit -# github.com/fsnotify/fsnotify v1.8.0 +# github.com/fsnotify/fsnotify v1.9.0 ## explicit; go 1.17 github.com/fsnotify/fsnotify github.com/fsnotify/fsnotify/internal # github.com/fxamacker/cbor/v2 v2.7.0 ## explicit; go 1.17 github.com/fxamacker/cbor/v2 -# github.com/go-co-op/gocron/v2 v2.12.4 -## explicit; go 1.20 +# github.com/go-co-op/gocron/v2 v2.16.1 +## explicit; go 1.21.0 github.com/go-co-op/gocron/v2 # github.com/go-logr/logr v1.4.2 ## explicit; go 1.18 @@ -66,8 +66,8 @@ github.com/google/gnostic-models/extensions github.com/google/gnostic-models/jsonschema github.com/google/gnostic-models/openapiv2 github.com/google/gnostic-models/openapiv3 -# github.com/google/go-cmp v0.6.0 -## explicit; go 1.13 +# github.com/google/go-cmp v0.7.0 +## explicit; go 1.21 github.com/google/go-cmp/cmp github.com/google/go-cmp/cmp/internal/diff github.com/google/go-cmp/cmp/internal/flags @@ -83,8 +83,8 @@ github.com/google/uuid # github.com/imdario/mergo v0.3.16 ## explicit; go 1.13 github.com/imdario/mergo -# github.com/jonboulle/clockwork v0.4.0 -## explicit; go 1.15 +# github.com/jonboulle/clockwork v0.5.0 +## explicit; go 1.21 github.com/jonboulle/clockwork # github.com/josharian/intern v1.0.0 ## explicit; go 1.5 @@ -92,7 +92,7 @@ github.com/josharian/intern # github.com/json-iterator/go v1.1.12 ## explicit; go 1.12 github.com/json-iterator/go -# github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.0 +# 
github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6 ## explicit; go 1.21 github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1 @@ -150,8 +150,8 @@ github.com/onsi/ginkgo/reporters/stenographer github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty github.com/onsi/ginkgo/types -# github.com/onsi/gomega v1.36.2 -## explicit; go 1.22.0 +# github.com/onsi/gomega v1.37.0 +## explicit; go 1.23.0 github.com/onsi/gomega github.com/onsi/gomega/format github.com/onsi/gomega/internal @@ -178,12 +178,7 @@ github.com/vishvananda/netns # github.com/x448/float16 v0.8.4 ## explicit; go 1.11 github.com/x448/float16 -# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8 -## explicit; go 1.20 -golang.org/x/exp/constraints -golang.org/x/exp/maps -golang.org/x/exp/slices -# golang.org/x/mod v0.22.0 +# golang.org/x/mod v0.23.0 ## explicit; go 1.22.0 golang.org/x/mod/internal/lazyregexp golang.org/x/mod/module @@ -237,10 +232,10 @@ golang.org/x/text/secure/bidirule golang.org/x/text/transform golang.org/x/text/unicode/bidi golang.org/x/text/unicode/norm -# golang.org/x/time v0.8.0 -## explicit; go 1.18 +# golang.org/x/time v0.11.0 +## explicit; go 1.23.0 golang.org/x/time/rate -# golang.org/x/tools v0.28.0 +# golang.org/x/tools v0.30.0 ## explicit; go 1.22.0 golang.org/x/tools/go/ast/astutil golang.org/x/tools/go/gcexportdata @@ -264,10 +259,10 @@ golang.org/x/tools/internal/stdlib golang.org/x/tools/internal/typeparams golang.org/x/tools/internal/typesinternal golang.org/x/tools/internal/versions -# gomodules.xyz/jsonpatch/v2 v2.4.0 +# gomodules.xyz/jsonpatch/v2 v2.5.0 ## explicit; go 1.20 gomodules.xyz/jsonpatch/v2 -# google.golang.org/protobuf v1.36.1 +# google.golang.org/protobuf v1.36.5 ## explicit; go 1.21 google.golang.org/protobuf/encoding/prototext google.golang.org/protobuf/encoding/protowire
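// Editor's aside (illustrative sketch, not part of the vendored files): the
// regenerated *.pb.go files above now keep their raw descriptor as a string
// rather than a []byte, hand protoimpl a zero-copy view of it via
// unsafe.Slice(unsafe.StringData(...), len(...)), and no longer nil out the
// rawDesc variable after init. Presumably the string form keeps the data
// immutable and safely shareable. The conversion idiom, with a made-up
// descriptor literal:
package main

import (
	"fmt"
	"unsafe"
)

func main() {
	// Stand-in for a generated raw descriptor (field 1, length 18, file name).
	rawDesc := "\x0a\x12hypothetical.proto"

	// Reinterpret the string's backing bytes as a []byte without copying.
	// The slice must be treated as read-only; the memory still belongs to
	// the immutable string.
	b := unsafe.Slice(unsafe.StringData(rawDesc), len(rawDesc))
	fmt.Println(len(b), b[0]) // 20 10 (0x0a == 10)
}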