.\nBe explicit and use BeNil() instead. This is to avoid mistakes where both sides of an assertion are erroneously uninitialized.")
}
@@ -27,7 +27,7 @@ func (matcher *EqualMatcher) Match(actual interface{}) (success bool, err error)
return reflect.DeepEqual(actual, matcher.Expected), nil
}
-func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) FailureMessage(actual any) (message string) {
actualString, actualOK := actual.(string)
expectedString, expectedOK := matcher.Expected.(string)
if actualOK && expectedOK {
@@ -37,6 +37,6 @@ func (matcher *EqualMatcher) FailureMessage(actual interface{}) (message string)
return format.Message(actual, "to equal", matcher.Expected)
}
-func (matcher *EqualMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *EqualMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to equal", matcher.Expected)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
index 9856752f..a4fcfc42 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_cap_matcher.go
@@ -12,7 +12,7 @@ type HaveCapMatcher struct {
Count int
}
-func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveCapMatcher) Match(actual any) (success bool, err error) {
length, ok := capOf(actual)
if !ok {
return false, fmt.Errorf("HaveCap matcher expects a array/channel/slice. Got:\n%s", format.Object(actual, 1))
@@ -21,10 +21,10 @@ func (matcher *HaveCapMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveCapMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have capacity %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveCapMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveCapMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have capacity %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
index 4111f2b8..4c45063b 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_each_matcher.go
@@ -9,10 +9,10 @@ import (
)
type HaveEachMatcher struct {
- Element interface{}
+ Element any
}
-func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveEachMatcher) Match(actual any) (success bool, err error) {
if !isArrayOrSlice(actual) && !isMap(actual) && !miter.IsIter(actual) {
return false, fmt.Errorf("HaveEach matcher expects an array/slice/map/iter.Seq/iter.Seq2. Got:\n%s",
format.Object(actual, 1))
@@ -61,14 +61,14 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
format.Object(actual, 1))
}
- var valueAt func(int) interface{}
+ var valueAt func(int) any
if isMap(actual) {
keys := value.MapKeys()
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.MapIndex(keys[i]).Interface()
}
} else {
- valueAt = func(i int) interface{} {
+ valueAt = func(i int) any {
return value.Index(i).Interface()
}
}
@@ -89,11 +89,11 @@ func (matcher *HaveEachMatcher) Match(actual interface{}) (success bool, err err
}
// FailureMessage returns a suitable failure message.
-func (matcher *HaveEachMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to contain element matching", matcher.Element)
}
// NegatedFailureMessage returns a suitable negated failure message.
-func (matcher *HaveEachMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveEachMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain element matching", matcher.Element)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
index 23799f1c..8b2d297c 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_exact_elements.go
@@ -14,13 +14,13 @@ type mismatchFailure struct {
}
type HaveExactElementsMatcher struct {
- Elements []interface{}
+ Elements []any
mismatchFailures []mismatchFailure
missingIndex int
extraIndex int
}
-func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExactElementsMatcher) Match(actual any) (success bool, err error) {
matcher.resetState()
if isMap(actual) || miter.IsSeq2(actual) {
@@ -108,7 +108,7 @@ func (matcher *HaveExactElementsMatcher) Match(actual interface{}) (success bool
return success, nil
}
-func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) FailureMessage(actual any) (message string) {
message = format.Message(actual, "to have exact elements with", presentable(matcher.Elements))
if matcher.missingIndex > 0 {
message = fmt.Sprintf("%s\nthe missing elements start from index %d", message, matcher.missingIndex)
@@ -125,7 +125,7 @@ func (matcher *HaveExactElementsMatcher) FailureMessage(actual interface{}) (mes
return
}
-func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExactElementsMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to contain elements", presentable(matcher.Elements))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
index b5701874..a5a028e9 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_existing_field_matcher.go
@@ -11,7 +11,7 @@ type HaveExistingFieldMatcher struct {
Field string
}
-func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveExistingFieldMatcher) Match(actual any) (success bool, err error) {
// we don't care about the field's actual value, just about any error in
// trying to find the field (or method).
_, err = extractField(actual, matcher.Field, "HaveExistingField")
@@ -27,10 +27,10 @@ func (matcher *HaveExistingFieldMatcher) Match(actual interface{}) (success bool
return false, err
}
-func (matcher *HaveExistingFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have field '%s'", format.Object(actual, 1), matcher.Field)
}
-func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveExistingFieldMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have field '%s'", format.Object(actual, 1), matcher.Field)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_field.go b/vendor/github.com/onsi/gomega/matchers/have_field.go
index 293457e8..d9fbeaf7 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_field.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_field.go
@@ -17,7 +17,7 @@ func (e missingFieldError) Error() string {
return string(e)
}
-func extractField(actual interface{}, field string, matchername string) (any, error) {
+func extractField(actual any, field string, matchername string) (any, error) {
fields := strings.SplitN(field, ".", 2)
actualValue := reflect.ValueOf(actual)
@@ -68,7 +68,7 @@ func extractField(actual interface{}, field string, matchername string) (any, er
type HaveFieldMatcher struct {
Field string
- Expected interface{}
+ Expected any
}
func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
@@ -80,7 +80,7 @@ func (matcher *HaveFieldMatcher) expectedMatcher() omegaMatcher {
return expectedMatcher
}
-func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveFieldMatcher) Match(actual any) (success bool, err error) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
return false, err
@@ -89,7 +89,7 @@ func (matcher *HaveFieldMatcher) Match(actual interface{}) (success bool, err er
return matcher.expectedMatcher().Match(extractedField)
}
-func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) FailureMessage(actual any) (message string) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
// this really shouldn't happen
@@ -101,7 +101,7 @@ func (matcher *HaveFieldMatcher) FailureMessage(actual interface{}) (message str
return message
}
-func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveFieldMatcher) NegatedFailureMessage(actual any) (message string) {
extractedField, err := extractField(actual, matcher.Field, "HaveField")
if err != nil {
// this really shouldn't happen
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
index d14d9e5f..2d561b9a 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_body_matcher.go
@@ -11,12 +11,12 @@ import (
)
type HaveHTTPBodyMatcher struct {
- Expected interface{}
- cachedResponse interface{}
+ Expected any
+ cachedResponse any
cachedBody []byte
}
-func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
+func (matcher *HaveHTTPBodyMatcher) Match(actual any) (bool, error) {
body, err := matcher.body(actual)
if err != nil {
return false, err
@@ -34,7 +34,7 @@ func (matcher *HaveHTTPBodyMatcher) Match(actual interface{}) (bool, error) {
}
}
-func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -52,7 +52,7 @@ func (matcher *HaveHTTPBodyMatcher) FailureMessage(actual interface{}) (message
}
}
-func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual any) (message string) {
body, err := matcher.body(actual)
if err != nil {
return fmt.Sprintf("failed to read body: %s", err)
@@ -73,7 +73,7 @@ func (matcher *HaveHTTPBodyMatcher) NegatedFailureMessage(actual interface{}) (m
// body returns the body. It is cached because once we read it in Match()
// the Reader is closed and it is not readable again in FailureMessage()
// or NegatedFailureMessage()
-func (matcher *HaveHTTPBodyMatcher) body(actual interface{}) ([]byte, error) {
+func (matcher *HaveHTTPBodyMatcher) body(actual any) ([]byte, error) {
if matcher.cachedResponse == actual && matcher.cachedBody != nil {
return matcher.cachedBody, nil
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
index c256f452..75672265 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_header_with_value_matcher.go
@@ -11,10 +11,10 @@ import (
type HaveHTTPHeaderWithValueMatcher struct {
Header string
- Value interface{}
+ Value any
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual any) (success bool, err error) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
return false, err
@@ -28,7 +28,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) Match(actual interface{}) (succes
return headerMatcher.Match(headerValue)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}) string {
+func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual any) string {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -43,7 +43,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) FailureMessage(actual interface{}
return fmt.Sprintf("HTTP header %q:\n%s", matcher.Header, diff)
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
headerValue, err := matcher.extractHeader(actual)
if err != nil {
panic(err) // protected by Match()
@@ -69,7 +69,7 @@ func (matcher *HaveHTTPHeaderWithValueMatcher) getSubMatcher() (types.GomegaMatc
}
}
-func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual interface{}) (string, error) {
+func (matcher *HaveHTTPHeaderWithValueMatcher) extractHeader(actual any) (string, error) {
switch r := actual.(type) {
case *http.Response:
return r.Header.Get(matcher.Header), nil
diff --git a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
index 0f66e46e..8b25b3a9 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_http_status_matcher.go
@@ -12,10 +12,10 @@ import (
)
type HaveHTTPStatusMatcher struct {
- Expected []interface{}
+ Expected []any
}
-func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveHTTPStatusMatcher) Match(actual any) (success bool, err error) {
var resp *http.Response
switch a := actual.(type) {
case *http.Response:
@@ -48,11 +48,11 @@ func (matcher *HaveHTTPStatusMatcher) Match(actual interface{}) (success bool, e
return false, nil
}
-func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "to have HTTP status", matcher.expectedString())
}
-func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveHTTPStatusMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\n%s\n%s", formatHttpResponse(actual), "not to have HTTP status", matcher.expectedString())
}
@@ -64,7 +64,7 @@ func (matcher *HaveHTTPStatusMatcher) expectedString() string {
return strings.Join(lines, "\n")
}
-func formatHttpResponse(input interface{}) string {
+func formatHttpResponse(input any) string {
var resp *http.Response
switch r := input.(type) {
case *http.Response:
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
index b62ee93c..9e16dcf5 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_matcher.go
@@ -11,10 +11,10 @@ import (
)
type HaveKeyMatcher struct {
- Key interface{}
+ Key any
}
-func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveKeyMatcher) Match(actual any) (success bool, err error) {
if !isMap(actual) && !miter.IsSeq2(actual) {
return false, fmt.Errorf("HaveKey matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
@@ -52,7 +52,7 @@ func (matcher *HaveKeyMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) FailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "to have key matching", matcher.Key)
@@ -61,7 +61,7 @@ func (matcher *HaveKeyMatcher) FailureMessage(actual interface{}) (message strin
}
}
-func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyMatcher) NegatedFailureMessage(actual any) (message string) {
switch matcher.Key.(type) {
case omegaMatcher:
return format.Message(actual, "not to have key matching", matcher.Key)
diff --git a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
index 3d608f63..1c53f1e5 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_key_with_value_matcher.go
@@ -11,11 +11,11 @@ import (
)
type HaveKeyWithValueMatcher struct {
- Key interface{}
- Value interface{}
+ Key any
+ Value any
}
-func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveKeyWithValueMatcher) Match(actual any) (success bool, err error) {
if !isMap(actual) && !miter.IsSeq2(actual) {
return false, fmt.Errorf("HaveKeyWithValue matcher expects a map/iter.Seq2. Got:%s", format.Object(actual, 1))
}
@@ -70,7 +70,7 @@ func (matcher *HaveKeyWithValueMatcher) Match(actual interface{}) (success bool,
return false, nil
}
-func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual any) (message string) {
str := "to have {key: value}"
if _, ok := matcher.Key.(omegaMatcher); ok {
str += " matching"
@@ -78,12 +78,12 @@ func (matcher *HaveKeyWithValueMatcher) FailureMessage(actual interface{}) (mess
str += " matching"
}
- expect := make(map[interface{}]interface{}, 1)
+ expect := make(map[any]any, 1)
expect[matcher.Key] = matcher.Value
return format.Message(actual, str, expect)
}
-func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveKeyWithValueMatcher) NegatedFailureMessage(actual any) (message string) {
kStr := "not to have key"
if _, ok := matcher.Key.(omegaMatcher); ok {
kStr = "not to have key matching"
diff --git a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
index ca25713f..c334d4c0 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_len_matcher.go
@@ -10,7 +10,7 @@ type HaveLenMatcher struct {
Count int
}
-func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveLenMatcher) Match(actual any) (success bool, err error) {
length, ok := lengthOf(actual)
if !ok {
return false, fmt.Errorf("HaveLen matcher expects a string/array/map/channel/slice/iterator. Got:\n%s", format.Object(actual, 1))
@@ -19,10 +19,10 @@ func (matcher *HaveLenMatcher) Match(actual interface{}) (success bool, err erro
return length == matcher.Count, nil
}
-func (matcher *HaveLenMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nto have length %d", format.Object(actual, 1), matcher.Count)
}
-func (matcher *HaveLenMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveLenMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected\n%s\nnot to have length %d", format.Object(actual, 1), matcher.Count)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
index 22a1b673..a240f1a1 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_occurred_matcher.go
@@ -11,7 +11,7 @@ import (
type HaveOccurredMatcher struct {
}
-func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveOccurredMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return false, nil
@@ -26,10 +26,10 @@ func (matcher *HaveOccurredMatcher) Match(actual interface{}) (success bool, err
return !isNil(actual), nil
}
-func (matcher *HaveOccurredMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) FailureMessage(actual any) (message string) {
return fmt.Sprintf("Expected an error to have occurred. Got:\n%s", format.Object(actual, 1))
}
-func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveOccurredMatcher) NegatedFailureMessage(actual any) (message string) {
return fmt.Sprintf("Unexpected error:\n%s\n%s", format.Object(actual, 1), "occurred")
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
index 1d8e8027..7987d41f 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_prefix_matcher.go
@@ -8,10 +8,10 @@ import (
type HavePrefixMatcher struct {
Prefix string
- Args []interface{}
+ Args []any
}
-func (matcher *HavePrefixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HavePrefixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HavePrefix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HavePrefixMatcher) prefix() string {
return matcher.Prefix
}
-func (matcher *HavePrefixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have prefix", matcher.prefix())
}
-func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HavePrefixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have prefix", matcher.prefix())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
index 40a3526e..2aa4ceac 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_suffix_matcher.go
@@ -8,10 +8,10 @@ import (
type HaveSuffixMatcher struct {
Suffix string
- Args []interface{}
+ Args []any
}
-func (matcher *HaveSuffixMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *HaveSuffixMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("HaveSuffix matcher requires a string or stringer. Got:\n%s", format.Object(actual, 1))
@@ -27,10 +27,10 @@ func (matcher *HaveSuffixMatcher) suffix() string {
return matcher.Suffix
}
-func (matcher *HaveSuffixMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to have suffix", matcher.suffix())
}
-func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *HaveSuffixMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to have suffix", matcher.suffix())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/have_value.go b/vendor/github.com/onsi/gomega/matchers/have_value.go
index f6725283..4c39e0db 100644
--- a/vendor/github.com/onsi/gomega/matchers/have_value.go
+++ b/vendor/github.com/onsi/gomega/matchers/have_value.go
@@ -12,10 +12,10 @@ const maxIndirections = 31
type HaveValueMatcher struct {
Matcher types.GomegaMatcher // the matcher to apply to the "resolved" actual value.
- resolvedActual interface{} // the ("resolved") value.
+ resolvedActual any // the ("resolved") value.
}
-func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
+func (m *HaveValueMatcher) Match(actual any) (bool, error) {
val := reflect.ValueOf(actual)
for allowedIndirs := maxIndirections; allowedIndirs > 0; allowedIndirs-- {
// return an error if value isn't valid. Please note that we cannot
@@ -45,10 +45,10 @@ func (m *HaveValueMatcher) Match(actual interface{}) (bool, error) {
return false, errors.New(format.Message(actual, "too many indirections"))
}
-func (m *HaveValueMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.resolvedActual)
}
-func (m *HaveValueMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *HaveValueMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.resolvedActual)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
index c539dd38..f9d31377 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_error_matcher.go
@@ -71,14 +71,14 @@ func (matcher *MatchErrorMatcher) Match(actual any) (success bool, err error) {
format.Object(expected, 1))
}
-func (matcher *MatchErrorMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) FailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("to match error function %s", matcher.FuncErrDescription[0]))
}
return format.Message(actual, "to match error", matcher.Expected)
}
-func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchErrorMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.isFunc {
return format.Message(actual, fmt.Sprintf("not to match error function %s", matcher.FuncErrDescription[0]))
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
index f962f139..331f289a 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_json_matcher.go
@@ -9,18 +9,18 @@ import (
)
type MatchJSONMatcher struct {
- JSONToMatch interface{}
- firstFailurePath []interface{}
+ JSONToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchJSONMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.prettyPrint(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
// this is guarded by prettyPrint
json.Unmarshal([]byte(actualString), &aval)
@@ -30,17 +30,17 @@ func (matcher *MatchJSONMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchJSONMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchJSONMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.prettyPrint(actual)
return formattedMessage(format.Message(actualString, "not to match JSON of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchJSONMatcher) prettyPrint(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchJSONMatcher) prettyPrint(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchJSONMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
index adac5db6..779be683 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_regexp_matcher.go
@@ -9,10 +9,10 @@ import (
type MatchRegexpMatcher struct {
Regexp string
- Args []interface{}
+ Args []any
}
-func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchRegexpMatcher) Match(actual any) (success bool, err error) {
actualString, ok := toString(actual)
if !ok {
return false, fmt.Errorf("RegExp matcher requires a string or stringer.\nGot:%s", format.Object(actual, 1))
@@ -26,11 +26,11 @@ func (matcher *MatchRegexpMatcher) Match(actual interface{}) (success bool, err
return match, nil
}
-func (matcher *MatchRegexpMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to match regular expression", matcher.regexp())
}
-func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchRegexpMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "not to match regular expression", matcher.regexp())
}
diff --git a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
index 5c815f5a..f7dcaf6f 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_xml_matcher.go
@@ -15,10 +15,10 @@ import (
)
type MatchXMLMatcher struct {
- XMLToMatch interface{}
+ XMLToMatch any
}
-func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchXMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.formattedPrint(actual)
if err != nil {
return false, err
@@ -37,17 +37,17 @@ func (matcher *MatchXMLMatcher) Match(actual interface{}) (success bool, err err
return reflect.DeepEqual(aval, eval), nil
}
-func (matcher *MatchXMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nto match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchXMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.formattedPrint(actual)
return fmt.Sprintf("Expected\n%s\nnot to match XML of\n%s", actualString, expectedString)
}
-func (matcher *MatchXMLMatcher) formattedPrint(actual interface{}) (actualString, expectedString string, err error) {
+func (matcher *MatchXMLMatcher) formattedPrint(actual any) (actualString, expectedString string, err error) {
var ok bool
actualString, ok = toString(actual)
if !ok {
diff --git a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
index 2cb6b47d..95057c26 100644
--- a/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/match_yaml_matcher.go
@@ -9,18 +9,18 @@ import (
)
type MatchYAMLMatcher struct {
- YAMLToMatch interface{}
- firstFailurePath []interface{}
+ YAMLToMatch any
+ firstFailurePath []any
}
-func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *MatchYAMLMatcher) Match(actual any) (success bool, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
if err != nil {
return false, err
}
- var aval interface{}
- var eval interface{}
+ var aval any
+ var eval any
if err := yaml.Unmarshal([]byte(actualString), &aval); err != nil {
return false, fmt.Errorf("Actual '%s' should be valid YAML, but it is not.\nUnderlying error:%s", actualString, err)
@@ -34,23 +34,23 @@ func (matcher *MatchYAMLMatcher) Match(actual interface{}) (success bool, err er
return equal, nil
}
-func (matcher *MatchYAMLMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) FailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *MatchYAMLMatcher) NegatedFailureMessage(actual any) (message string) {
actualString, expectedString, _ := matcher.toNormalisedStrings(actual)
return formattedMessage(format.Message(actualString, "not to match YAML of", expectedString), matcher.firstFailurePath)
}
-func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toNormalisedStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, expectedString, err := matcher.toStrings(actual)
return normalise(actualString), normalise(expectedString), err
}
func normalise(input string) string {
- var val interface{}
+ var val any
err := yaml.Unmarshal([]byte(input), &val)
if err != nil {
panic(err) // unreachable since Match already calls Unmarshal
@@ -62,7 +62,7 @@ func normalise(input string) string {
return strings.TrimSpace(string(output))
}
-func (matcher *MatchYAMLMatcher) toStrings(actual interface{}) (actualFormatted, expectedFormatted string, err error) {
+func (matcher *MatchYAMLMatcher) toStrings(actual any) (actualFormatted, expectedFormatted string, err error) {
actualString, ok := toString(actual)
if !ok {
return "", "", fmt.Errorf("MatchYAMLMatcher matcher requires a string, stringer, or []byte. Got actual:\n%s", format.Object(actual, 1))
diff --git a/vendor/github.com/onsi/gomega/matchers/not.go b/vendor/github.com/onsi/gomega/matchers/not.go
index 78b71910..c598b789 100644
--- a/vendor/github.com/onsi/gomega/matchers/not.go
+++ b/vendor/github.com/onsi/gomega/matchers/not.go
@@ -8,7 +8,7 @@ type NotMatcher struct {
Matcher types.GomegaMatcher
}
-func (m *NotMatcher) Match(actual interface{}) (bool, error) {
+func (m *NotMatcher) Match(actual any) (bool, error) {
success, err := m.Matcher.Match(actual)
if err != nil {
return false, err
@@ -16,14 +16,14 @@ func (m *NotMatcher) Match(actual interface{}) (bool, error) {
return !success, nil
}
-func (m *NotMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) FailureMessage(actual any) (message string) {
return m.Matcher.NegatedFailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *NotMatcher) NegatedFailureMessage(actual any) (message string) {
return m.Matcher.FailureMessage(actual) // works beautifully
}
-func (m *NotMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *NotMatcher) MatchMayChangeInTheFuture(actual any) bool {
return types.MatchMayChangeInTheFuture(m.Matcher, actual) // just return m.Matcher's value
}
diff --git a/vendor/github.com/onsi/gomega/matchers/or.go b/vendor/github.com/onsi/gomega/matchers/or.go
index 841ae26a..6578404b 100644
--- a/vendor/github.com/onsi/gomega/matchers/or.go
+++ b/vendor/github.com/onsi/gomega/matchers/or.go
@@ -14,7 +14,7 @@ type OrMatcher struct {
firstSuccessfulMatcher types.GomegaMatcher
}
-func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *OrMatcher) Match(actual any) (success bool, err error) {
m.firstSuccessfulMatcher = nil
for _, matcher := range m.Matchers {
success, err := matcher.Match(actual)
@@ -29,16 +29,16 @@ func (m *OrMatcher) Match(actual interface{}) (success bool, err error) {
return false, nil
}
-func (m *OrMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) FailureMessage(actual any) (message string) {
// not the most beautiful list of matchers, but not bad either...
return format.Message(actual, fmt.Sprintf("To satisfy at least one of these matchers: %s", m.Matchers))
}
-func (m *OrMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *OrMatcher) NegatedFailureMessage(actual any) (message string) {
return m.firstSuccessfulMatcher.NegatedFailureMessage(actual)
}
-func (m *OrMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (m *OrMatcher) MatchMayChangeInTheFuture(actual any) bool {
/*
Example with 3 matchers: A, B, C
diff --git a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
index adc8cee6..8be5a7cc 100644
--- a/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/panic_matcher.go
@@ -8,11 +8,11 @@ import (
)
type PanicMatcher struct {
- Expected interface{}
- object interface{}
+ Expected any
+ object any
}
-func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *PanicMatcher) Match(actual any) (success bool, err error) {
if actual == nil {
return false, fmt.Errorf("PanicMatcher expects a non-nil actual.")
}
@@ -52,7 +52,7 @@ func (matcher *PanicMatcher) Match(actual interface{}) (success bool, err error)
return
}
-func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) FailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We wanted any panic to occur, but none did.
return format.Message(actual, "to panic")
@@ -91,7 +91,7 @@ func (matcher *PanicMatcher) FailureMessage(actual interface{}) (message string)
}
}
-func (matcher *PanicMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *PanicMatcher) NegatedFailureMessage(actual any) (message string) {
if matcher.Expected == nil {
// We didn't want any panic to occur, but one did.
return format.Message(actual, fmt.Sprintf("not to panic, but panicked with\n%s", format.Object(matcher.object, 1)))
diff --git a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
index 948164ea..1d9f61d6 100644
--- a/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/receive_matcher.go
@@ -11,12 +11,12 @@ import (
)
type ReceiveMatcher struct {
- Args []interface{}
+ Args []any
receivedValue reflect.Value
channelClosed bool
}
-func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *ReceiveMatcher) Match(actual any) (success bool, err error) {
if !isChan(actual) {
return false, fmt.Errorf("ReceiveMatcher expects a channel. Got:\n%s", format.Object(actual, 1))
}
@@ -30,7 +30,7 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
var subMatcher omegaMatcher
var hasSubMatcher bool
- var resultReference interface{}
+ var resultReference any
// Valid arg formats are as follows, always with optional POINTER before
// optional MATCHER:
@@ -115,8 +115,8 @@ func (matcher *ReceiveMatcher) Match(actual interface{}) (success bool, err erro
return false, nil
}
-func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message string) {
- var matcherArg interface{}
+func (matcher *ReceiveMatcher) FailureMessage(actual any) (message string) {
+ var matcherArg any
if len(matcher.Args) > 0 {
matcherArg = matcher.Args[len(matcher.Args)-1]
}
@@ -136,8 +136,8 @@ func (matcher *ReceiveMatcher) FailureMessage(actual interface{}) (message strin
return format.Message(actual, "to receive something."+closedAddendum)
}
-func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (message string) {
- var matcherArg interface{}
+func (matcher *ReceiveMatcher) NegatedFailureMessage(actual any) (message string) {
+ var matcherArg any
if len(matcher.Args) > 0 {
matcherArg = matcher.Args[len(matcher.Args)-1]
}
@@ -157,7 +157,7 @@ func (matcher *ReceiveMatcher) NegatedFailureMessage(actual interface{}) (messag
return format.Message(actual, "not to receive anything."+closedAddendum)
}
-func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual interface{}) bool {
+func (matcher *ReceiveMatcher) MatchMayChangeInTheFuture(actual any) bool {
if !isChan(actual) {
return false
}
diff --git a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
index ec68fe8b..2adc4825 100644
--- a/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/satisfy_matcher.go
@@ -8,13 +8,13 @@ import (
)
type SatisfyMatcher struct {
- Predicate interface{}
+ Predicate any
// cached type
predicateArgType reflect.Type
}
-func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
+func NewSatisfyMatcher(predicate any) *SatisfyMatcher {
if predicate == nil {
panic("predicate cannot be nil")
}
@@ -35,7 +35,7 @@ func NewSatisfyMatcher(predicate interface{}) *SatisfyMatcher {
}
}
-func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
+func (m *SatisfyMatcher) Match(actual any) (success bool, err error) {
// prepare a parameter to pass to the predicate
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.predicateArgType) {
@@ -57,10 +57,10 @@ func (m *SatisfyMatcher) Match(actual interface{}) (success bool, err error) {
return result[0].Bool(), nil
}
-func (m *SatisfyMatcher) FailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) FailureMessage(actual any) (message string) {
return format.Message(actual, "to satisfy predicate", m.Predicate)
}
-func (m *SatisfyMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (m *SatisfyMatcher) NegatedFailureMessage(actual any) (message string) {
return format.Message(actual, "to not satisfy predicate", m.Predicate)
}
diff --git a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
index 1369c1e8..30dd58f4 100644
--- a/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
+++ b/vendor/github.com/onsi/gomega/matchers/semi_structured_data_support.go
@@ -8,7 +8,7 @@ import (
"strings"
)
-func formattedMessage(comparisonMessage string, failurePath []interface{}) string {
+func formattedMessage(comparisonMessage string, failurePath []any) string {
var diffMessage string
if len(failurePath) == 0 {
diffMessage = ""
@@ -18,7 +18,7 @@ func formattedMessage(comparisonMessage string, failurePath []interface{}) strin
return fmt.Sprintf("%s%s", comparisonMessage, diffMessage)
}
-func formattedFailurePath(failurePath []interface{}) string {
+func formattedFailurePath(failurePath []any) string {
formattedPaths := []string{}
for i := len(failurePath) - 1; i >= 0; i-- {
switch p := failurePath[i].(type) {
@@ -34,33 +34,33 @@ func formattedFailurePath(failurePath []interface{}) string {
return strings.Join(formattedPaths, "")
}
-func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
- var errorPath []interface{}
+func deepEqual(a any, b any) (bool, []any) {
+ var errorPath []any
if reflect.TypeOf(a) != reflect.TypeOf(b) {
return false, errorPath
}
switch a.(type) {
- case []interface{}:
- if len(a.([]interface{})) != len(b.([]interface{})) {
+ case []any:
+ if len(a.([]any)) != len(b.([]any)) {
return false, errorPath
}
- for i, v := range a.([]interface{}) {
- elementEqual, keyPath := deepEqual(v, b.([]interface{})[i])
+ for i, v := range a.([]any) {
+ elementEqual, keyPath := deepEqual(v, b.([]any)[i])
if !elementEqual {
return false, append(keyPath, i)
}
}
return true, errorPath
- case map[interface{}]interface{}:
- if len(a.(map[interface{}]interface{})) != len(b.(map[interface{}]interface{})) {
+ case map[any]any:
+ if len(a.(map[any]any)) != len(b.(map[any]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[interface{}]interface{}) {
- v2, ok := b.(map[interface{}]interface{})[k]
+ for k, v1 := range a.(map[any]any) {
+ v2, ok := b.(map[any]any)[k]
if !ok {
return false, errorPath
}
@@ -71,13 +71,13 @@ func deepEqual(a interface{}, b interface{}) (bool, []interface{}) {
}
return true, errorPath
- case map[string]interface{}:
- if len(a.(map[string]interface{})) != len(b.(map[string]interface{})) {
+ case map[string]any:
+ if len(a.(map[string]any)) != len(b.(map[string]any)) {
return false, errorPath
}
- for k, v1 := range a.(map[string]interface{}) {
- v2, ok := b.(map[string]interface{})[k]
+ for k, v1 := range a.(map[string]any) {
+ v2, ok := b.(map[string]any)[k]
if !ok {
return false, errorPath
}
diff --git a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
index 327350f7..f0b2c4aa 100644
--- a/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
+++ b/vendor/github.com/onsi/gomega/matchers/succeed_matcher.go
@@ -14,7 +14,7 @@ type formattedGomegaError interface {
type SucceedMatcher struct {
}
-func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err error) {
+func (matcher *SucceedMatcher) Match(actual any) (success bool, err error) {
// is purely nil?
if actual == nil {
return true, nil
@@ -29,7 +29,7 @@ func (matcher *SucceedMatcher) Match(actual interface{}) (success bool, err erro
return isNil(actual), nil
}
-func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) FailureMessage(actual any) (message string) {
var fgErr formattedGomegaError
if errors.As(actual.(error), &fgErr) {
return fgErr.FormattedGomegaError()
@@ -37,6 +37,6 @@ func (matcher *SucceedMatcher) FailureMessage(actual interface{}) (message strin
return fmt.Sprintf("Expected success, but got an error:\n%s", format.Object(actual, 1))
}
-func (matcher *SucceedMatcher) NegatedFailureMessage(actual interface{}) (message string) {
+func (matcher *SucceedMatcher) NegatedFailureMessage(actual any) (message string) {
return "Expected failure, but got no error."
}
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
index 830e3082..0d78779d 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/bipartitegraph/bipartitegraph.go
@@ -11,7 +11,7 @@ type BipartiteGraph struct {
Edges EdgeSet
}
-func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(interface{}, interface{}) (bool, error)) (*BipartiteGraph, error) {
+func NewBipartiteGraph(leftValues, rightValues []any, neighbours func(any, any) (bool, error)) (*BipartiteGraph, error) {
left := NodeOrderedSet{}
for i, v := range leftValues {
left = append(left, Node{ID: i, Value: v})
@@ -41,7 +41,7 @@ func NewBipartiteGraph(leftValues, rightValues []interface{}, neighbours func(in
// FreeLeftRight returns left node values and right node values
// of the BipartiteGraph's nodes which are not part of the given edges.
-func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []interface{}) {
+func (bg *BipartiteGraph) FreeLeftRight(edges EdgeSet) (leftValues, rightValues []any) {
for _, node := range bg.Left {
if edges.Free(node) {
leftValues = append(leftValues, node.Value)
diff --git a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
index cd597a2f..66d3578d 100644
--- a/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
+++ b/vendor/github.com/onsi/gomega/matchers/support/goraph/node/node.go
@@ -2,7 +2,7 @@ package node
type Node struct {
ID int
- Value interface{}
+ Value any
}
type NodeOrderedSet []Node
diff --git a/vendor/github.com/onsi/gomega/matchers/type_support.go b/vendor/github.com/onsi/gomega/matchers/type_support.go
index b9440ac7..d020dedc 100644
--- a/vendor/github.com/onsi/gomega/matchers/type_support.go
+++ b/vendor/github.com/onsi/gomega/matchers/type_support.go
@@ -20,16 +20,16 @@ import (
)
type omegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
-func isBool(a interface{}) bool {
+func isBool(a any) bool {
return reflect.TypeOf(a).Kind() == reflect.Bool
}
-func isNumber(a interface{}) bool {
+func isNumber(a any) bool {
if a == nil {
return false
}
@@ -37,22 +37,22 @@ func isNumber(a interface{}) bool {
return reflect.Int <= kind && kind <= reflect.Float64
}
-func isInteger(a interface{}) bool {
+func isInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Int <= kind && kind <= reflect.Int64
}
-func isUnsignedInteger(a interface{}) bool {
+func isUnsignedInteger(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Uint <= kind && kind <= reflect.Uint64
}
-func isFloat(a interface{}) bool {
+func isFloat(a any) bool {
kind := reflect.TypeOf(a).Kind()
return reflect.Float32 <= kind && kind <= reflect.Float64
}
-func toInteger(a interface{}) int64 {
+func toInteger(a any) int64 {
if isInteger(a) {
return reflect.ValueOf(a).Int()
} else if isUnsignedInteger(a) {
@@ -63,7 +63,7 @@ func toInteger(a interface{}) int64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toUnsignedInteger(a interface{}) uint64 {
+func toUnsignedInteger(a any) uint64 {
if isInteger(a) {
return uint64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -74,7 +74,7 @@ func toUnsignedInteger(a interface{}) uint64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func toFloat(a interface{}) float64 {
+func toFloat(a any) float64 {
if isInteger(a) {
return float64(reflect.ValueOf(a).Int())
} else if isUnsignedInteger(a) {
@@ -85,26 +85,26 @@ func toFloat(a interface{}) float64 {
panic(fmt.Sprintf("Expected a number! Got <%T> %#v", a, a))
}
-func isError(a interface{}) bool {
+func isError(a any) bool {
_, ok := a.(error)
return ok
}
-func isChan(a interface{}) bool {
+func isChan(a any) bool {
if isNil(a) {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Chan
}
-func isMap(a interface{}) bool {
+func isMap(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.Map
}
-func isArrayOrSlice(a interface{}) bool {
+func isArrayOrSlice(a any) bool {
if a == nil {
return false
}
@@ -116,14 +116,14 @@ func isArrayOrSlice(a interface{}) bool {
}
}
-func isString(a interface{}) bool {
+func isString(a any) bool {
if a == nil {
return false
}
return reflect.TypeOf(a).Kind() == reflect.String
}
-func toString(a interface{}) (string, bool) {
+func toString(a any) (string, bool) {
aString, isString := a.(string)
if isString {
return aString, true
@@ -147,7 +147,7 @@ func toString(a interface{}) (string, bool) {
return "", false
}
-func lengthOf(a interface{}) (int, bool) {
+func lengthOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -169,7 +169,7 @@ func lengthOf(a interface{}) (int, bool) {
return 0, false
}
}
-func capOf(a interface{}) (int, bool) {
+func capOf(a any) (int, bool) {
if a == nil {
return 0, false
}
@@ -181,7 +181,7 @@ func capOf(a interface{}) (int, bool) {
}
}
-func isNil(a interface{}) bool {
+func isNil(a any) bool {
if a == nil {
return true
}
diff --git a/vendor/github.com/onsi/gomega/matchers/with_transform.go b/vendor/github.com/onsi/gomega/matchers/with_transform.go
index 6f743b1b..6231c3b4 100644
--- a/vendor/github.com/onsi/gomega/matchers/with_transform.go
+++ b/vendor/github.com/onsi/gomega/matchers/with_transform.go
@@ -9,20 +9,20 @@ import (
type WithTransformMatcher struct {
// input
- Transform interface{} // must be a function of one parameter that returns one value and an optional error
+ Transform any // must be a function of one parameter that returns one value and an optional error
Matcher types.GomegaMatcher
// cached value
transformArgType reflect.Type
// state
- transformedValue interface{}
+ transformedValue any
}
// reflect.Type for error
var errorT = reflect.TypeOf((*error)(nil)).Elem()
-func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher) *WithTransformMatcher {
+func NewWithTransformMatcher(transform any, matcher types.GomegaMatcher) *WithTransformMatcher {
if transform == nil {
panic("transform function cannot be nil")
}
@@ -43,7 +43,7 @@ func NewWithTransformMatcher(transform interface{}, matcher types.GomegaMatcher)
}
}
-func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
+func (m *WithTransformMatcher) Match(actual any) (bool, error) {
// prepare a parameter to pass to the Transform function
var param reflect.Value
if actual != nil && reflect.TypeOf(actual).AssignableTo(m.transformArgType) {
@@ -72,15 +72,15 @@ func (m *WithTransformMatcher) Match(actual interface{}) (bool, error) {
return m.Matcher.Match(m.transformedValue)
}
-func (m *WithTransformMatcher) FailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) FailureMessage(_ any) (message string) {
return m.Matcher.FailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) NegatedFailureMessage(_ interface{}) (message string) {
+func (m *WithTransformMatcher) NegatedFailureMessage(_ any) (message string) {
return m.Matcher.NegatedFailureMessage(m.transformedValue)
}
-func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ interface{}) bool {
+func (m *WithTransformMatcher) MatchMayChangeInTheFuture(_ any) bool {
// TODO: Maybe this should always just return true? (Only an issue for non-deterministic transformers.)
//
// Querying the next matcher is fine if the transformer always will return the same value.
diff --git a/vendor/github.com/onsi/gomega/types/types.go b/vendor/github.com/onsi/gomega/types/types.go
index 30f2beed..685a46f3 100644
--- a/vendor/github.com/onsi/gomega/types/types.go
+++ b/vendor/github.com/onsi/gomega/types/types.go
@@ -10,20 +10,20 @@ type GomegaFailHandler func(message string, callerSkip ...int)
// A simple *testing.T interface wrapper
type GomegaTestingT interface {
Helper()
- Fatalf(format string, args ...interface{})
+ Fatalf(format string, args ...any)
}
-// Gomega represents an object that can perform synchronous and assynchronous assertions with Gomega matchers
+// Gomega represents an object that can perform synchronous and asynchronous assertions with Gomega matchers
type Gomega interface {
- Ω(actual interface{}, extra ...interface{}) Assertion
- Expect(actual interface{}, extra ...interface{}) Assertion
- ExpectWithOffset(offset int, actual interface{}, extra ...interface{}) Assertion
+ Ω(actual any, extra ...any) Assertion
+ Expect(actual any, extra ...any) Assertion
+ ExpectWithOffset(offset int, actual any, extra ...any) Assertion
- Eventually(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- EventuallyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Eventually(actualOrCtx any, args ...any) AsyncAssertion
+ EventuallyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
- Consistently(actualOrCtx interface{}, args ...interface{}) AsyncAssertion
- ConsistentlyWithOffset(offset int, actualOrCtx interface{}, args ...interface{}) AsyncAssertion
+ Consistently(actualOrCtx any, args ...any) AsyncAssertion
+ ConsistentlyWithOffset(offset int, actualOrCtx any, args ...any) AsyncAssertion
SetDefaultEventuallyTimeout(time.Duration)
SetDefaultEventuallyPollingInterval(time.Duration)
@@ -37,9 +37,9 @@ type Gomega interface {
//
// For details on writing custom matchers, check out: http://onsi.github.io/gomega/#adding-your-own-matchers
type GomegaMatcher interface {
- Match(actual interface{}) (success bool, err error)
- FailureMessage(actual interface{}) (message string)
- NegatedFailureMessage(actual interface{}) (message string)
+ Match(actual any) (success bool, err error)
+ FailureMessage(actual any) (message string)
+ NegatedFailureMessage(actual any) (message string)
}
/*
@@ -52,10 +52,10 @@ For example, a process' exit code can never change. So, gexec's Exit matcher re
for `MatchMayChangeInTheFuture` until the process exits, at which point it returns `false` forevermore.
*/
type OracleMatcher interface {
- MatchMayChangeInTheFuture(actual interface{}) bool
+ MatchMayChangeInTheFuture(actual any) bool
}
-func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
+func MatchMayChangeInTheFuture(matcher GomegaMatcher, value any) bool {
oracleMatcher, ok := matcher.(OracleMatcher)
if !ok {
return true
@@ -67,8 +67,13 @@ func MatchMayChangeInTheFuture(matcher GomegaMatcher, value interface{}) bool {
// AsyncAssertions are returned by Eventually and Consistently and enable matchers to be polled repeatedly to ensure
// they are eventually satisfied
type AsyncAssertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
+
+ // equivalent to above
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) AsyncAssertion
WithTimeout(interval time.Duration) AsyncAssertion
@@ -76,18 +81,18 @@ type AsyncAssertion interface {
Within(timeout time.Duration) AsyncAssertion
ProbeEvery(interval time.Duration) AsyncAssertion
WithContext(ctx context.Context) AsyncAssertion
- WithArguments(argsToForward ...interface{}) AsyncAssertion
+ WithArguments(argsToForward ...any) AsyncAssertion
MustPassRepeatedly(count int) AsyncAssertion
}
// Assertions are returned by Ω and Expect and enable assertions against Gomega matchers
type Assertion interface {
- Should(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ShouldNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ Should(matcher GomegaMatcher, optionalDescription ...any) bool
+ ShouldNot(matcher GomegaMatcher, optionalDescription ...any) bool
- To(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- ToNot(matcher GomegaMatcher, optionalDescription ...interface{}) bool
- NotTo(matcher GomegaMatcher, optionalDescription ...interface{}) bool
+ To(matcher GomegaMatcher, optionalDescription ...any) bool
+ ToNot(matcher GomegaMatcher, optionalDescription ...any) bool
+ NotTo(matcher GomegaMatcher, optionalDescription ...any) bool
WithOffset(offset int) Assertion
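The GomegaMatcher and Assertion signatures above now use any throughout; since any is an alias for interface{}, existing custom matchers keep compiling. A short sketch of a custom matcher written against the new signatures — the beEvenMatcher type is hypothetical, used only to illustrate the interface:

    package example

    import "fmt"

    type beEvenMatcher struct{}

    // Match reports whether the actual value is an even int.
    func (m *beEvenMatcher) Match(actual any) (success bool, err error) {
        n, ok := actual.(int)
        if !ok {
            return false, fmt.Errorf("BeEven expects an int, got %T", actual)
        }
        return n%2 == 0, nil
    }

    func (m *beEvenMatcher) FailureMessage(actual any) (message string) {
        return fmt.Sprintf("Expected\n\t%v\nto be even", actual)
    }

    func (m *beEvenMatcher) NegatedFailureMessage(actual any) (message string) {
        return fmt.Sprintf("Expected\n\t%v\nnot to be even", actual)
    }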
diff --git a/vendor/golang.org/x/exp/LICENSE b/vendor/golang.org/x/exp/LICENSE
deleted file mode 100644
index 6a66aea5..00000000
--- a/vendor/golang.org/x/exp/LICENSE
+++ /dev/null
@@ -1,27 +0,0 @@
-Copyright (c) 2009 The Go Authors. All rights reserved.
-
-Redistribution and use in source and binary forms, with or without
-modification, are permitted provided that the following conditions are
-met:
-
- * Redistributions of source code must retain the above copyright
-notice, this list of conditions and the following disclaimer.
- * Redistributions in binary form must reproduce the above
-copyright notice, this list of conditions and the following disclaimer
-in the documentation and/or other materials provided with the
-distribution.
- * Neither the name of Google Inc. nor the names of its
-contributors may be used to endorse or promote products derived from
-this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
-"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
-LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
-A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
-OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
-SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
-LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
-DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
-THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
-(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
-OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/vendor/golang.org/x/exp/PATENTS b/vendor/golang.org/x/exp/PATENTS
deleted file mode 100644
index 73309904..00000000
--- a/vendor/golang.org/x/exp/PATENTS
+++ /dev/null
@@ -1,22 +0,0 @@
-Additional IP Rights Grant (Patents)
-
-"This implementation" means the copyrightable works distributed by
-Google as part of the Go project.
-
-Google hereby grants to You a perpetual, worldwide, non-exclusive,
-no-charge, royalty-free, irrevocable (except as stated in this section)
-patent license to make, have made, use, offer to sell, sell, import,
-transfer and otherwise run, modify and propagate the contents of this
-implementation of Go, where such license applies only to those patent
-claims, both currently owned or controlled by Google and acquired in
-the future, licensable by Google that are necessarily infringed by this
-implementation of Go. This grant does not include claims that would be
-infringed only as a consequence of further modification of this
-implementation. If you or your agent or exclusive licensee institute or
-order or agree to the institution of patent litigation against any
-entity (including a cross-claim or counterclaim in a lawsuit) alleging
-that this implementation of Go or any code incorporated within this
-implementation of Go constitutes direct or contributory patent
-infringement, or inducement of patent infringement, then any patent
-rights granted to you under this License for this implementation of Go
-shall terminate as of the date such litigation is filed.
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
deleted file mode 100644
index 2c033dff..00000000
--- a/vendor/golang.org/x/exp/constraints/constraints.go
+++ /dev/null
@@ -1,50 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package constraints defines a set of useful constraints to be used
-// with type parameters.
-package constraints
-
-// Signed is a constraint that permits any signed integer type.
-// If future releases of Go add new predeclared signed integer types,
-// this constraint will be modified to include them.
-type Signed interface {
- ~int | ~int8 | ~int16 | ~int32 | ~int64
-}
-
-// Unsigned is a constraint that permits any unsigned integer type.
-// If future releases of Go add new predeclared unsigned integer types,
-// this constraint will be modified to include them.
-type Unsigned interface {
- ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
-}
-
-// Integer is a constraint that permits any integer type.
-// If future releases of Go add new predeclared integer types,
-// this constraint will be modified to include them.
-type Integer interface {
- Signed | Unsigned
-}
-
-// Float is a constraint that permits any floating-point type.
-// If future releases of Go add new predeclared floating-point types,
-// this constraint will be modified to include them.
-type Float interface {
- ~float32 | ~float64
-}
-
-// Complex is a constraint that permits any complex numeric type.
-// If future releases of Go add new predeclared complex numeric types,
-// this constraint will be modified to include them.
-type Complex interface {
- ~complex64 | ~complex128
-}
-
-// Ordered is a constraint that permits any ordered type: any type
-// that supports the operators < <= >= >.
-// If future releases of Go add new ordered types,
-// this constraint will be modified to include them.
-type Ordered interface {
- Integer | Float | ~string
-}
diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go
deleted file mode 100644
index ecc0dabb..00000000
--- a/vendor/golang.org/x/exp/maps/maps.go
+++ /dev/null
@@ -1,94 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package maps defines various functions useful with maps of any type.
-package maps
-
-// Keys returns the keys of the map m.
-// The keys will be in an indeterminate order.
-func Keys[M ~map[K]V, K comparable, V any](m M) []K {
- r := make([]K, 0, len(m))
- for k := range m {
- r = append(r, k)
- }
- return r
-}
-
-// Values returns the values of the map m.
-// The values will be in an indeterminate order.
-func Values[M ~map[K]V, K comparable, V any](m M) []V {
- r := make([]V, 0, len(m))
- for _, v := range m {
- r = append(r, v)
- }
- return r
-}
-
-// Equal reports whether two maps contain the same key/value pairs.
-// Values are compared using ==.
-func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k, v1 := range m1 {
- if v2, ok := m2[k]; !ok || v1 != v2 {
- return false
- }
- }
- return true
-}
-
-// EqualFunc is like Equal, but compares values using eq.
-// Keys are still compared with ==.
-func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k, v1 := range m1 {
- if v2, ok := m2[k]; !ok || !eq(v1, v2) {
- return false
- }
- }
- return true
-}
-
-// Clear removes all entries from m, leaving it empty.
-func Clear[M ~map[K]V, K comparable, V any](m M) {
- for k := range m {
- delete(m, k)
- }
-}
-
-// Clone returns a copy of m. This is a shallow clone:
-// the new keys and values are set using ordinary assignment.
-func Clone[M ~map[K]V, K comparable, V any](m M) M {
- // Preserve nil in case it matters.
- if m == nil {
- return nil
- }
- r := make(M, len(m))
- for k, v := range m {
- r[k] = v
- }
- return r
-}
-
-// Copy copies all key/value pairs in src adding them to dst.
-// When a key in src is already present in dst,
-// the value in dst will be overwritten by the value associated
-// with the key in src.
-func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
- for k, v := range src {
- dst[k] = v
- }
-}
-
-// DeleteFunc deletes any key/value pairs from m for which del returns true.
-func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
- for k, v := range m {
- if del(k, v) {
- delete(m, k)
- }
- }
-}
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
deleted file mode 100644
index fbf1934a..00000000
--- a/vendor/golang.org/x/exp/slices/cmp.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// min is a version of the predeclared function from the Go 1.21 release.
-func min[T constraints.Ordered](a, b T) T {
- if a < b || isNaN(a) {
- return a
- }
- return b
-}
-
-// max is a version of the predeclared function from the Go 1.21 release.
-func max[T constraints.Ordered](a, b T) T {
- if a > b || isNaN(a) {
- return a
- }
- return b
-}
-
-// cmpLess is a copy of cmp.Less from the Go 1.21 release.
-func cmpLess[T constraints.Ordered](x, y T) bool {
- return (isNaN(x) && !isNaN(y)) || x < y
-}
-
-// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
-func cmpCompare[T constraints.Ordered](x, y T) int {
- xNaN := isNaN(x)
- yNaN := isNaN(y)
- if xNaN && yNaN {
- return 0
- }
- if xNaN || x < y {
- return -1
- }
- if yNaN || x > y {
- return +1
- }
- return 0
-}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
deleted file mode 100644
index 46ceac34..00000000
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ /dev/null
@@ -1,515 +0,0 @@
-// Copyright 2021 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-// Package slices defines various functions useful with slices of any type.
-package slices
-
-import (
- "unsafe"
-
- "golang.org/x/exp/constraints"
-)
-
-// Equal reports whether two slices are equal: the same length and all
-// elements equal. If the lengths are different, Equal returns false.
-// Otherwise, the elements are compared in increasing index order, and the
-// comparison stops at the first unequal pair.
-// Floating point NaNs are not considered equal.
-func Equal[S ~[]E, E comparable](s1, s2 S) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i := range s1 {
- if s1[i] != s2[i] {
- return false
- }
- }
- return true
-}
-
-// EqualFunc reports whether two slices are equal using an equality
-// function on each pair of elements. If the lengths are different,
-// EqualFunc returns false. Otherwise, the elements are compared in
-// increasing index order, and the comparison stops at the first index
-// for which eq returns false.
-func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, v1 := range s1 {
- v2 := s2[i]
- if !eq(v1, v2) {
- return false
- }
- }
- return true
-}
-
-// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
-// of elements. The elements are compared sequentially, starting at index 0,
-// until one element is not equal to the other.
-// The result of comparing the first non-matching elements is returned.
-// If both slices are equal until one of them ends, the shorter slice is
-// considered less than the longer one.
-// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmpCompare(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
-}
-
-// CompareFunc is like [Compare] but uses a custom comparison function on each
-// pair of elements.
-// The result is the first non-zero result of cmp; if cmp always
-// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
-// and +1 if len(s1) > len(s2).
-func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmp(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
-}
-
-// Index returns the index of the first occurrence of v in s,
-// or -1 if not present.
-func Index[S ~[]E, E comparable](s S, v E) int {
- for i := range s {
- if v == s[i] {
- return i
- }
- }
- return -1
-}
-
-// IndexFunc returns the first index i satisfying f(s[i]),
-// or -1 if none do.
-func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
- for i := range s {
- if f(s[i]) {
- return i
- }
- }
- return -1
-}
-
-// Contains reports whether v is present in s.
-func Contains[S ~[]E, E comparable](s S, v E) bool {
- return Index(s, v) >= 0
-}
-
-// ContainsFunc reports whether at least one
-// element e of s satisfies f(e).
-func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
- return IndexFunc(s, f) >= 0
-}
-
-// Insert inserts the values v... into s at index i,
-// returning the modified slice.
-// The elements at s[i:] are shifted up to make room.
-// In the returned slice r, r[i] == v[0],
-// and r[i+len(v)] == value originally at r[i].
-// Insert panics if i is out of range.
-// This function is O(len(s) + len(v)).
-func Insert[S ~[]E, E any](s S, i int, v ...E) S {
- m := len(v)
- if m == 0 {
- return s
- }
- n := len(s)
- if i == n {
- return append(s, v...)
- }
- if n+m > cap(s) {
- // Use append rather than make so that we bump the size of
- // the slice up to the next storage class.
- // This is what Grow does but we don't call Grow because
- // that might copy the values twice.
- s2 := append(s[:i], make(S, n+m-i)...)
- copy(s2[i:], v)
- copy(s2[i+m:], s[i:])
- return s2
- }
- s = s[:n+m]
-
- // before:
- // s: aaaaaaaabbbbccccccccdddd
- // ^ ^ ^ ^
- // i i+m n n+m
- // after:
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- //
- // a are the values that don't move in s.
- // v are the values copied in from v.
- // b and c are the values from s that are shifted up in index.
- // d are the values that get overwritten, never to be seen again.
-
- if !overlaps(v, s[i+m:]) {
- // Easy case - v does not overlap either the c or d regions.
- // (It might be in some of a or b, or elsewhere entirely.)
- // The data we copy up doesn't write to v at all, so just do it.
-
- copy(s[i+m:], s[i:])
-
- // Now we have
- // s: aaaaaaaabbbbbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // Note the b values are duplicated.
-
- copy(s[i:], v)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
- }
-
- // The hard case - v overlaps c or d. We can't just shift up
- // the data because we'd move or clobber the values we're trying
- // to insert.
- // So instead, write v on top of d, then rotate.
- copy(s[n:], v)
-
- // Now we have
- // s: aaaaaaaabbbbccccccccvvvv
- // ^ ^ ^ ^
- // i i+m n n+m
-
- rotateRight(s[i:], m)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
-}
-
-// clearSlice sets all elements up to the length of s to the zero value of E.
-// We may use the builtin clear func instead, and remove clearSlice, when upgrading
-// to Go 1.21+.
-func clearSlice[S ~[]E, E any](s S) {
- var zero E
- for i := range s {
- s[i] = zero
- }
-}
-
-// Delete removes the elements s[i:j] from s, returning the modified slice.
-// Delete panics if j > len(s) or s[i:j] is not a valid slice of s.
-// Delete is O(len(s)-i), so if many items must be deleted, it is better to
-// make a single call deleting them all together than to delete one at a time.
-// Delete zeroes the elements s[len(s)-(j-i):len(s)].
-func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j:len(s)] // bounds check
-
- if i == j {
- return s
- }
-
- oldlen := len(s)
- s = append(s[:i], s[j:]...)
- clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
- return s
-}
-
-// DeleteFunc removes any elements from s for which del returns true,
-// returning the modified slice.
-// DeleteFunc zeroes the elements between the new length and the original length.
-func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
- i := IndexFunc(s, del)
- if i == -1 {
- return s
- }
- // Don't start copying elements until we find one to delete.
- for j := i + 1; j < len(s); j++ {
- if v := s[j]; !del(v) {
- s[i] = v
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
-}
-
-// Replace replaces the elements s[i:j] by the given v, and returns the
-// modified slice. Replace panics if s[i:j] is not a valid slice of s.
-// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
-func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
- _ = s[i:j] // verify that i:j is a valid subslice
-
- if i == j {
- return Insert(s, i, v...)
- }
- if j == len(s) {
- return append(s[:i], v...)
- }
-
- tot := len(s[:i]) + len(v) + len(s[j:])
- if tot > cap(s) {
- // Too big to fit, allocate and copy over.
- s2 := append(s[:i], make(S, tot-i)...) // See Insert
- copy(s2[i:], v)
- copy(s2[i+len(v):], s[j:])
- return s2
- }
-
- r := s[:tot]
-
- if i+len(v) <= j {
- // Easy, as v fits in the deleted portion.
- copy(r[i:], v)
- if i+len(v) != j {
- copy(r[i+len(v):], s[j:])
- }
- clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
- return r
- }
-
- // We are expanding (v is bigger than j-i).
- // The situation is something like this:
- // (example has i=4,j=8,len(s)=16,len(v)=6)
- // s: aaaaxxxxbbbbbbbbyy
- // ^ ^ ^ ^
- // i j len(s) tot
- // a: prefix of s
- // x: deleted range
- // b: more of s
- // y: area to expand into
-
- if !overlaps(r[i+len(v):], v) {
- // Easy, as v is not clobbered by the first copy.
- copy(r[i+len(v):], s[j:])
- copy(r[i:], v)
- return r
- }
-
- // This is a situation where we don't have a single place to which
- // we can copy v. Parts of it need to go to two different places.
- // We want to copy the prefix of v into y and the suffix into x, then
- // rotate |y| spots to the right.
- //
- // v[2:] v[:2]
- // | |
- // s: aaaavvvvbbbbbbbbvv
- // ^ ^ ^ ^
- // i j len(s) tot
- //
- // If either of those two destinations don't alias v, then we're good.
- y := len(v) - (j - i) // length of y portion
-
- if !overlaps(r[i:j], v) {
- copy(r[i:j], v[y:])
- copy(r[len(s):], v[:y])
- rotateRight(r[i:], y)
- return r
- }
- if !overlaps(r[len(s):], v) {
- copy(r[len(s):], v[:y])
- copy(r[i:j], v[y:])
- rotateRight(r[i:], y)
- return r
- }
-
- // Now we know that v overlaps both x and y.
- // That means that the entirety of b is *inside* v.
- // So we don't need to preserve b at all; instead we
- // can copy v first, then copy the b part of v out of
- // v to the right destination.
- k := startIdx(v, s[j:])
- copy(r[i:], v)
- copy(r[i+len(v):], r[i+k:])
- return r
-}
-
-// Clone returns a copy of the slice.
-// The elements are copied using assignment, so this is a shallow clone.
-func Clone[S ~[]E, E any](s S) S {
- // Preserve nil in case it matters.
- if s == nil {
- return nil
- }
- return append(S([]E{}), s...)
-}
-
-// Compact replaces consecutive runs of equal elements with a single copy.
-// This is like the uniq command found on Unix.
-// Compact modifies the contents of the slice s and returns the modified slice,
-// which may have a smaller length.
-// Compact zeroes the elements between the new length and the original length.
-func Compact[S ~[]E, E comparable](s S) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if s[k] != s[k-1] {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
-}
-
-// CompactFunc is like [Compact] but uses an equality function to compare elements.
-// For runs of elements that compare equal, CompactFunc keeps the first one.
-// CompactFunc zeroes the elements between the new length and the original length.
-func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if !eq(s[k], s[k-1]) {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
-}
-
-// Grow increases the slice's capacity, if necessary, to guarantee space for
-// another n elements. After Grow(n), at least n elements can be appended
-// to the slice without another allocation. If n is negative or too large to
-// allocate the memory, Grow panics.
-func Grow[S ~[]E, E any](s S, n int) S {
- if n < 0 {
- panic("cannot be negative")
- }
- if n -= cap(s) - len(s); n > 0 {
- // TODO(https://go.dev/issue/53888): Make using []E instead of S
- // to workaround a compiler bug where the runtime.growslice optimization
- // does not take effect. Revert when the compiler is fixed.
- s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
- }
- return s
-}
-
-// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
-func Clip[S ~[]E, E any](s S) S {
- return s[:len(s):len(s)]
-}
-
-// Rotation algorithm explanation:
-//
-// rotate left by 2
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join first parts
-// 89234567 01
-// recursively rotate first left part by 2
-// 23456789 01
-// join at the end
-// 2345678901
-//
-// rotate left by 8
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join last parts
-// 89 23456701
-// recursively rotate second part left by 6
-// 89 01234567
-// join at the end
-// 8901234567
-
-// TODO: There are other rotate algorithms.
-// This algorithm has the desirable property that it moves each element exactly twice.
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
-// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
-
-// rotateLeft rotates b left by n spaces.
-// s_final[i] = s_orig[i+r], wrapping around.
-func rotateLeft[E any](s []E, r int) {
- for r != 0 && r != len(s) {
- if r*2 <= len(s) {
- swap(s[:r], s[len(s)-r:])
- s = s[:len(s)-r]
- } else {
- swap(s[:len(s)-r], s[r:])
- s, r = s[len(s)-r:], r*2-len(s)
- }
- }
-}
-func rotateRight[E any](s []E, r int) {
- rotateLeft(s, len(s)-r)
-}
-
-// swap swaps the contents of x and y. x and y must be equal length and disjoint.
-func swap[E any](x, y []E) {
- for i := 0; i < len(x); i++ {
- x[i], y[i] = y[i], x[i]
- }
-}
-
-// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
-func overlaps[E any](a, b []E) bool {
- if len(a) == 0 || len(b) == 0 {
- return false
- }
- elemSize := unsafe.Sizeof(a[0])
- if elemSize == 0 {
- return false
- }
- // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
- // Also see crypto/internal/alias/alias.go:AnyOverlap
- return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
- uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
-}
-
-// startIdx returns the index in haystack where the needle starts.
-// prerequisite: the needle must be aliased entirely inside the haystack.
-func startIdx[E any](haystack, needle []E) int {
- p := &needle[0]
- for i := range haystack {
- if p == &haystack[i] {
- return i
- }
- }
- // TODO: what if the overlap is by a non-integral number of Es?
- panic("needle not found")
-}
-
-// Reverse reverses the elements of the slice in place.
-func Reverse[S ~[]E, E any](s S) {
- for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
- s[i], s[j] = s[j], s[i]
- }
-}
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
deleted file mode 100644
index f58bbc7b..00000000
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ /dev/null
@@ -1,197 +0,0 @@
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
-
-package slices
-
-import (
- "math/bits"
-
- "golang.org/x/exp/constraints"
-)
-
-// Sort sorts a slice of any ordered type in ascending order.
-// When sorting floating-point numbers, NaNs are ordered before other values.
-func Sort[S ~[]E, E constraints.Ordered](x S) {
- n := len(x)
- pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
-}
-
-// SortFunc sorts the slice x in ascending order as determined by the cmp
-// function. This sort is not guaranteed to be stable.
-// cmp(a, b) should return a negative number when a < b, a positive number when
-// a > b and zero when a == b or when a is not comparable to b in the sense
-// of the formal definition of Strict Weak Ordering.
-//
-// SortFunc requires that cmp is a strict weak ordering.
-// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
-// To indicate 'uncomparable', return 0 from the function.
-func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- n := len(x)
- pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
-}
-
-// SortStableFunc sorts the slice x while keeping the original order of equal
-// elements, using cmp to compare elements in the same way as [SortFunc].
-func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- stableCmpFunc(x, len(x), cmp)
-}
-
-// IsSorted reports whether x is sorted in ascending order.
-func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmpLess(x[i], x[i-1]) {
- return false
- }
- }
- return true
-}
-
-// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
-// comparison function as defined by [SortFunc].
-func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmp(x[i], x[i-1]) < 0 {
- return false
- }
- }
- return true
-}
-
-// Min returns the minimal value in x. It panics if x is empty.
-// For floating-point numbers, Min propagates NaNs (any NaN value in x
-// forces the output to be NaN).
-func Min[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Min: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = min(m, x[i])
- }
- return m
-}
-
-// MinFunc returns the minimal value in x, using cmp to compare elements.
-// It panics if x is empty. If there is more than one minimal element
-// according to the cmp function, MinFunc returns the first one.
-func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MinFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) < 0 {
- m = x[i]
- }
- }
- return m
-}
-
-// Max returns the maximal value in x. It panics if x is empty.
-// For floating-point E, Max propagates NaNs (any NaN value in x
-// forces the output to be NaN).
-func Max[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Max: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = max(m, x[i])
- }
- return m
-}
-
-// MaxFunc returns the maximal value in x, using cmp to compare elements.
-// It panics if x is empty. If there is more than one maximal element
-// according to the cmp function, MaxFunc returns the first one.
-func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MaxFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) > 0 {
- m = x[i]
- }
- }
- return m
-}
-
-// BinarySearch searches for target in a sorted slice and returns the position
-// where target is found, or the position where target would appear in the
-// sort order; it also returns a bool saying whether the target is really found
-// in the slice. The slice must be sorted in increasing order.
-func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
- // Inlining is faster than calling BinarySearchFunc with a lambda.
- n := len(x)
- // Define x[-1] < target and x[n] >= target.
- // Invariant: x[i-1] < target, x[j] >= target.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmpLess(x[h], target) {
- i = h + 1 // preserves x[i-1] < target
- } else {
- j = h // preserves x[j] >= target
- }
- }
- // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
- return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
-}
-
-// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
-// function. The slice must be sorted in increasing order, where "increasing"
-// is defined by cmp. cmp should return 0 if the slice element matches
-// the target, a negative number if the slice element precedes the target,
-// or a positive number if the slice element follows the target.
-// cmp must implement the same ordering as the slice, such that if
-// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
-func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
- n := len(x)
- // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
- // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmp(x[h], target) < 0 {
- i = h + 1 // preserves cmp(x[i - 1], target) < 0
- } else {
- j = h // preserves cmp(x[j], target) >= 0
- }
- }
- // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
- return i, i < n && cmp(x[i], target) == 0
-}
-
-type sortedHint int // hint for pdqsort when choosing the pivot
-
-const (
- unknownHint sortedHint = iota
- increasingHint
- decreasingHint
-)
-
-// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-type xorshift uint64
-
-func (r *xorshift) Next() uint64 {
- *r ^= *r << 13
- *r ^= *r >> 17
- *r ^= *r << 5
- return uint64(*r)
-}
-
-func nextPowerOfTwo(length int) uint {
- return 1 << bits.Len(uint(length))
-}
-
-// isNaN reports whether x is a NaN without requiring the math package.
-// This will always return false if T is not floating-point.
-func isNaN[T constraints.Ordered](x T) bool {
- return x != x
-}
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
deleted file mode 100644
index 06f2c7a2..00000000
--- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-// insertionSortCmpFunc sorts data[a:b] using insertion sort.
-func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownCmpFunc implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
- child++
- }
- if !(cmp(data[first+root], data[first+child]) < 0) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownCmpFunc(data, i, hi, first, cmp)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownCmpFunc(data, lo, i, first, cmp)
- }
-}
-
-// pdqsortCmpFunc sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsCmpFunc(data, a, b, cmp)
- limit--
- }
-
- pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
- if hint == decreasingHint {
- reverseRangeCmpFunc(data, a, b, cmp)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortCmpFunc(data, a, b, cmp) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
- mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortCmpFunc(data, a, mid, limit, cmp)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortCmpFunc(data, mid+1, b, limit, cmp)
- b = mid
- }
- }
-}
-
-// partitionCmpFunc does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !(cmp(data[a], data[i]) < 0) {
- i++
- }
- for i <= j && (cmp(data[a], data[j]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !(cmp(data[i], data[i-1]) < 0) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotCmpFunc chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
- j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
- k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianCmpFunc(data, i, j, k, &swaps, cmp)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
- if cmp(data[b], data[a]) < 0 {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- b, c = order2CmpFunc(data, b, c, swaps, cmp)
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- return b
-}
-
-// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
- return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
-}
-
-func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortCmpFunc(data, a, b, cmp)
- a = b
- b += blockSize
- }
- insertionSortCmpFunc(data, a, n, cmp)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeCmpFunc(data, a, a+blockSize, b, cmp)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeCmpFunc(data, a, m, n, cmp)
- }
- blockSize *= 2
- }
-}
-
-// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmp(data[h], data[a]) < 0 {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !(cmp(data[m], data[h]) < 0) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !(cmp(data[p-c], data[c]) < 0) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateCmpFunc(data, start, m, end, cmp)
- }
- if a < start && start < mid {
- symMergeCmpFunc(data, a, start, mid, cmp)
- }
- if mid < end && end < b {
- symMergeCmpFunc(data, mid, end, b, cmp)
- }
-}
-
-// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeCmpFunc(data, m-i, m, j, cmp)
- i -= j
- } else {
- swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
- j -= i
- }
- }
- // i == j
- swapRangeCmpFunc(data, m-i, m, i, cmp)
-}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
deleted file mode 100644
index 99b47c39..00000000
--- a/vendor/golang.org/x/exp/slices/zsortordered.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// insertionSortOrdered sorts data[a:b] using insertion sort.
-func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownOrdered implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
- child++
- }
- if !cmpLess(data[first+root], data[first+child]) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownOrdered(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownOrdered(data, lo, i, first)
- }
-}
-
-// pdqsortOrdered sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortOrdered(data, a, b)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortOrdered(data, a, b)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsOrdered(data, a, b)
- limit--
- }
-
- pivot, hint := choosePivotOrdered(data, a, b)
- if hint == decreasingHint {
- reverseRangeOrdered(data, a, b)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortOrdered(data, a, b) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !cmpLess(data[a-1], data[pivot]) {
- mid := partitionEqualOrdered(data, a, b, pivot)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortOrdered(data, a, mid, limit)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortOrdered(data, mid+1, b, limit)
- b = mid
- }
- }
-}
-
-// partitionOrdered does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !cmpLess(data[a], data[i]) {
- i++
- }
- for i <= j && cmpLess(data[a], data[j]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !cmpLess(data[i], data[i-1]) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotOrdered chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentOrdered(data, i, &swaps)
- j = medianAdjacentOrdered(data, j, &swaps)
- k = medianAdjacentOrdered(data, k, &swaps)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianOrdered(data, i, j, k, &swaps)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
- if cmpLess(data[b], data[a]) {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
- a, b = order2Ordered(data, a, b, swaps)
- b, c = order2Ordered(data, b, c, swaps)
- a, b = order2Ordered(data, a, b, swaps)
- return b
-}
-
-// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
- return medianOrdered(data, a-1, a, a+1, swaps)
-}
-
-func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableOrdered[E constraints.Ordered](data []E, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortOrdered(data, a, b)
- a = b
- b += blockSize
- }
- insertionSortOrdered(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeOrdered(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeOrdered(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmpLess(data[h], data[a]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !cmpLess(data[m], data[h]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !cmpLess(data[p-c], data[c]) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateOrdered(data, start, m, end)
- }
- if a < start && start < mid {
- symMergeOrdered(data, a, start, mid)
- }
- if mid < end && end < b {
- symMergeOrdered(data, mid, end, b)
- }
-}
-
-// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeOrdered(data, m-i, m, j)
- i -= j
- } else {
- swapRangeOrdered(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRangeOrdered(data, m-i, m, i)
-}
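
The removed helpers above implement the classic in-place block rotation used by stable merging: repeated block swaps shrink the larger side until the two blocks have equal length. A minimal standalone sketch of the same idea (hypothetical names, plain int slices instead of the generic constraints.Ordered code deleted above):

    package main

    import "fmt"

    // swapRange swaps the n elements starting at a with those starting at b.
    func swapRange(s []int, a, b, n int) {
        for i := 0; i < n; i++ {
            s[a+i], s[b+i] = s[b+i], s[a+i]
        }
    }

    // rotate turns 'x u v y' into 'x v u y', where u = s[a:m] and v = s[m:b],
    // using at most b-a element swaps (mirrors the rotateOrdered helper above).
    func rotate(s []int, a, m, b int) {
        i, j := m-a, b-m
        for i != j {
            if i > j {
                swapRange(s, m-i, m, j)
                i -= j
            } else {
                swapRange(s, m-i, m+j-i, i)
                j -= i
            }
        }
        swapRange(s, m-i, m, i) // i == j
    }

    func main() {
        s := []int{0, 1, 2, 3, 4, 5, 6}
        rotate(s, 1, 3, 6) // u = s[1:3], v = s[3:6]
        fmt.Println(s)     // [0 3 4 5 1 2 6]
    }
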
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 93a798ab..794b2e32 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int {
// TokensAt returns the number of tokens available at time t.
func (lim *Limiter) TokensAt(t time.Time) float64 {
lim.mu.Lock()
- _, tokens := lim.advance(t) // does not mutate lim
+ tokens := lim.advance(t) // does not mutate lim
lim.mu.Unlock()
return tokens
}
@@ -186,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) {
return
}
// advance time to now
- t, tokens := r.lim.advance(t)
+ tokens := r.lim.advance(t)
// calculate new number of tokens
tokens += restoreTokens
if burst := float64(r.lim.burst); tokens > burst {
@@ -307,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -324,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -347,7 +347,7 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
}
}
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
// Calculate the remaining number of tokens resulting from the request.
tokens -= float64(n)
@@ -380,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
return r
}
-// advance calculates and returns an updated state for lim resulting from the passage of time.
+// advance calculates and returns an updated number of tokens for lim
+// resulting from the passage of time.
// lim is not changed.
// advance requires that lim.mu is held.
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
+func (lim *Limiter) advance(t time.Time) (newTokens float64) {
last := lim.last
if t.Before(last) {
last = t
@@ -396,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
if burst := float64(lim.burst); tokens > burst {
tokens = burst
}
- return t, tokens
+ return tokens
}
// durationFromTokens is a unit conversion function from the number of tokens to the duration
@@ -405,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
- seconds := tokens / float64(limit)
- return time.Duration(float64(time.Second) * seconds)
+
+ duration := (tokens / float64(limit)) * float64(time.Second)
+
+ // Cap the duration to the maximum representable int64 value, to avoid overflow.
+ if duration > float64(math.MaxInt64) {
+ return InfDuration
+ }
+
+ return time.Duration(duration)
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
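
The rate.go changes above simplify advance to return only the token count and cap durationFromTokens so the float-to-int64 conversion cannot overflow. A minimal standalone sketch of both calculations (hypothetical helper names, not the vendored API):

    package main

    import (
        "fmt"
        "math"
        "time"
    )

    const InfDuration = time.Duration(math.MaxInt64)

    // tokensAt mirrors advance: tokens grow with elapsed time at limit
    // tokens per second and are clamped to the burst size.
    func tokensAt(last time.Time, tokens, limit float64, burst int, t time.Time) float64 {
        tokens += limit * t.Sub(last).Seconds()
        if b := float64(burst); tokens > b {
            tokens = b
        }
        return tokens
    }

    // durationFromTokens converts a token deficit back into a wait time,
    // returning InfDuration instead of overflowing time.Duration.
    func durationFromTokens(tokens, limit float64) time.Duration {
        if limit <= 0 {
            return InfDuration
        }
        d := (tokens / limit) * float64(time.Second)
        if d > float64(math.MaxInt64) {
            return InfDuration
        }
        return time.Duration(d)
    }

    func main() {
        start := time.Now()
        fmt.Println(tokensAt(start, 0, 10, 5, start.Add(2*time.Second))) // 5 (clamped to burst)
        fmt.Println(durationFromTokens(3, 10))                           // 300ms
        fmt.Println(durationFromTokens(1e300, 1e-300) == InfDuration)    // true (capped)
    }
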
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 870271ed..0458b4f9 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -322,6 +322,7 @@ type jsonPackage struct {
ImportPath string
Dir string
Name string
+ Target string
Export string
GoFiles []string
CompiledGoFiles []string
@@ -506,6 +507,7 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
Name: p.Name,
ID: p.ImportPath,
Dir: p.Dir,
+ Target: p.Target,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
@@ -811,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
if cfg.Mode&NeedEmbedPatterns != 0 {
addFields("EmbedPatterns")
}
+ if cfg.Mode&NeedTarget != 0 {
+ addFields("Target")
+ }
return "-json=" + strings.Join(fields, ",")
}
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
index 969da4c2..69eec9f4 100644
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -27,6 +27,7 @@ var modes = [...]struct {
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},
+ {NeedTarget, "NeedTarget"},
}
func (mode LoadMode) String() string {
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 9dedf977..c3a59b8e 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -59,10 +59,10 @@ import (
//
// Unfortunately there are a number of open bugs related to
// interactions among the LoadMode bits:
-// - https://github.com/golang/go/issues/56633
-// - https://github.com/golang/go/issues/56677
-// - https://github.com/golang/go/issues/58726
-// - https://github.com/golang/go/issues/63517
+// - https://go.dev/issue/56633
+// - https://go.dev/issue/56677
+// - https://go.dev/issue/58726
+// - https://go.dev/issue/63517
type LoadMode int
const (
@@ -118,6 +118,9 @@ const (
// NeedEmbedPatterns adds EmbedPatterns.
NeedEmbedPatterns
+ // NeedTarget adds Target.
+ NeedTarget
+
// Be sure to update loadmode_string.go when adding new items!
)
@@ -479,6 +482,10 @@ type Package struct {
// information for the package as provided by the build system.
ExportFile string
+ // Target is the absolute install path of the .a file, for libraries,
+ // and of the executable file, for binaries.
+ Target string
+
// Imports maps import paths appearing in the package's Go source files
// to corresponding loaded Packages.
Imports map[string]*Package
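
With the NeedTarget mode and Package.Target field added above, go/packages callers can ask the go list driver for install paths. A hedged usage sketch (loading "fmt" is only an example; Target may be empty when a package has not been installed):

    package main

    import (
        "fmt"

        "golang.org/x/tools/go/packages"
    )

    func main() {
        cfg := &packages.Config{
            // NeedName populates pkg.Name; NeedTarget populates pkg.Target.
            Mode: packages.NeedName | packages.NeedTarget,
        }
        pkgs, err := packages.Load(cfg, "fmt")
        if err != nil {
            panic(err)
        }
        for _, p := range pkgs {
            fmt.Println(p.Name, p.Target)
        }
    }
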
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
index 8d824f71..43261147 100644
--- a/vendor/golang.org/x/tools/go/types/typeutil/map.go
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -2,30 +2,35 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package typeutil defines various utilities for types, such as Map,
-// a mapping from types.Type to any values.
-package typeutil // import "golang.org/x/tools/go/types/typeutil"
+// Package typeutil defines various utilities for types, such as [Map],
+// a hash table that maps [types.Type] to any value.
+package typeutil
import (
"bytes"
"fmt"
"go/types"
- "reflect"
+ "hash/maphash"
+ "unsafe"
"golang.org/x/tools/internal/typeparams"
)
// Map is a hash-table-based mapping from types (types.Type) to
-// arbitrary any values. The concrete types that implement
+// arbitrary values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
-// Not thread-safe.
+// Read-only map operations ([Map.At], [Map.Len], and so on) may
+// safely be called concurrently.
+//
+// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
+// and 69559, if the latter proposals for a generic hash-map type and
+// a types.Hash function are accepted.
type Map struct {
- hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
@@ -36,35 +41,17 @@ type entry struct {
value any
}
-// SetHasher sets the hasher used by Map.
+// SetHasher has no effect.
//
-// All Hashers are functionally equivalent but contain internal state
-// used to cache the results of hashing previously seen types.
-//
-// A single Hasher created by MakeHasher() may be shared among many
-// Maps. This is recommended if the instances have many keys in
-// common, as it will amortize the cost of hash computation.
-//
-// A Hasher may grow without bound as new types are seen. Even when a
-// type is deleted from the map, the Hasher never shrinks, since other
-// types in the map may reference the deleted type indirectly.
-//
-// Hashers are not thread-safe, and read-only operations such as
-// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
-// read-lock) is require around all Map operations if a shared
-// hasher is accessed from multiple threads.
-//
-// If SetHasher is not called, the Map will create a private hasher at
-// the first call to Insert.
-func (m *Map) SetHasher(hasher Hasher) {
- m.hasher = hasher
-}
+// It is a relic of an optimization that is no longer profitable. Do
+// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
+func (m *Map) SetHasher(Hasher) {}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
- hash := m.hasher.Hash(key)
+ hash := hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {
@@ -83,7 +70,7 @@ func (m *Map) Delete(key types.Type) bool {
// The result is nil if the entry is not present.
func (m *Map) At(key types.Type) any {
if m != nil && m.table != nil {
- for _, e := range m.table[m.hasher.Hash(key)] {
+ for _, e := range m.table[hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}
@@ -96,7 +83,7 @@ func (m *Map) At(key types.Type) any {
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) {
if m.table != nil {
- hash := m.hasher.Hash(key)
+ hash := hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
@@ -115,10 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
- if m.hasher.memo == nil {
- m.hasher = MakeHasher()
- }
- hash := m.hasher.Hash(key)
+ hash := hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
@@ -195,53 +179,35 @@ func (m *Map) KeysString() string {
return m.toString(false)
}
-////////////////////////////////////////////////////////////////////////
-// Hasher
+// -- Hasher --
-// A Hasher maps each type to its hash value.
-// For efficiency, a hasher uses memoization; thus its memory
-// footprint grows monotonically over time.
-// Hashers are not thread-safe.
-// Hashers have reference semantics.
-// Call MakeHasher to create a Hasher.
-type Hasher struct {
- memo map[types.Type]uint32
-
- // ptrMap records pointer identity.
- ptrMap map[any]uint32
-
- // sigTParams holds type parameters from the signature being hashed.
- // Signatures are considered identical modulo renaming of type parameters, so
- // within the scope of a signature type the identity of the signature's type
- // parameters is just their index.
- //
- // Since the language does not currently support referring to uninstantiated
- // generic types or functions, and instantiated signatures do not have type
- // parameter lists, we should never encounter a second non-empty type
- // parameter list when hashing a generic signature.
- sigTParams *types.TypeParamList
+// hash returns the hash of type t.
+// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
+func hash(t types.Type) uint32 {
+ return theHasher.Hash(t)
}
-// MakeHasher returns a new Hasher instance.
-func MakeHasher() Hasher {
- return Hasher{
- memo: make(map[types.Type]uint32),
- ptrMap: make(map[any]uint32),
- sigTParams: nil,
- }
-}
+// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
+// Hashers are stateless, and all are equivalent.
+type Hasher struct{}
+
+var theHasher Hasher
+
+// MakeHasher returns Hasher{}.
+// Hashers are stateless; all are equivalent.
+func MakeHasher() Hasher { return theHasher }
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
- hash, ok := h.memo[t]
- if !ok {
- hash = h.hashFor(t)
- h.memo[t] = hash
- }
- return hash
+ return hasher{inGenericSig: false}.hash(t)
}
+// hasher holds the state of a single Hash traversal: whether we are
+// inside the signature of a generic function; this is used to
+// optimize [hasher.hashTypeParam].
+type hasher struct{ inGenericSig bool }
+
// hashString computes the Fowler–Noll–Vo hash of s.
func hashString(s string) uint32 {
var h uint32
@@ -252,21 +218,21 @@ func hashString(s string) uint32 {
return h
}
-// hashFor computes the hash of t.
-func (h Hasher) hashFor(t types.Type) uint32 {
+// hash computes the hash of t.
+func (h hasher) hash(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Alias:
- return h.Hash(types.Unalias(t))
+ return h.hash(types.Unalias(t))
case *types.Array:
- return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+ return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
case *types.Slice:
- return 9049 + 2*h.Hash(t.Elem())
+ return 9049 + 2*h.hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
@@ -277,12 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 {
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
- hash += h.Hash(f.Type())
+ hash += h.hash(f.Type())
}
return hash
case *types.Pointer:
- return 9067 + 2*h.Hash(t.Elem())
+ return 9067 + 2*h.hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
@@ -290,33 +256,14 @@ func (h Hasher) hashFor(t types.Type) uint32 {
hash *= 8863
}
- // Use a separate hasher for types inside of the signature, where type
- // parameter identity is modified to be (index, constraint). We must use a
- // new memo for this hasher as type identity may be affected by this
- // masking. For example, in func[T any](*T), the identity of *T depends on
- // whether we are mapping the argument in isolation, or recursively as part
- // of hashing the signature.
- //
- // We should never encounter a generic signature while hashing another
- // generic signature, but defensively set sigTParams only if h.mask is
- // unset.
tparams := t.TypeParams()
- if h.sigTParams == nil && tparams.Len() != 0 {
- h = Hasher{
- // There may be something more efficient than discarding the existing
- // memo, but it would require detecting whether types are 'tainted' by
- // references to type parameters.
- memo: make(map[types.Type]uint32),
- // Re-using ptrMap ensures that pointer identity is preserved in this
- // hasher.
- ptrMap: h.ptrMap,
- sigTParams: tparams,
- }
- }
+ if n := tparams.Len(); n > 0 {
+ h.inGenericSig = true // affects constraints, params, and results
- for i := 0; i < tparams.Len(); i++ {
- tparam := tparams.At(i)
- hash += 7 * h.Hash(tparam.Constraint())
+ for i := range n {
+ tparam := tparams.At(i)
+ hash += 7 * h.hash(tparam.Constraint())
+ }
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
@@ -350,17 +297,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
return hash
case *types.Map:
- return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+ return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
case *types.Chan:
- return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+ return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
case *types.Named:
- hash := h.hashPtr(t.Obj())
+ hash := h.hashTypeName(t.Obj())
targs := t.TypeArgs()
for i := 0; i < targs.Len(); i++ {
targ := targs.At(i)
- hash += 2 * h.Hash(targ)
+ hash += 2 * h.hash(targ)
}
return hash
@@ -374,17 +321,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
panic(fmt.Sprintf("%T: %v", t, t))
}
-func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
hash := 9137 + 2*uint32(n)
- for i := 0; i < n; i++ {
- hash += 3 * h.Hash(tuple.At(i).Type())
+ for i := range n {
+ hash += 3 * h.hash(tuple.At(i).Type())
}
return hash
}
-func (h Hasher) hashUnion(t *types.Union) uint32 {
+func (h hasher) hashUnion(t *types.Union) uint32 {
// Hash type restrictions.
terms, err := typeparams.UnionTermSet(t)
// if err != nil t has invalid type restrictions. Fall back on a non-zero
@@ -395,11 +342,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 {
return h.hashTermSet(terms)
}
-func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
+func (h hasher) hashTermSet(terms []*types.Term) uint32 {
hash := 9157 + 2*uint32(len(terms))
for _, term := range terms {
// term order is not significant.
- termHash := h.Hash(term.Type())
+ termHash := h.hash(term.Type())
if term.Tilde() {
termHash *= 9161
}
@@ -408,36 +355,42 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
return hash
}
-// hashTypeParam returns a hash of the type parameter t, with a hash value
-// depending on whether t is contained in h.sigTParams.
-//
-// If h.sigTParams is set and contains t, then we are in the process of hashing
-// a signature, and the hash value of t must depend only on t's index and
-// constraint: signatures are considered identical modulo type parameter
-// renaming. To avoid infinite recursion, we only hash the type parameter
-// index, and rely on types.Identical to handle signatures where constraints
-// are not identical.
-//
-// Otherwise the hash of t depends only on t's pointer identity.
-func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
- if h.sigTParams != nil {
- i := t.Index()
- if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
- return 9173 + 3*uint32(i)
- }
+// hashTypeParam returns the hash of a type parameter.
+func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
+ // Within the signature of a generic function, TypeParams are
+ // identical if they have the same index and constraint, so we
+ // hash them based on index.
+ //
+ // When we are outside a generic function, free TypeParams are
+ // identical iff they are the same object, so we can use a
+ // more discriminating hash consistent with object identity.
+ // This optimization saves [Map] about 4% when hashing all the
+ // types.Info.Types in the forward closure of net/http.
+ if !h.inGenericSig {
+ // Optimization: outside a generic function signature,
+ // use a more discriminating hash consistent with object identity.
+ return h.hashTypeName(t.Obj())
}
- return h.hashPtr(t.Obj())
+ return 9173 + 3*uint32(t.Index())
}
-// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
-// pointers values are not dependent on the GC.
-func (h Hasher) hashPtr(ptr any) uint32 {
- if hash, ok := h.ptrMap[ptr]; ok {
- return hash
- }
- hash := uint32(reflect.ValueOf(ptr).Pointer())
- h.ptrMap[ptr] = hash
- return hash
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+ // Since types.Identical uses == to compare TypeNames,
+ // the Hash function uses maphash.Comparable.
+ // TODO(adonovan): or will, when it becomes available in go1.24.
+ // In the meantime we use the pointer's numeric value.
+ //
+ // hash := maphash.Comparable(theSeed, tname)
+ //
+ // (Another approach would be to hash the name and package
+ // path, and whether or not it is a package-level typename. It
+ // is rare for a package to define multiple local types with
+ // the same name.)
+ hash := uintptr(unsafe.Pointer(tname))
+ return uint32(hash ^ (hash >> 32))
}
// shallowHash computes a hash of t without looking at any of its
@@ -454,7 +407,7 @@ func (h Hasher) hashPtr(ptr any) uint32 {
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
-func (h Hasher) shallowHash(t types.Type) uint32 {
+func (h hasher) shallowHash(t types.Type) uint32 {
// t is the type of an interface method (Signature),
// its params or results (Tuples), or their immediate
// elements (mostly Slice, Pointer, Basic, Named),
@@ -475,7 +428,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
case *types.Tuple:
n := t.Len()
hash := 9137 + 2*uint32(n)
- for i := 0; i < n; i++ {
+ for i := range n {
hash += 53471161 * h.shallowHash(t.At(i).Type())
}
return hash
@@ -508,10 +461,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
return 9127
case *types.Named:
- return h.hashPtr(t.Obj())
+ return h.hashTypeName(t.Obj())
case *types.TypeParam:
- return h.hashPtr(t.Obj())
+ return h.hashTypeParam(t)
}
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
}
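
The Hasher rework above is internal; the exported typeutil.Map API (Set, At, Len, Delete) is unchanged, and read-only calls are now documented as safe for concurrent use. A small usage sketch showing that lookups go through types.Identical rather than pointer equality:

    package main

    import (
        "fmt"
        "go/types"

        "golang.org/x/tools/go/types/typeutil"
    )

    func main() {
        var m typeutil.Map // the zero value is a valid empty map

        intSlice := types.NewSlice(types.Typ[types.Int])  // []int
        sameSlice := types.NewSlice(types.Typ[types.Int]) // a distinct but identical []int

        m.Set(intSlice, "slice of int")

        // At hashes the key and compares with types.Identical, so the
        // structurally identical (but distinct) key finds the entry.
        fmt.Println(m.At(sameSlice)) // slice of int
        fmt.Println(m.Len())         // 1
    }
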
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
index 6f5d8a21..5662a311 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -2,52 +2,183 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
-
-// This file implements FindExportData.
+// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
+// This file also additionally implements FindExportData for gcexportdata.NewReader.
package gcimporter
import (
"bufio"
+ "bytes"
+ "errors"
"fmt"
+ "go/build"
"io"
- "strconv"
+ "os"
+ "os/exec"
+ "path/filepath"
"strings"
+ "sync"
)
-func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
- // See $GOROOT/include/ar.h.
- hdr := make([]byte, 16+12+6+6+8+10+2)
- _, err = io.ReadFull(r, hdr)
- if err != nil {
- return
- }
- // leave for debugging
- if false {
- fmt.Printf("header: %s", hdr)
- }
- s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
- length, err := strconv.Atoi(s)
- size = int64(length)
- if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
- err = fmt.Errorf("invalid archive header")
- return
- }
- name = strings.TrimSpace(string(hdr[:16]))
- return
-}
-
// FindExportData positions the reader r at the beginning of the
// export data section of an underlying cmd/compile created archive
// file by reading from it. The reader must be positioned at the
// start of the file before calling this function.
-// The size result is the length of the export data in bytes.
+// This returns the length of the export data in bytes.
//
// This function is needed by [gcexportdata.Read], which must
// accept inputs produced by the last two releases of cmd/compile,
// plus tip.
func FindExportData(r *bufio.Reader) (size int64, err error) {
+ arsize, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ size = int64(arsize)
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ size -= int64(len(objapi))
+ for _, h := range headers {
+ size -= int64(len(h))
+ }
+
+ // Check for the binary export data section header "$$B\n".
+ // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ hdr := string(line)
+ if hdr != "$$B\n" {
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+ size -= int64(len(hdr))
+
+ // For files with a binary export data header "$$B\n",
+ // these are always terminated by an end-of-section marker "\n$$\n".
+ // So the last bytes must always be this constant.
+ //
+ // The end-of-section marker is not a part of the export data itself.
+ // Do not include these in size.
+ //
+ // It would be nice to have sanity check that the final bytes after
+ // the export data are indeed the end-of-section marker. The split
+ // of gcexportdata.NewReader and gcexportdata.Read make checking this
+ // ugly so gcimporter gives up enforcing this. The compiler and go/types
+ // importer do enforce this, which seems good enough.
+ const endofsection = "\n$$\n"
+ size -= int64(len(endofsection))
+
+ if size < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+ return
+ }
+
+ return
+}
+
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+// - An archive file containing a package definition file.
+// - The package definition file contains headers followed by a data section.
+// Headers are lines (≤ 4kb) that do not start with "$$".
+// - The data section starts with "$$B\n" followed by export data followed
+// by an end of section marker "\n$$\n". (The section start "$$\n" is no
+// longer supported.)
+// - The export data starts with a format byte ('u') followed by the <data> in
+// the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive files are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+//
+// | !<arch>\n | ar file signature
+// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
+// | go object <...>\n | objabi header
+// | <...>\n | other headers such as build id
+// | $$B\n | binary format marker
+// | u<data>\n | unified export
+// | $$\n | end-of-section marker
+// | [optional padding] | padding byte (0x0A) if size is odd
+// | [ar file header] | other ar files
+// | [ar file data] |
+func ReadUnified(r *bufio.Reader) (data []byte, err error) {
+ // We historically guaranteed headers at the default buffer size (4096) work.
+ // This ensures we can use ReadSlice throughout.
+ const minBufferSize = 4096
+ r = bufio.NewReaderSize(r, minBufferSize)
+
+ size, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ n := size
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ n -= len(objapi)
+ for _, h := range headers {
+ n -= len(h)
+ }
+
+ hdrlen, err := ReadExportDataHeader(r)
+ if err != nil {
+ return
+ }
+ n -= hdrlen
+
+ // size also includes the end of section marker. Remove that many bytes from the end.
+ const marker = "\n$$\n"
+ n -= len(marker)
+
+ if n < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
+ return
+ }
+
+ // Read n bytes from buf.
+ data = make([]byte, n)
+ _, err = io.ReadFull(r, data)
+ if err != nil {
+ return
+ }
+
+ // Check for marker at the end.
+ var suffix [len(marker)]byte
+ _, err = io.ReadFull(r, suffix[:])
+ if err != nil {
+ return
+ }
+ if s := string(suffix[:]); s != marker {
+ err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
+ return
+ }
+
+ return
+}
+
+// FindPackageDefinition positions the reader r at the beginning of a package
+// definition file ("__.PKGDEF") within a GC-created archive by reading
+// from it, and returns the size of the package definition file in the archive.
+//
+// The reader must be positioned at the start of the archive file before calling
+// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
+//
+// See cmd/internal/archive for details on the archive format.
+func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
+ // Uses ReadSlice to limit risk of malformed inputs.
+
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
@@ -61,56 +192,230 @@ func FindExportData(r *bufio.Reader) (size int64, err error) {
return
}
- // Archive file. Scan to __.PKGDEF.
- var name string
- if name, size, err = readGopackHeader(r); err != nil {
- return
- }
- arsize := size
-
- // First entry should be __.PKGDEF.
- if name != "__.PKGDEF" {
- err = fmt.Errorf("go archive is missing __.PKGDEF")
- return
- }
-
- // Read first line of __.PKGDEF data, so that line
- // is once again the first line of the input.
- if line, err = r.ReadSlice('\n'); err != nil {
- err = fmt.Errorf("can't find export data (%v)", err)
- return
- }
- size -= int64(len(line))
-
- // Now at __.PKGDEF in archive or still at beginning of file.
- // Either way, line should begin with "go object ".
- if !strings.HasPrefix(string(line), "go object ") {
- err = fmt.Errorf("not a Go object file")
- return
- }
-
- // Skip over object headers to get to the export data section header "$$B\n".
- // Object headers are lines that do not start with '$'.
- for line[0] != '$' {
- if line, err = r.ReadSlice('\n'); err != nil {
- err = fmt.Errorf("can't find export data (%v)", err)
- return
- }
- size -= int64(len(line))
- }
-
- // Check for the binary export data section header "$$B\n".
- hdr := string(line)
- if hdr != "$$B\n" {
- err = fmt.Errorf("unknown export data header: %q", hdr)
- return
- }
- // TODO(taking): Remove end-of-section marker "\n$$\n" from size.
-
- if size < 0 {
- err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+ // package export block should be first
+ size = readArchiveHeader(r, "__.PKGDEF")
+ if size <= 0 {
+ err = fmt.Errorf("not a package file")
return
}
return
}
+
+// ReadObjectHeaders reads object headers from the reader. Object headers are
+// lines that do not start with an end-of-section marker "$$". The first header
+// is the objabi header. On success, the reader will be positioned at the beginning
+// of the end-of-section marker.
+//
+// It returns an error if any header does not fit in r.Size() bytes.
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
+ // line is a temporary buffer for headers.
+ // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
+ var line []byte
+
+ // objapi header should be the first line
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ objapi = string(line)
+
+ // objapi header begins with "go object ".
+ if !strings.HasPrefix(objapi, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", objapi)
+ return
+ }
+
+ // process remaining object header lines
+ for {
+ // check for an end of section marker "$$"
+ line, err = r.Peek(2)
+ if err != nil {
+ return
+ }
+ if string(line) == "$$" {
+ return // stop
+ }
+
+ // read next header
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ headers = append(headers, string(line))
+ }
+}
+
+// ReadExportDataHeader reads the export data header and format from r.
+// It returns the number of bytes read, or an error if the format is no longer
+// supported or it failed to read.
+//
+// The only currently supported format is binary export data in the
+// unified export format.
+func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
+ // Read export data header.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+
+ hdr := string(line)
+ switch hdr {
+ case "$$\n":
+ err = fmt.Errorf("old textual export format no longer supported (recompile package)")
+ return
+
+ case "$$B\n":
+ var format byte
+ format, err = r.ReadByte()
+ if err != nil {
+ return
+ }
+ // The unified export format starts with a 'u'.
+ switch format {
+ case 'u':
+ default:
+ // Older no longer supported export formats include:
+ // indexed export format which started with an 'i'; and
+ // the older binary export format which started with a 'c',
+ // 'd', or 'v' (from "version").
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
+ return
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+
+ n = len(hdr) + 1 // + 1 is for 'u'
+ return
+}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+//
+// FindPkg is only used in tests within x/tools.
+func FindPkg(path, srcDir string) (filename, id string, err error) {
+ // TODO(taking): Move internal/exportdata.FindPkg into its own file,
+ // and then this copy into a _test package.
+ if path == "" {
+ return "", "", errors.New("path is empty")
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ var bp *build.Package
+ bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ if bp.Goroot && bp.Dir != "" {
+ filename, err = lookupGorootExport(bp.Dir)
+ if err == nil {
+ _, err = os.Stat(filename)
+ }
+ if err == nil {
+ return filename, bp.ImportPath, nil
+ }
+ }
+ goto notfound
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ }
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ f, statErr := os.Stat(filename)
+ if statErr == nil && !f.IsDir() {
+ return filename, id, nil
+ }
+ if err == nil {
+ err = statErr
+ }
+ }
+
+notfound:
+ if err == nil {
+ return "", path, fmt.Errorf("can't find import: %q", path)
+ }
+ return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
+}
+
+var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
+
+var exportMap sync.Map // package dir → func() (string, error)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+//
+// lookupGorootExport is only used in tests within x/tools.
+func lookupGorootExport(pkgDir string) (string, error) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ err error
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
+ listOnce.Do(func() {
+ cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
+ var output []byte
+ output, err = cmd.Output()
+ if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ err = errors.New(string(ee.Stderr))
+ }
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, err
+ })
+ }
+
+ return f.(func() (string, error))()
+}
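
The archive layout documented above is what the public gcexportdata wrapper parses on behalf of this internal package. A hedged sketch of reading a compiled package's export data (the .a path and import path below are placeholders):

    package main

    import (
        "fmt"
        "go/token"
        "go/types"
        "os"

        "golang.org/x/tools/go/gcexportdata"
    )

    func main() {
        f, err := os.Open("/path/to/pkg.a") // placeholder: a cmd/compile-created archive
        if err != nil {
            panic(err)
        }
        defer f.Close()

        // NewReader positions the reader at the export data section, i.e.
        // past the __.PKGDEF entry and object headers described above.
        r, err := gcexportdata.NewReader(f)
        if err != nil {
            panic(err)
        }

        fset := token.NewFileSet()
        imports := make(map[string]*types.Package)
        pkg, err := gcexportdata.Read(r, fset, imports, "example.com/pkg") // placeholder import path
        if err != nil {
            panic(err)
        }
        fmt.Println(pkg.Path(), pkg.Name())
    }
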
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index dbbca860..3dbd21d1 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
- "bytes"
"fmt"
- "go/build"
"go/token"
"go/types"
"io"
"os"
- "os/exec"
- "path/filepath"
- "strings"
- "sync"
)
const (
@@ -45,127 +39,14 @@ const (
trace = false
)
-var exportMap sync.Map // package dir → func() (string, bool)
-
-// lookupGorootExport returns the location of the export data
-// (normally found in the build cache, but located in GOROOT/pkg
-// in prior Go releases) for the package located in pkgDir.
-//
-// (We use the package's directory instead of its import path
-// mainly to simplify handling of the packages in src/vendor
-// and cmd/vendor.)
-func lookupGorootExport(pkgDir string) (string, bool) {
- f, ok := exportMap.Load(pkgDir)
- if !ok {
- var (
- listOnce sync.Once
- exportPath string
- )
- f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
- listOnce.Do(func() {
- cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
- cmd.Dir = build.Default.GOROOT
- var output []byte
- output, err := cmd.Output()
- if err != nil {
- return
- }
-
- exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
- if len(exports) != 1 {
- return
- }
-
- exportPath = exports[0]
- })
-
- return exportPath, exportPath != ""
- })
- }
-
- return f.(func() (string, bool))()
-}
-
-var pkgExts = [...]string{".a", ".o"}
-
-// FindPkg returns the filename and unique package id for an import
-// path based on package information provided by build.Import (using
-// the build.Default build.Context). A relative srcDir is interpreted
-// relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
- if path == "" {
- return
- }
-
- var noext string
- switch {
- default:
- // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
- // Don't require the source files to be present.
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
- srcDir = abs
- }
- bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
- if bp.PkgObj == "" {
- var ok bool
- if bp.Goroot && bp.Dir != "" {
- filename, ok = lookupGorootExport(bp.Dir)
- }
- if !ok {
- id = path // make sure we have an id to print in error message
- return
- }
- } else {
- noext = strings.TrimSuffix(bp.PkgObj, ".a")
- id = bp.ImportPath
- }
-
- case build.IsLocalImport(path):
- // "./x" -> "/this/directory/x.ext", "/this/directory/x"
- noext = filepath.Join(srcDir, path)
- id = noext
-
- case filepath.IsAbs(path):
- // for completeness only - go/build.Import
- // does not support absolute imports
- // "/x" -> "/x.ext", "/x"
- noext = path
- id = path
- }
-
- if false { // for debugging
- if path != id {
- fmt.Printf("%s -> %s\n", path, id)
- }
- }
-
- if filename != "" {
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
-
- // try extensions
- for _, ext := range pkgExts {
- filename = noext + ext
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
-
- filename = "" // not found
- return
-}
-
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
//
-// TODO(taking): Import is only used in tests. Move to gcimporter_test.
-func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+// Import is only used in tests.
+func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
- var filename, id string
+ var id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
@@ -184,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
rc = f
} else {
- filename, id = FindPkg(path, srcDir)
+ var filename string
+ filename, id, err = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
- return nil, fmt.Errorf("can't find import: %q", id)
+ return nil, err
}
// no need to re-import if the package was imported completely before
@@ -212,54 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()
- var size int64
buf := bufio.NewReader(rc)
- if size, err = FindExportData(buf); err != nil {
- return
- }
-
- var data []byte
- data, err = io.ReadAll(buf)
+ data, err := ReadUnified(buf)
if err != nil {
+ err = fmt.Errorf("import %q: %v", path, err)
return
}
- if len(data) == 0 {
- return nil, fmt.Errorf("no data to load a package from for path %s", id)
- }
- // TODO(gri): allow clients of go/importer to provide a FileSet.
- // Or, define a new standard go/types/gcexportdata package.
- fset := token.NewFileSet()
+ // unified: emitted by cmd/compile since go1.20.
+ _, pkg, err = UImportData(fset, packages, data, id)
- // Select appropriate importer.
- switch data[0] {
- case 'v', 'c', 'd':
- // binary: emitted by cmd/compile till go1.10; obsolete.
- return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
-
- case 'i':
- // indexed: emitted by cmd/compile till go1.19;
- // now used only for serializing go/types.
- // See https://github.com/golang/go/issues/69491.
- _, pkg, err := IImportData(fset, packages, data[1:], id)
- return pkg, err
-
- case 'u':
- // unified: emitted by cmd/compile since go1.20.
- _, pkg, err := UImportData(fset, packages, data[1:size], id)
- return pkg, err
-
- default:
- l := len(data)
- if l > 10 {
- l = 10
- }
- return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
- }
+ return
}
-
-type byPath []*types.Package
-
-func (a byPath) Len() int { return len(a) }
-func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index e260c0e8..12943927 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -5,8 +5,6 @@
// Indexed package import.
// See iexport.go for the export data format.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
-
package gcimporter
import (
@@ -673,7 +671,9 @@ func (r *importReader) obj(name string) {
case varTag:
typ := r.typ()
- r.declare(types.NewVar(pos, r.currPkg, name, typ))
+ v := types.NewVar(pos, r.currPkg, name, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ r.declare(v)
default:
errorf("unexpected tag: %v", tag)
@@ -1111,3 +1111,9 @@ func (r *importReader) byte() byte {
}
return x
}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go
new file mode 100644
index 00000000..4af810dc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
+func readArchiveHeader(b *bufio.Reader, name string) int {
+ // architecture-independent object file output
+ const HeaderSize = 60
+
+ var buf [HeaderSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
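
readArchiveHeader above parses the fixed 60-byte ar entry header: the member name occupies bytes [0:16) and the decimal size occupies bytes [48:58), terminated by the "`\n" magic. A standalone sketch with a synthetic header (illustrative only, not the vendored code):

    package main

    import (
        "fmt"
        "strconv"
        "strings"
    )

    func main() {
        // A synthetic 60-byte header for a member named __.PKGDEF of 1234 bytes:
        // name(16) mtime(12) uid(6) gid(6) mode(8) size(10) magic(2).
        hdr := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10d`\n",
            "__.PKGDEF", "0", "0", "0", "0644", 1234)
        fmt.Println(len(hdr)) // 60

        name := strings.TrimSpace(hdr[0:16])
        size, _ := strconv.Atoi(strings.TrimSpace(hdr[48:58]))
        fmt.Println(name, size) // __.PKGDEF 1234
    }
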
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 1db40861..522287d1 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -11,10 +11,10 @@ import (
"go/token"
"go/types"
"sort"
- "strings"
"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/pkgbits"
+ "golang.org/x/tools/internal/typesinternal"
)
// A pkgReader holds the shared state for reading a unified IR package
@@ -71,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
}
s := string(data)
- s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
pkg = readUnifiedPackage(fset, nil, imports, input)
return
@@ -266,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
func (r *reader) doPkg() *types.Package {
path := r.String()
switch path {
- case "":
+ // cmd/compile emits path="main" for main packages because
+ // that's the linker symbol prefix it used; but we need
+ // the package's path as it would be reported by go list,
+ // hence "main" below.
+ // See test at go/packages.TestMainPackagePathInModeTypes.
+ case "", "main":
path = r.p.PkgPath()
case "builtin":
return nil // universe
@@ -569,6 +573,7 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
sig := fn.Type().(*types.Signature)
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
+ typesinternal.SetVarKind(recv, typesinternal.RecvVar)
methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
}
@@ -616,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
case pkgbits.ObjVar:
pos := r.pos()
typ := r.typ()
- declare(types.NewVar(pos, objPkg, objName, typ))
+ v := types.NewVar(pos, objPkg, objName, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ declare(v)
}
}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index e333efc8..7ea90134 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -28,7 +28,7 @@ import (
"golang.org/x/tools/internal/event/label"
)
-// An Runner will run go command invocations and serialize
+// A Runner will run go command invocations and serialize
// them if it sees a concurrency error.
type Runner struct {
// once guards the runner initialization.
@@ -179,7 +179,7 @@ type Invocation struct {
CleanEnv bool
Env []string
WorkingDir string
- Logf func(format string, args ...interface{})
+ Logf func(format string, args ...any)
}
// Postcondition: both error results have same nilness.
@@ -388,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
case err := <-resChan:
return err
case <-timer.C:
- HandleHangingGoCommand(startTime, cmd)
+ // HandleHangingGoCommand terminates this process.
+ // Pass off resChan in case we can collect the command error.
+ handleHangingGoCommand(startTime, cmd, resChan)
case <-ctx.Done():
}
} else {
@@ -413,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
}
// Didn't shut down in response to interrupt. Kill it hard.
- // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
- // on certain platforms, such as unix.
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
log.Printf("error killing the Go command: %v", err)
}
@@ -422,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
return <-resChan
}
-func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
+// handleHangingGoCommand outputs debugging information to help diagnose the
+// cause of a hanging Go command, and then exits with log.Fatalf.
+func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
switch runtime.GOOS {
- case "linux", "darwin", "freebsd", "netbsd":
+ case "linux", "darwin", "freebsd", "netbsd", "openbsd":
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
-The gopls test runner has detected a hanging go command. In order to debug
-this, the output of ps and lsof/fstat is printed below.
+ The gopls test runner has detected a hanging go command. In order to debug
+ this, the output of ps and lsof/fstat is printed below.
-See golang/go#54461 for more details.`)
+ See golang/go#54461 for more details.`)
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
fmt.Fprintln(os.Stderr, "-------------------------")
@@ -438,7 +440,7 @@ See golang/go#54461 for more details.`)
psCmd.Stdout = os.Stderr
psCmd.Stderr = os.Stderr
if err := psCmd.Run(); err != nil {
- panic(fmt.Sprintf("running ps: %v", err))
+ log.Printf("Handling hanging Go command: running ps: %v", err)
}
listFiles := "lsof"
@@ -452,10 +454,24 @@ See golang/go#54461 for more details.`)
listFilesCmd.Stdout = os.Stderr
listFilesCmd.Stderr = os.Stderr
if err := listFilesCmd.Run(); err != nil {
- panic(fmt.Sprintf("running %s: %v", listFiles, err))
+ log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
+ }
+ // Try to extract information about the slow go process by issuing a SIGQUIT.
+ if err := cmd.Process.Signal(sigStuckProcess); err == nil {
+ select {
+ case err := <-resChan:
+ stderr := "not a bytes.Buffer"
+ if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
+ stderr = buf.String()
+ }
+ log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
+ case <-time.After(5 * time.Second):
+ }
+ } else {
+ log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
}
}
- panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
+ log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
}
func cmdDebugStr(cmd *exec.Cmd) string {
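
The hang handler above now sends sigStuckProcess (SIGQUIT on Unix) so a stuck Go subprocess dumps its goroutine stacks before the test runner gives up. A hedged standalone sketch of the same technique; `sleep` stands in for a hanging `go` invocation and, not being a Go program, will simply terminate rather than print stacks:

    //go:build unix

    package main

    import (
        "bytes"
        "fmt"
        "os/exec"
        "syscall"
        "time"
    )

    func main() {
        var stderr bytes.Buffer
        cmd := exec.Command("sleep", "60") // stand-in for a hanging subprocess
        cmd.Stderr = &stderr
        if err := cmd.Start(); err != nil {
            panic(err)
        }

        time.Sleep(100 * time.Millisecond)

        // For a Go process, SIGQUIT makes the runtime dump all goroutine
        // stacks to stderr before exiting; we capture them in the buffer.
        _ = cmd.Process.Signal(syscall.SIGQUIT)

        err := cmd.Wait()
        fmt.Printf("process exited: %v\nstderr:\n%s", err, stderr.String())
    }
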
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 00000000..469c648e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 00000000..169d37c8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/vendor/golang.org/x/tools/internal/imports/fix.go b/vendor/golang.org/x/tools/internal/imports/fix.go
index 5ae57697..bf6b0aad 100644
--- a/vendor/golang.org/x/tools/internal/imports/fix.go
+++ b/vendor/golang.org/x/tools/internal/imports/fix.go
@@ -780,7 +780,7 @@ func GetAllCandidates(ctx context.Context, wrapped func(ImportFix), searchPrefix
return true
},
dirFound: func(pkg *pkg) bool {
- if !canUse(filename, pkg.dir) {
+ if !CanUse(filename, pkg.dir) {
return false
}
// Try the assumed package name first, then a simpler path match
@@ -815,7 +815,7 @@ func GetImportPaths(ctx context.Context, wrapped func(ImportFix), searchPrefix,
return true
},
dirFound: func(pkg *pkg) bool {
- if !canUse(filename, pkg.dir) {
+ if !CanUse(filename, pkg.dir) {
return false
}
return strings.HasPrefix(pkg.importPathShort, searchPrefix)
@@ -927,7 +927,7 @@ type ProcessEnv struct {
WorkingDir string
// If Logf is non-nil, debug logging is enabled through this function.
- Logf func(format string, args ...interface{})
+ Logf func(format string, args ...any)
// If set, ModCache holds a shared cache of directory info to use across
// multiple ProcessEnvs.
@@ -1132,6 +1132,9 @@ func addStdlibCandidates(pass *pass, refs References) error {
// but we have no way of figuring out what the user is using
// TODO: investigate using the toolchain version to disambiguate in the stdlib
add("math/rand/v2")
+ // math/rand has an overlapping API
+ // TestIssue66407 fails without this
+ add("math/rand")
continue
}
for importPath := range stdlib.PackageSymbols {
@@ -1736,7 +1739,7 @@ func (s *symbolSearcher) searchOne(ctx context.Context, c pkgDistance, symbols m
// searching for "client.New")
func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
// Check "internal" and "vendor" visibility:
- if !canUse(filename, pkg.dir) {
+ if !CanUse(filename, pkg.dir) {
return false
}
@@ -1759,9 +1762,9 @@ func pkgIsCandidate(filename string, refs References, pkg *pkg) bool {
return false
}
-// canUse reports whether the package in dir is usable from filename,
+// CanUse reports whether the package in dir is usable from filename,
// respecting the Go "internal" and "vendor" visibility rules.
-func canUse(filename, dir string) bool {
+func CanUse(filename, dir string) bool {
// Fast path check, before any allocations. If it doesn't contain vendor
// or internal, it's not tricky:
// Note that this can false-negative on directories like "notinternal",
diff --git a/vendor/golang.org/x/tools/internal/imports/source_env.go b/vendor/golang.org/x/tools/internal/imports/source_env.go
index d14abaa3..ec996c3c 100644
--- a/vendor/golang.org/x/tools/internal/imports/source_env.go
+++ b/vendor/golang.org/x/tools/internal/imports/source_env.go
@@ -67,7 +67,7 @@ func (s *ProcessEnvSource) ResolveReferences(ctx context.Context, filename strin
// same package name. Don't try to import ourselves.
return false
}
- if !canUse(filename, pkg.dir) {
+ if !CanUse(filename, pkg.dir) {
return false
}
mu.Lock()
diff --git a/vendor/golang.org/x/tools/internal/modindex/index.go b/vendor/golang.org/x/tools/internal/modindex/index.go
index 27b6dd83..9665356c 100644
--- a/vendor/golang.org/x/tools/internal/modindex/index.go
+++ b/vendor/golang.org/x/tools/internal/modindex/index.go
@@ -17,6 +17,7 @@ import (
"path/filepath"
"strconv"
"strings"
+ "testing"
"time"
)
@@ -85,6 +86,28 @@ type Entry struct {
Names []string // exported names and information
}
+// IndexDir is where the module index is stored.
+var IndexDir string
+
+// Set IndexDir
+func init() {
+ var dir string
+ var err error
+ if testing.Testing() {
+ dir = os.TempDir()
+ } else {
+ dir, err = os.UserCacheDir()
+ // shouldn't happen, but TempDir is better than
+ // creating ./go/imports
+ if err != nil {
+ dir = os.TempDir()
+ }
+ }
+ dir = filepath.Join(dir, "go", "imports")
+ os.MkdirAll(dir, 0777)
+ IndexDir = dir
+}
+
// ReadIndex reads the latest version of the on-disk index
// for the cache directory cd.
// It returns (nil, nil) if there is no index, but returns
@@ -95,10 +118,7 @@ func ReadIndex(cachedir string) (*Index, error) {
return nil, err
}
cd := Abspath(cachedir)
- dir, err := IndexDir()
- if err != nil {
- return nil, err
- }
+ dir := IndexDir
base := indexNameBase(cd)
iname := filepath.Join(dir, base)
buf, err := os.ReadFile(iname)
@@ -185,12 +205,8 @@ func readIndexFrom(cd Abspath, bx io.Reader) (*Index, error) {
// write the index as a text file
func writeIndex(cachedir Abspath, ix *Index) error {
- dir, err := IndexDir()
- if err != nil {
- return err
- }
ipat := fmt.Sprintf("index-%d-*", CurrentVersion)
- fd, err := os.CreateTemp(dir, ipat)
+ fd, err := os.CreateTemp(IndexDir, ipat)
if err != nil {
return err // can this happen?
}
@@ -201,7 +217,7 @@ func writeIndex(cachedir Abspath, ix *Index) error {
content := fd.Name()
content = filepath.Base(content)
base := indexNameBase(cachedir)
- nm := filepath.Join(dir, base)
+ nm := filepath.Join(IndexDir, base)
err = os.WriteFile(nm, []byte(content), 0666)
if err != nil {
return err
@@ -241,18 +257,6 @@ func writeIndexToFile(x *Index, fd *os.File) error {
return nil
}
-// tests can override this
-var IndexDir = indexDir
-
-// IndexDir computes the directory containing the index
-func indexDir() (string, error) {
- dir, err := os.UserCacheDir()
- if err != nil {
- return "", fmt.Errorf("cannot open UserCacheDir, %w", err)
- }
- return filepath.Join(dir, "go", "imports"), nil
-}
-
// return the base name of the file containing the name of the current index
func indexNameBase(cachedir Abspath) string {
// crc64 is a way to convert path names into 16 hex digits.
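The old IndexDir() accessor returned an error when os.UserCacheDir failed; the replacement is a package variable computed once in init, falling back to the temp dir, and using the temp dir outright inside test binaries (testing.Testing()). The fallback itself boils down to this stand-alone sketch with the same subdirectory layout:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

func main() {
	dir, err := os.UserCacheDir()
	if err != nil {
		// Rare, but falling back to the temp dir beats creating ./go/imports
		// relative to whatever the working directory happens to be.
		dir = os.TempDir()
	}
	dir = filepath.Join(dir, "go", "imports")
	if err := os.MkdirAll(dir, 0o777); err != nil {
		fmt.Fprintln(os.Stderr, "cannot create index dir:", err)
		return
	}
	fmt.Println("module index files would live in", dir)
}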
diff --git a/vendor/golang.org/x/tools/internal/modindex/lookup.go b/vendor/golang.org/x/tools/internal/modindex/lookup.go
index 29d4e3d7..5499c5c6 100644
--- a/vendor/golang.org/x/tools/internal/modindex/lookup.go
+++ b/vendor/golang.org/x/tools/internal/modindex/lookup.go
@@ -16,6 +16,7 @@ type Candidate struct {
Dir string
ImportPath string
Type LexType
+ Deprecated bool
// information for Funcs
Results int16 // how many results
Sig []Field // arg names and types
@@ -34,6 +35,36 @@ const (
Func
)
+// LookupAll only returns those Candidates whose import path
+// finds all the names.
+func (ix *Index) LookupAll(pkg string, names ...string) map[string][]Candidate {
+ // this can be made faster when benchmarks show that it needs to be
+ names = uniquify(names)
+ byImpPath := make(map[string][]Candidate)
+ for _, nm := range names {
+ cands := ix.Lookup(pkg, nm, false)
+ for _, c := range cands {
+ byImpPath[c.ImportPath] = append(byImpPath[c.ImportPath], c)
+ }
+ }
+ for k, v := range byImpPath {
+ if len(v) != len(names) {
+ delete(byImpPath, k)
+ }
+ }
+ return byImpPath
+}
+
+// uniquify returns a sorted copy of in with duplicates removed
+func uniquify(in []string) []string {
+ if len(in) == 0 {
+ return in
+ }
+ in = slices.Clone(in)
+ slices.Sort(in)
+ return slices.Compact(in)
+}
+
// Lookup finds all the symbols in the index with the given PkgName and name.
// If prefix is true, it finds all of these with name as a prefix.
func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
@@ -79,8 +110,9 @@ func (ix *Index) Lookup(pkg, name string, prefix bool) []Candidate {
Dir: string(e.Dir),
ImportPath: e.ImportPath,
Type: asLexType(flds[1][0]),
+ Deprecated: len(flds[1]) > 1 && flds[1][1] == 'D',
}
- if flds[1] == "F" {
+ if px.Type == Func {
n, err := strconv.Atoi(flds[2])
if err != nil {
continue // should never happen
@@ -111,6 +143,7 @@ func toFields(sig []string) []Field {
}
// benchmarks show this is measurably better than strings.Split
+// split into the first 4 fields, each separated by a single space
func fastSplit(x string) []string {
ans := make([]string, 0, 4)
nxt := 0
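LookupAll groups the per-name hits by import path and keeps only the paths that supplied every requested name. The invariant is easiest to see in a stand-alone sketch (a plain struct instead of the internal Candidate type; the sample data assumes math/rand/v2 exports N while math/rand does not):

package main

import (
	"fmt"
	"slices"
)

type hit struct{ ImportPath, Name string }

// lookupAll mirrors the new Index.LookupAll: only import paths that provide
// all of the requested names survive.
func lookupAll(hits []hit, names ...string) map[string][]hit {
	names = slices.Compact(slices.Sorted(slices.Values(names))) // uniquify
	byPath := make(map[string][]hit)
	for _, h := range hits {
		byPath[h.ImportPath] = append(byPath[h.ImportPath], h)
	}
	for path, hs := range byPath {
		if len(hs) != len(names) {
			delete(byPath, path)
		}
	}
	return byPath
}

func main() {
	hits := []hit{
		{"math/rand/v2", "Int"},
		{"math/rand/v2", "N"},
		{"math/rand", "Int"}, // no hit for N, so this path is dropped
	}
	fmt.Println(lookupAll(hits, "Int", "N")) // only math/rand/v2 remains
}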
diff --git a/vendor/golang.org/x/tools/internal/modindex/symbols.go b/vendor/golang.org/x/tools/internal/modindex/symbols.go
index 2e285ed9..b918529d 100644
--- a/vendor/golang.org/x/tools/internal/modindex/symbols.go
+++ b/vendor/golang.org/x/tools/internal/modindex/symbols.go
@@ -12,6 +12,7 @@ import (
"go/types"
"os"
"path/filepath"
+ "runtime"
"slices"
"strings"
@@ -19,29 +20,30 @@ import (
)
// The name of a symbol contains information about the symbol:
-// T for types
-// C for consts
-// V for vars
+// T for types, TD if the type is deprecated
+// C for consts, CD if the const is deprecated
+// V for vars, VD if the var is deprecated
// and for funcs: <name> F <number of results> (<param name> <param type>)*
// any spaces in <param type> are replaced by $s so that the fields
-// of the name are space separated
+// of the name are space separated. F is replaced by FD if the func
+// is deprecated.
type symbol struct {
pkg string // name of the symbol's package
name string // declared name
- kind string // T, C, V, or F
+ kind string // T, C, V, or F, followed by D if deprecated
sig string // signature information, for F
}
// find the symbols for the best directories
func getSymbols(cd Abspath, dirs map[string][]*directory) {
var g errgroup.Group
- g.SetLimit(-1) // maybe throttle this some day
+ g.SetLimit(max(2, runtime.GOMAXPROCS(0)/2))
for _, vv := range dirs {
// throttling some day?
d := vv[0]
g.Go(func() error {
thedir := filepath.Join(string(cd), string(d.path))
- mode := parser.SkipObjectResolution
+ mode := parser.SkipObjectResolution | parser.ParseComments
fi, err := os.ReadDir(thedir)
if err != nil {
@@ -84,6 +86,9 @@ func getFileExports(f *ast.File) []symbol {
// generic functions just like non-generic ones.
sig := dtype.Params
kind := "F"
+ if isDeprecated(decl.Doc) {
+ kind += "D"
+ }
result := []string{fmt.Sprintf("%d", dtype.Results.NumFields())}
for _, x := range sig.List {
// This code creates a string representing the type.
@@ -107,7 +112,7 @@ func getFileExports(f *ast.File) []symbol {
// print struct tags. So for this to happen the type of a formal parameter
// has to be an explicit struct, e.g. foo(x struct{a int "$"}) and ExprString
// would have to show the struct tag. Even testing for this case seems
- // a waste of effort, but let's not ignore such pathologies
+ // a waste of effort, but let's remember the possibility
if strings.Contains(tp, "$") {
continue
}
@@ -127,12 +132,16 @@ func getFileExports(f *ast.File) []symbol {
ans = append(ans, *s)
}
case *ast.GenDecl:
+ depr := isDeprecated(decl.Doc)
switch decl.Tok {
case token.CONST, token.VAR:
tp := "V"
if decl.Tok == token.CONST {
tp = "C"
}
+ if depr {
+ tp += "D"
+ }
for _, sp := range decl.Specs {
for _, x := range sp.(*ast.ValueSpec).Names {
if s := newsym(pkg, x.Name, tp, ""); s != nil {
@@ -141,8 +150,12 @@ func getFileExports(f *ast.File) []symbol {
}
}
case token.TYPE:
+ tp := "T"
+ if depr {
+ tp += "D"
+ }
for _, sp := range decl.Specs {
- if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, "T", ""); s != nil {
+ if s := newsym(pkg, sp.(*ast.TypeSpec).Name.Name, tp, ""); s != nil {
ans = append(ans, *s)
}
}
@@ -160,6 +173,22 @@ func newsym(pkg, name, kind, sig string) *symbol {
return &sym
}
+func isDeprecated(doc *ast.CommentGroup) bool {
+ if doc == nil {
+ return false
+ }
+ // go.dev/wiki/Deprecated: a paragraph starting 'Deprecated:'
+ // This code fails for /* Deprecated: */, but it's the code from
+ // gopls/internal/analysis/deprecated
+ lines := strings.Split(doc.Text(), "\n\n")
+ for _, line := range lines {
+ if strings.HasPrefix(line, "Deprecated:") {
+ return true
+ }
+ }
+ return false
+}
+
// return the package name and the value for the symbols.
// if there are multiple packages, choose one arbitrarily
// the returned slice is sorted lexicographically
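The deprecation check added above mirrors the gopls heuristic: a doc-comment paragraph that starts with "Deprecated:". A stand-alone sketch of the same check against a small parsed file (the demo source and names are invented; note that parser.ParseComments is required, which is why the diff adds it to the parse mode):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"strings"
)

const src = `package demo

// Old does something.
//
// Deprecated: use New instead.
func Old() {}

// New does something.
func New() {}
`

// isDeprecated reports whether a doc comment has a paragraph starting with
// "Deprecated:", the go.dev/wiki/Deprecated convention.
func isDeprecated(doc *ast.CommentGroup) bool {
	if doc == nil {
		return false
	}
	for _, para := range strings.Split(doc.Text(), "\n\n") {
		if strings.HasPrefix(para, "Deprecated:") {
			return true
		}
	}
	return false
}

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.ParseComments|parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}
	for _, d := range f.Decls {
		if fd, ok := d.(*ast.FuncDecl); ok {
			fmt.Printf("%s deprecated=%v\n", fd.Name.Name, isDeprecated(fd.Doc))
		}
	}
}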
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 66e69b43..78460591 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,7 +5,7 @@
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
-var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
+var GetDepsErrors = func(p any) []*PackageError { return nil }
type PackageError struct {
ImportStack []string // shortest path from package named on command line to this one
@@ -16,5 +16,5 @@ type PackageError struct {
var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var SetModFlag = func(config interface{}, value string) {}
+var SetModFlag = func(config any, value string) {}
var SetModFile = func(config interface{}, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index cdaac9ab..9f0b871f 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
{"ErrTooLarge", Var, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
+ {"FieldsFuncSeq", Func, 24},
+ {"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
+ {"Lines", Func, 24},
{"Map", Func, 0},
{"MinRead", Const, 0},
{"NewBuffer", Func, 0},
@@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
+ {"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
+ {"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
{"NewCTR", Func, 0},
{"NewGCM", Func, 2},
{"NewGCMWithNonceSize", Func, 5},
+ {"NewGCMWithRandomNonce", Func, 24},
{"NewGCMWithTagSize", Func, 11},
{"NewOFB", Func, 0},
{"Stream", Type, 0},
@@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
{"Unmarshal", Func, 0},
{"UnmarshalCompressed", Func, 15},
},
+ "crypto/fips140": {
+ {"Enabled", Func, 24},
+ },
+ "crypto/hkdf": {
+ {"Expand", Func, 24},
+ {"Extract", Func, 24},
+ {"Key", Func, 24},
+ },
"crypto/hmac": {
{"Equal", Func, 1},
{"New", Func, 0},
@@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
{"Size", Const, 0},
{"Sum", Func, 2},
},
+ "crypto/mlkem": {
+ {"(*DecapsulationKey1024).Bytes", Method, 24},
+ {"(*DecapsulationKey1024).Decapsulate", Method, 24},
+ {"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
+ {"(*DecapsulationKey768).Bytes", Method, 24},
+ {"(*DecapsulationKey768).Decapsulate", Method, 24},
+ {"(*DecapsulationKey768).EncapsulationKey", Method, 24},
+ {"(*EncapsulationKey1024).Bytes", Method, 24},
+ {"(*EncapsulationKey1024).Encapsulate", Method, 24},
+ {"(*EncapsulationKey768).Bytes", Method, 24},
+ {"(*EncapsulationKey768).Encapsulate", Method, 24},
+ {"CiphertextSize1024", Const, 24},
+ {"CiphertextSize768", Const, 24},
+ {"DecapsulationKey1024", Type, 24},
+ {"DecapsulationKey768", Type, 24},
+ {"EncapsulationKey1024", Type, 24},
+ {"EncapsulationKey768", Type, 24},
+ {"EncapsulationKeySize1024", Const, 24},
+ {"EncapsulationKeySize768", Const, 24},
+ {"GenerateKey1024", Func, 24},
+ {"GenerateKey768", Func, 24},
+ {"NewDecapsulationKey1024", Func, 24},
+ {"NewDecapsulationKey768", Func, 24},
+ {"NewEncapsulationKey1024", Func, 24},
+ {"NewEncapsulationKey768", Func, 24},
+ {"SeedSize", Const, 24},
+ {"SharedKeySize", Const, 24},
+ },
+ "crypto/pbkdf2": {
+ {"Key", Func, 24},
+ },
"crypto/rand": {
{"Int", Func, 0},
{"Prime", Func, 0},
{"Read", Func, 0},
{"Reader", Var, 0},
+ {"Text", Func, 24},
},
"crypto/rc4": {
{"(*Cipher).Reset", Method, 0},
@@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
{"Sum224", Func, 2},
{"Sum256", Func, 2},
},
+ "crypto/sha3": {
+ {"(*SHA3).AppendBinary", Method, 24},
+ {"(*SHA3).BlockSize", Method, 24},
+ {"(*SHA3).MarshalBinary", Method, 24},
+ {"(*SHA3).Reset", Method, 24},
+ {"(*SHA3).Size", Method, 24},
+ {"(*SHA3).Sum", Method, 24},
+ {"(*SHA3).UnmarshalBinary", Method, 24},
+ {"(*SHA3).Write", Method, 24},
+ {"(*SHAKE).AppendBinary", Method, 24},
+ {"(*SHAKE).BlockSize", Method, 24},
+ {"(*SHAKE).MarshalBinary", Method, 24},
+ {"(*SHAKE).Read", Method, 24},
+ {"(*SHAKE).Reset", Method, 24},
+ {"(*SHAKE).UnmarshalBinary", Method, 24},
+ {"(*SHAKE).Write", Method, 24},
+ {"New224", Func, 24},
+ {"New256", Func, 24},
+ {"New384", Func, 24},
+ {"New512", Func, 24},
+ {"NewCSHAKE128", Func, 24},
+ {"NewCSHAKE256", Func, 24},
+ {"NewSHAKE128", Func, 24},
+ {"NewSHAKE256", Func, 24},
+ {"SHA3", Type, 24},
+ {"SHAKE", Type, 24},
+ {"Sum224", Func, 24},
+ {"Sum256", Func, 24},
+ {"Sum384", Func, 24},
+ {"Sum512", Func, 24},
+ {"SumSHAKE128", Func, 24},
+ {"SumSHAKE256", Func, 24},
+ },
"crypto/sha512": {
{"BlockSize", Const, 0},
{"New", Func, 0},
@@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConstantTimeEq", Func, 0},
{"ConstantTimeLessOrEq", Func, 2},
{"ConstantTimeSelect", Func, 0},
+ {"WithDataIndependentTiming", Func, 24},
{"XORBytes", Func, 20},
},
"crypto/tls": {
@@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
{"ClientHelloInfo", Type, 4},
{"ClientHelloInfo.CipherSuites", Field, 4},
{"ClientHelloInfo.Conn", Field, 8},
+ {"ClientHelloInfo.Extensions", Field, 24},
{"ClientHelloInfo.ServerName", Field, 4},
{"ClientHelloInfo.SignatureSchemes", Field, 8},
{"ClientHelloInfo.SupportedCurves", Field, 4},
@@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
{"Config.EncryptedClientHelloConfigList", Field, 23},
+ {"Config.EncryptedClientHelloKeys", Field, 24},
{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
@@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{
{"ECHRejectionError", Type, 23},
{"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
+ {"EncryptedClientHelloKey", Type, 24},
+ {"EncryptedClientHelloKey.Config", Field, 24},
+ {"EncryptedClientHelloKey.PrivateKey", Field, 24},
+ {"EncryptedClientHelloKey.SendAsRetry", Field, 24},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
{"LoadX509KeyPair", Func, 0},
@@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
{"VersionTLS12", Const, 2},
{"VersionTLS13", Const, 12},
{"X25519", Const, 8},
+ {"X25519MLKEM768", Const, 24},
{"X509KeyPair", Func, 0},
},
"crypto/x509": {
@@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{
{"(ConstraintViolationError).Error", Method, 0},
{"(HostnameError).Error", Method, 0},
{"(InsecureAlgorithmError).Error", Method, 6},
+ {"(OID).AppendBinary", Method, 24},
+ {"(OID).AppendText", Method, 24},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
{"(OID).MarshalBinary", Method, 23},
@@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.Extensions", Field, 2},
{"Certificate.ExtraExtensions", Field, 2},
{"Certificate.IPAddresses", Field, 1},
+ {"Certificate.InhibitAnyPolicy", Field, 24},
+ {"Certificate.InhibitAnyPolicyZero", Field, 24},
+ {"Certificate.InhibitPolicyMapping", Field, 24},
+ {"Certificate.InhibitPolicyMappingZero", Field, 24},
{"Certificate.IsCA", Field, 0},
{"Certificate.Issuer", Field, 0},
{"Certificate.IssuingCertificateURL", Field, 2},
@@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.PermittedURIDomains", Field, 10},
{"Certificate.Policies", Field, 22},
{"Certificate.PolicyIdentifiers", Field, 0},
+ {"Certificate.PolicyMappings", Field, 24},
{"Certificate.PublicKey", Field, 0},
{"Certificate.PublicKeyAlgorithm", Field, 0},
{"Certificate.Raw", Field, 0},
@@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.RawSubject", Field, 0},
{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
{"Certificate.RawTBSCertificate", Field, 0},
+ {"Certificate.RequireExplicitPolicy", Field, 24},
+ {"Certificate.RequireExplicitPolicyZero", Field, 24},
{"Certificate.SerialNumber", Field, 0},
{"Certificate.Signature", Field, 0},
{"Certificate.SignatureAlgorithm", Field, 0},
@@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
{"NameConstraintsWithoutSANs", Const, 10},
{"NameMismatch", Const, 8},
{"NewCertPool", Func, 0},
+ {"NoValidChains", Const, 24},
{"NotAuthorizedToSign", Const, 0},
{"OID", Type, 22},
{"OIDFromInts", Func, 22},
@@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{
{"ParsePKCS8PrivateKey", Func, 0},
{"ParsePKIXPublicKey", Func, 0},
{"ParseRevocationList", Func, 19},
+ {"PolicyMapping", Type, 24},
+ {"PolicyMapping.IssuerDomainPolicy", Field, 24},
+ {"PolicyMapping.SubjectDomainPolicy", Field, 24},
{"PublicKeyAlgorithm", Type, 0},
{"PureEd25519", Const, 13},
{"RSA", Const, 0},
@@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
{"UnknownPublicKeyAlgorithm", Const, 0},
{"UnknownSignatureAlgorithm", Const, 0},
{"VerifyOptions", Type, 0},
+ {"VerifyOptions.CertificatePolicies", Field, 24},
{"VerifyOptions.CurrentTime", Field, 0},
{"VerifyOptions.DNSName", Field, 0},
{"VerifyOptions.Intermediates", Field, 0},
@@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).DynString", Method, 1},
{"(*File).DynValue", Method, 21},
{"(*File).DynamicSymbols", Method, 4},
+ {"(*File).DynamicVersionNeeds", Method, 24},
+ {"(*File).DynamicVersions", Method, 24},
{"(*File).ImportedLibraries", Method, 0},
{"(*File).ImportedSymbols", Method, 0},
{"(*File).Section", Method, 0},
@@ -2240,6 +2343,19 @@ var PackageSymbols = map[string][]Symbol{
{"DynFlag", Type, 0},
{"DynFlag1", Type, 21},
{"DynTag", Type, 0},
+ {"DynamicVersion", Type, 24},
+ {"DynamicVersion.Deps", Field, 24},
+ {"DynamicVersion.Flags", Field, 24},
+ {"DynamicVersion.Index", Field, 24},
+ {"DynamicVersion.Name", Field, 24},
+ {"DynamicVersionDep", Type, 24},
+ {"DynamicVersionDep.Dep", Field, 24},
+ {"DynamicVersionDep.Flags", Field, 24},
+ {"DynamicVersionDep.Index", Field, 24},
+ {"DynamicVersionFlag", Type, 24},
+ {"DynamicVersionNeed", Type, 24},
+ {"DynamicVersionNeed.Name", Field, 24},
+ {"DynamicVersionNeed.Needs", Field, 24},
{"EI_ABIVERSION", Const, 0},
{"EI_CLASS", Const, 0},
{"EI_DATA", Const, 0},
@@ -3726,8 +3842,19 @@ var PackageSymbols = map[string][]Symbol{
{"Symbol.Size", Field, 0},
{"Symbol.Value", Field, 0},
{"Symbol.Version", Field, 13},
+ {"Symbol.VersionIndex", Field, 24},
+ {"Symbol.VersionScope", Field, 24},
+ {"SymbolVersionScope", Type, 24},
{"Type", Type, 0},
+ {"VER_FLG_BASE", Const, 24},
+ {"VER_FLG_INFO", Const, 24},
+ {"VER_FLG_WEAK", Const, 24},
{"Version", Type, 0},
+ {"VersionScopeGlobal", Const, 24},
+ {"VersionScopeHidden", Const, 24},
+ {"VersionScopeLocal", Const, 24},
+ {"VersionScopeNone", Const, 24},
+ {"VersionScopeSpecific", Const, 24},
},
"debug/gosym": {
{"(*DecodingError).Error", Method, 0},
@@ -4453,8 +4580,10 @@ var PackageSymbols = map[string][]Symbol{
{"FS", Type, 16},
},
"encoding": {
+ {"BinaryAppender", Type, 24},
{"BinaryMarshaler", Type, 2},
{"BinaryUnmarshaler", Type, 2},
+ {"TextAppender", Type, 24},
{"TextMarshaler", Type, 2},
{"TextUnmarshaler", Type, 2},
},
@@ -5984,13 +6113,16 @@ var PackageSymbols = map[string][]Symbol{
{"(*Interface).Complete", Method, 5},
{"(*Interface).Embedded", Method, 5},
{"(*Interface).EmbeddedType", Method, 11},
+ {"(*Interface).EmbeddedTypes", Method, 24},
{"(*Interface).Empty", Method, 5},
{"(*Interface).ExplicitMethod", Method, 5},
+ {"(*Interface).ExplicitMethods", Method, 24},
{"(*Interface).IsComparable", Method, 18},
{"(*Interface).IsImplicit", Method, 18},
{"(*Interface).IsMethodSet", Method, 18},
{"(*Interface).MarkImplicit", Method, 18},
{"(*Interface).Method", Method, 5},
+ {"(*Interface).Methods", Method, 24},
{"(*Interface).NumEmbeddeds", Method, 5},
{"(*Interface).NumExplicitMethods", Method, 5},
{"(*Interface).NumMethods", Method, 5},
@@ -6011,9 +6143,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*MethodSet).At", Method, 5},
{"(*MethodSet).Len", Method, 5},
{"(*MethodSet).Lookup", Method, 5},
+ {"(*MethodSet).Methods", Method, 24},
{"(*MethodSet).String", Method, 5},
{"(*Named).AddMethod", Method, 5},
{"(*Named).Method", Method, 5},
+ {"(*Named).Methods", Method, 24},
{"(*Named).NumMethods", Method, 5},
{"(*Named).Obj", Method, 5},
{"(*Named).Origin", Method, 18},
@@ -6054,6 +6188,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Pointer).String", Method, 5},
{"(*Pointer).Underlying", Method, 5},
{"(*Scope).Child", Method, 5},
+ {"(*Scope).Children", Method, 24},
{"(*Scope).Contains", Method, 5},
{"(*Scope).End", Method, 5},
{"(*Scope).Innermost", Method, 5},
@@ -6089,6 +6224,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*StdSizes).Offsetsof", Method, 5},
{"(*StdSizes).Sizeof", Method, 5},
{"(*Struct).Field", Method, 5},
+ {"(*Struct).Fields", Method, 24},
{"(*Struct).NumFields", Method, 5},
{"(*Struct).String", Method, 5},
{"(*Struct).Tag", Method, 5},
@@ -6100,8 +6236,10 @@ var PackageSymbols = map[string][]Symbol{
{"(*Tuple).Len", Method, 5},
{"(*Tuple).String", Method, 5},
{"(*Tuple).Underlying", Method, 5},
+ {"(*Tuple).Variables", Method, 24},
{"(*TypeList).At", Method, 18},
{"(*TypeList).Len", Method, 18},
+ {"(*TypeList).Types", Method, 24},
{"(*TypeName).Exported", Method, 5},
{"(*TypeName).Id", Method, 5},
{"(*TypeName).IsAlias", Method, 9},
@@ -6119,9 +6257,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*TypeParam).Underlying", Method, 18},
{"(*TypeParamList).At", Method, 18},
{"(*TypeParamList).Len", Method, 18},
+ {"(*TypeParamList).TypeParams", Method, 24},
{"(*Union).Len", Method, 18},
{"(*Union).String", Method, 18},
{"(*Union).Term", Method, 18},
+ {"(*Union).Terms", Method, 24},
{"(*Union).Underlying", Method, 18},
{"(*Var).Anonymous", Method, 5},
{"(*Var).Embedded", Method, 11},
@@ -6392,10 +6532,12 @@ var PackageSymbols = map[string][]Symbol{
{"(*Hash).WriteByte", Method, 14},
{"(*Hash).WriteString", Method, 14},
{"Bytes", Func, 19},
+ {"Comparable", Func, 24},
{"Hash", Type, 14},
{"MakeSeed", Func, 14},
{"Seed", Type, 14},
{"String", Func, 19},
+ {"WriteComparable", Func, 24},
},
"html": {
{"EscapeString", Func, 0},
@@ -7082,6 +7224,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*JSONHandler).WithGroup", Method, 21},
{"(*Level).UnmarshalJSON", Method, 21},
{"(*Level).UnmarshalText", Method, 21},
+ {"(*LevelVar).AppendText", Method, 24},
{"(*LevelVar).Level", Method, 21},
{"(*LevelVar).MarshalText", Method, 21},
{"(*LevelVar).Set", Method, 21},
@@ -7110,6 +7253,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Attr).Equal", Method, 21},
{"(Attr).String", Method, 21},
{"(Kind).String", Method, 21},
+ {"(Level).AppendText", Method, 24},
{"(Level).Level", Method, 21},
{"(Level).MarshalJSON", Method, 21},
{"(Level).MarshalText", Method, 21},
@@ -7140,6 +7284,7 @@ var PackageSymbols = map[string][]Symbol{
{"Debug", Func, 21},
{"DebugContext", Func, 21},
{"Default", Func, 21},
+ {"DiscardHandler", Var, 24},
{"Duration", Func, 21},
{"DurationValue", Func, 21},
{"Error", Func, 21},
@@ -7375,6 +7520,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Float).Acc", Method, 5},
{"(*Float).Add", Method, 5},
{"(*Float).Append", Method, 5},
+ {"(*Float).AppendText", Method, 24},
{"(*Float).Cmp", Method, 5},
{"(*Float).Copy", Method, 5},
{"(*Float).Float32", Method, 5},
@@ -7421,6 +7567,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).And", Method, 0},
{"(*Int).AndNot", Method, 0},
{"(*Int).Append", Method, 6},
+ {"(*Int).AppendText", Method, 24},
{"(*Int).Binomial", Method, 0},
{"(*Int).Bit", Method, 0},
{"(*Int).BitLen", Method, 0},
@@ -7477,6 +7624,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).Xor", Method, 0},
{"(*Rat).Abs", Method, 0},
{"(*Rat).Add", Method, 0},
+ {"(*Rat).AppendText", Method, 24},
{"(*Rat).Cmp", Method, 0},
{"(*Rat).Denom", Method, 0},
{"(*Rat).Float32", Method, 4},
@@ -7659,11 +7807,13 @@ var PackageSymbols = map[string][]Symbol{
{"Zipf", Type, 0},
},
"math/rand/v2": {
+ {"(*ChaCha8).AppendBinary", Method, 24},
{"(*ChaCha8).MarshalBinary", Method, 22},
{"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
+ {"(*PCG).AppendBinary", Method, 24},
{"(*PCG).MarshalBinary", Method, 22},
{"(*PCG).Seed", Method, 22},
{"(*PCG).Uint64", Method, 22},
@@ -7931,6 +8081,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*UnixListener).SyscallConn", Method, 10},
{"(Flags).String", Method, 0},
{"(HardwareAddr).String", Method, 0},
+ {"(IP).AppendText", Method, 24},
{"(IP).DefaultMask", Method, 0},
{"(IP).Equal", Method, 0},
{"(IP).IsGlobalUnicast", Method, 0},
@@ -8131,6 +8282,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*MaxBytesError).Error", Method, 19},
{"(*ProtocolError).Error", Method, 0},
{"(*ProtocolError).Is", Method, 21},
+ {"(*Protocols).SetHTTP1", Method, 24},
+ {"(*Protocols).SetHTTP2", Method, 24},
+ {"(*Protocols).SetUnencryptedHTTP2", Method, 24},
{"(*Request).AddCookie", Method, 0},
{"(*Request).BasicAuth", Method, 4},
{"(*Request).Clone", Method, 13},
@@ -8190,6 +8344,10 @@ var PackageSymbols = map[string][]Symbol{
{"(Header).Values", Method, 14},
{"(Header).Write", Method, 0},
{"(Header).WriteSubset", Method, 0},
+ {"(Protocols).HTTP1", Method, 24},
+ {"(Protocols).HTTP2", Method, 24},
+ {"(Protocols).String", Method, 24},
+ {"(Protocols).UnencryptedHTTP2", Method, 24},
{"AllowQuerySemicolons", Func, 17},
{"CanonicalHeaderKey", Func, 0},
{"Client", Type, 0},
@@ -8252,6 +8410,18 @@ var PackageSymbols = map[string][]Symbol{
{"FileSystem", Type, 0},
{"Flusher", Type, 0},
{"Get", Func, 0},
+ {"HTTP2Config", Type, 24},
+ {"HTTP2Config.CountError", Field, 24},
+ {"HTTP2Config.MaxConcurrentStreams", Field, 24},
+ {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
+ {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
+ {"HTTP2Config.MaxReadFrameSize", Field, 24},
+ {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
+ {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
+ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
+ {"HTTP2Config.PingTimeout", Field, 24},
+ {"HTTP2Config.SendPingTimeout", Field, 24},
+ {"HTTP2Config.WriteByteTimeout", Field, 24},
{"Handle", Func, 0},
{"HandleFunc", Func, 0},
{"Handler", Type, 0},
@@ -8292,6 +8462,7 @@ var PackageSymbols = map[string][]Symbol{
{"PostForm", Func, 0},
{"ProtocolError", Type, 0},
{"ProtocolError.ErrorString", Field, 0},
+ {"Protocols", Type, 24},
{"ProxyFromEnvironment", Func, 0},
{"ProxyURL", Func, 0},
{"PushOptions", Type, 8},
@@ -8361,9 +8532,11 @@ var PackageSymbols = map[string][]Symbol{
{"Server.ConnState", Field, 3},
{"Server.DisableGeneralOptionsHandler", Field, 20},
{"Server.ErrorLog", Field, 3},
+ {"Server.HTTP2", Field, 24},
{"Server.Handler", Field, 0},
{"Server.IdleTimeout", Field, 8},
{"Server.MaxHeaderBytes", Field, 0},
+ {"Server.Protocols", Field, 24},
{"Server.ReadHeaderTimeout", Field, 8},
{"Server.ReadTimeout", Field, 0},
{"Server.TLSConfig", Field, 0},
@@ -8453,12 +8626,14 @@ var PackageSymbols = map[string][]Symbol{
{"Transport.ExpectContinueTimeout", Field, 6},
{"Transport.ForceAttemptHTTP2", Field, 13},
{"Transport.GetProxyConnectHeader", Field, 16},
+ {"Transport.HTTP2", Field, 24},
{"Transport.IdleConnTimeout", Field, 7},
{"Transport.MaxConnsPerHost", Field, 11},
{"Transport.MaxIdleConns", Field, 7},
{"Transport.MaxIdleConnsPerHost", Field, 0},
{"Transport.MaxResponseHeaderBytes", Field, 7},
{"Transport.OnProxyConnectResponse", Field, 20},
+ {"Transport.Protocols", Field, 24},
{"Transport.Proxy", Field, 0},
{"Transport.ProxyConnectHeader", Field, 8},
{"Transport.ReadBufferSize", Field, 13},
@@ -8646,6 +8821,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*AddrPort).UnmarshalText", Method, 18},
{"(*Prefix).UnmarshalBinary", Method, 18},
{"(*Prefix).UnmarshalText", Method, 18},
+ {"(Addr).AppendBinary", Method, 24},
+ {"(Addr).AppendText", Method, 24},
{"(Addr).AppendTo", Method, 18},
{"(Addr).As16", Method, 18},
{"(Addr).As4", Method, 18},
@@ -8676,6 +8853,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Addr).WithZone", Method, 18},
{"(Addr).Zone", Method, 18},
{"(AddrPort).Addr", Method, 18},
+ {"(AddrPort).AppendBinary", Method, 24},
+ {"(AddrPort).AppendText", Method, 24},
{"(AddrPort).AppendTo", Method, 18},
{"(AddrPort).Compare", Method, 22},
{"(AddrPort).IsValid", Method, 18},
@@ -8684,6 +8863,8 @@ var PackageSymbols = map[string][]Symbol{
{"(AddrPort).Port", Method, 18},
{"(AddrPort).String", Method, 18},
{"(Prefix).Addr", Method, 18},
+ {"(Prefix).AppendBinary", Method, 24},
+ {"(Prefix).AppendText", Method, 24},
{"(Prefix).AppendTo", Method, 18},
{"(Prefix).Bits", Method, 18},
{"(Prefix).Contains", Method, 18},
@@ -8868,6 +9049,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Error).Temporary", Method, 6},
{"(*Error).Timeout", Method, 6},
{"(*Error).Unwrap", Method, 13},
+ {"(*URL).AppendBinary", Method, 24},
{"(*URL).EscapedFragment", Method, 15},
{"(*URL).EscapedPath", Method, 5},
{"(*URL).Hostname", Method, 8},
@@ -8967,6 +9149,17 @@ var PackageSymbols = map[string][]Symbol{
{"(*ProcessState).SysUsage", Method, 0},
{"(*ProcessState).SystemTime", Method, 0},
{"(*ProcessState).UserTime", Method, 0},
+ {"(*Root).Close", Method, 24},
+ {"(*Root).Create", Method, 24},
+ {"(*Root).FS", Method, 24},
+ {"(*Root).Lstat", Method, 24},
+ {"(*Root).Mkdir", Method, 24},
+ {"(*Root).Name", Method, 24},
+ {"(*Root).Open", Method, 24},
+ {"(*Root).OpenFile", Method, 24},
+ {"(*Root).OpenRoot", Method, 24},
+ {"(*Root).Remove", Method, 24},
+ {"(*Root).Stat", Method, 24},
{"(*SyscallError).Error", Method, 0},
{"(*SyscallError).Timeout", Method, 10},
{"(*SyscallError).Unwrap", Method, 13},
@@ -9060,6 +9253,8 @@ var PackageSymbols = map[string][]Symbol{
{"O_WRONLY", Const, 0},
{"Open", Func, 0},
{"OpenFile", Func, 0},
+ {"OpenInRoot", Func, 24},
+ {"OpenRoot", Func, 24},
{"PathError", Type, 0},
{"PathError.Err", Field, 0},
{"PathError.Op", Field, 0},
@@ -9081,6 +9276,7 @@ var PackageSymbols = map[string][]Symbol{
{"Remove", Func, 0},
{"RemoveAll", Func, 0},
{"Rename", Func, 0},
+ {"Root", Type, 24},
{"SEEK_CUR", Const, 0},
{"SEEK_END", Const, 0},
{"SEEK_SET", Const, 0},
@@ -9422,6 +9618,7 @@ var PackageSymbols = map[string][]Symbol{
{"Zero", Func, 0},
},
"regexp": {
+ {"(*Regexp).AppendText", Method, 24},
{"(*Regexp).Copy", Method, 6},
{"(*Regexp).Expand", Method, 0},
{"(*Regexp).ExpandString", Method, 0},
@@ -9602,6 +9799,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*StackRecord).Stack", Method, 0},
{"(*TypeAssertionError).Error", Method, 0},
{"(*TypeAssertionError).RuntimeError", Method, 0},
+ {"(Cleanup).Stop", Method, 24},
+ {"AddCleanup", Func, 24},
{"BlockProfile", Func, 1},
{"BlockProfileRecord", Type, 1},
{"BlockProfileRecord.Count", Field, 1},
@@ -9612,6 +9811,7 @@ var PackageSymbols = map[string][]Symbol{
{"Caller", Func, 0},
{"Callers", Func, 0},
{"CallersFrames", Func, 7},
+ {"Cleanup", Type, 24},
{"Compiler", Const, 0},
{"Error", Type, 0},
{"Frame", Type, 7},
@@ -9974,6 +10174,8 @@ var PackageSymbols = map[string][]Symbol{
{"EqualFold", Func, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
+ {"FieldsFuncSeq", Func, 24},
+ {"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@@ -9986,6 +10188,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
+ {"Lines", Func, 24},
{"Map", Func, 0},
{"NewReader", Func, 0},
{"NewReplacer", Func, 0},
@@ -9997,7 +10200,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
+ {"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
+ {"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@@ -16413,7 +16618,9 @@ var PackageSymbols = map[string][]Symbol{
{"ValueOf", Func, 0},
},
"testing": {
+ {"(*B).Chdir", Method, 24},
{"(*B).Cleanup", Method, 14},
+ {"(*B).Context", Method, 24},
{"(*B).Elapsed", Method, 20},
{"(*B).Error", Method, 0},
{"(*B).Errorf", Method, 0},
@@ -16425,6 +16632,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).Helper", Method, 9},
{"(*B).Log", Method, 0},
{"(*B).Logf", Method, 0},
+ {"(*B).Loop", Method, 24},
{"(*B).Name", Method, 8},
{"(*B).ReportAllocs", Method, 1},
{"(*B).ReportMetric", Method, 13},
@@ -16442,7 +16650,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).StopTimer", Method, 0},
{"(*B).TempDir", Method, 15},
{"(*F).Add", Method, 18},
+ {"(*F).Chdir", Method, 24},
{"(*F).Cleanup", Method, 18},
+ {"(*F).Context", Method, 24},
{"(*F).Error", Method, 18},
{"(*F).Errorf", Method, 18},
{"(*F).Fail", Method, 18},
@@ -16463,7 +16673,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*F).TempDir", Method, 18},
{"(*M).Run", Method, 4},
{"(*PB).Next", Method, 3},
+ {"(*T).Chdir", Method, 24},
{"(*T).Cleanup", Method, 14},
+ {"(*T).Context", Method, 24},
{"(*T).Deadline", Method, 15},
{"(*T).Error", Method, 0},
{"(*T).Errorf", Method, 0},
@@ -16954,7 +17166,9 @@ var PackageSymbols = map[string][]Symbol{
{"(Time).Add", Method, 0},
{"(Time).AddDate", Method, 0},
{"(Time).After", Method, 0},
+ {"(Time).AppendBinary", Method, 24},
{"(Time).AppendFormat", Method, 5},
+ {"(Time).AppendText", Method, 24},
{"(Time).Before", Method, 0},
{"(Time).Clock", Method, 0},
{"(Time).Compare", Method, 20},
@@ -17428,4 +17642,9 @@ var PackageSymbols = map[string][]Symbol{
{"String", Func, 0},
{"StringData", Func, 0},
},
+ "weak": {
+ {"(Pointer).Value", Method, 24},
+ {"Make", Func, 24},
+ {"Pointer", Type, 24},
+ },
}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
index 0b84acc5..cdae2b8e 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool {
_, ok := types.Unalias(t).(*types.TypeParam)
return ok
}
-
-// GenericAssignableTo is a generalization of types.AssignableTo that
-// implements the following rule for uninstantiated generic types:
-//
-// If V and T are generic named types, then V is considered assignable to T if,
-// for every possible instantiation of V[A_1, ..., A_N], the instantiation
-// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
-//
-// If T has structural constraints, they must be satisfied by V.
-//
-// For example, consider the following type declarations:
-//
-// type Interface[T any] interface {
-// Accept(T)
-// }
-//
-// type Container[T any] struct {
-// Element T
-// }
-//
-// func (c Container[T]) Accept(t T) { c.Element = t }
-//
-// In this case, GenericAssignableTo reports that instantiations of Container
-// are assignable to the corresponding instantiation of Interface.
-func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
- V = types.Unalias(V)
- T = types.Unalias(T)
-
- // If V and T are not both named, or do not have matching non-empty type
- // parameter lists, fall back on types.AssignableTo.
-
- VN, Vnamed := V.(*types.Named)
- TN, Tnamed := T.(*types.Named)
- if !Vnamed || !Tnamed {
- return types.AssignableTo(V, T)
- }
-
- vtparams := VN.TypeParams()
- ttparams := TN.TypeParams()
- if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
- return types.AssignableTo(V, T)
- }
-
- // V and T have the same (non-zero) number of type params. Instantiate both
- // with the type parameters of V. This must always succeed for V, and will
- // succeed for T if and only if the type set of each type parameter of V is a
- // subset of the type set of the corresponding type parameter of T, meaning
- // that every instantiation of V corresponds to a valid instantiation of T.
-
- // Minor optimization: ensure we share a context across the two
- // instantiations below.
- if ctxt == nil {
- ctxt = types.NewContext()
- }
-
- var targs []types.Type
- for i := 0; i < vtparams.Len(); i++ {
- targs = append(targs, vtparams.At(i))
- }
-
- vinst, err := types.Instantiate(ctxt, V, targs, true)
- if err != nil {
- panic("type parameters should satisfy their own constraints")
- }
-
- tinst, err := types.Instantiate(ctxt, T, targs, true)
- if err != nil {
- return false
- }
-
- return types.AssignableTo(vinst, tinst)
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
index 6e83c6fb..27a2b179 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -109,8 +109,13 @@ func CoreType(T types.Type) types.Type {
//
// NormalTerms makes no guarantees about the order of terms, except that it
// is deterministic.
-func NormalTerms(typ types.Type) ([]*types.Term, error) {
- switch typ := typ.Underlying().(type) {
+func NormalTerms(T types.Type) ([]*types.Term, error) {
+ // typeSetOf(T) == typeSetOf(Unalias(T))
+ typ := types.Unalias(T)
+ if named, ok := typ.(*types.Named); ok {
+ typ = named.Underlying()
+ }
+ switch typ := typ.(type) {
case *types.TypeParam:
return StructuralTerms(typ)
case *types.Union:
@@ -118,7 +123,7 @@ func NormalTerms(typ types.Type) ([]*types.Term, error) {
case *types.Interface:
return InterfaceTermSet(typ)
default:
- return []*types.Term{types.NewTerm(false, typ)}, nil
+ return []*types.Term{types.NewTerm(false, T)}, nil
}
}
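The subtle part of this change is the default case: it now wraps the original type T in the returned term rather than T's underlying type, so callers see the named (or alias) type they passed in. A stand-alone illustration with go/types of the two terms side by side (it does not call the internal NormalTerms; the package and type are built by hand):

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// type MySlice []int
	pkg := types.NewPackage("example.com/demo", "demo")
	obj := types.NewTypeName(0, pkg, "MySlice", nil)
	named := types.NewNamed(obj, types.NewSlice(types.Typ[types.Int]), nil)

	fmt.Println(types.NewTerm(false, named.Underlying())) // []int: what the old default case wrapped
	fmt.Println(types.NewTerm(false, named))               // example.com/demo.MySlice: what it wraps now
}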
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 131caab2..235a6def 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -966,7 +966,7 @@ const (
// var _ = string(x)
InvalidConversion
- // InvalidUntypedConversion occurs when an there is no valid implicit
+ // InvalidUntypedConversion occurs when there is no valid implicit
// conversion from an untyped value satisfying the type constraints of the
// context in which it is used.
//
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
new file mode 100644
index 00000000..b64f714e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -0,0 +1,46 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/types"
+ "strconv"
+)
+
+// FileQualifier returns a [types.Qualifier] function that qualifies
+// imported symbols appropriately based on the import environment of a given
+// file.
+// If the same package is imported multiple times, the last appearance is
+// recorded.
+func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
+ // Construct mapping of import paths to their defined names.
+ // It is only necessary to look at renaming imports.
+ imports := make(map[string]string)
+ for _, imp := range f.Imports {
+ if imp.Name != nil && imp.Name.Name != "_" {
+ path, _ := strconv.Unquote(imp.Path.Value)
+ imports[path] = imp.Name.Name
+ }
+ }
+
+ // Define qualifier to replace full package paths with names of the imports.
+ return func(p *types.Package) string {
+ if p == nil || p == pkg {
+ return ""
+ }
+
+ if name, ok := imports[p.Path()]; ok {
+ if name == "." {
+ return ""
+ } else {
+ return name
+ }
+ }
+
+ // If there is no local renaming, fall back to the package name.
+ return p.Name()
+ }
+}
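FileQualifier turns a file's import list into a types.Qualifier, so printed type names follow whatever renames the file already uses. A stand-alone sketch of the same idea against a throwaway parsed file (the demo source, the rename "foo", and the hand-built stand-in for net/http.Client exist only for this illustration):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"go/types"
	"strconv"
)

const src = `package demo

import foo "net/http"

var _ foo.Client
`

func main() {
	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "demo.go", src, parser.SkipObjectResolution)
	if err != nil {
		panic(err)
	}

	// Record renaming imports, as FileQualifier does.
	renames := map[string]string{}
	for _, imp := range f.Imports {
		if imp.Name != nil && imp.Name.Name != "_" {
			path, _ := strconv.Unquote(imp.Path.Value)
			renames[path] = imp.Name.Name
		}
	}
	qual := func(p *types.Package) string {
		if name, ok := renames[p.Path()]; ok {
			if name == "." {
				return ""
			}
			return name
		}
		return p.Name() // no rename: fall back to the package name
	}

	// A hand-built stand-in for net/http.Client, just to exercise the qualifier.
	httpPkg := types.NewPackage("net/http", "http")
	client := types.NewNamed(types.NewTypeName(0, httpPkg, "Client", nil), types.NewStruct(nil, nil), nil)
	fmt.Println(types.TypeString(client, qual)) // foo.Client, honoring the file's rename
}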
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index ba6f4f4e..8352ea76 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -11,6 +11,9 @@ import (
// ReceiverNamed returns the named type (if any) associated with the
// type of recv, which may be of the form N or *N, or aliases thereof.
// It also reports whether a Pointer was present.
+//
+// The named result may be nil if recv is from a method on an
+// anonymous interface or struct type, or in ill-typed code.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index df3ea521..34534879 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -82,6 +82,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
type NamedOrAlias interface {
types.Type
Obj() *types.TypeName
+ // TODO(hxjiang): add method TypeArgs() *types.TypeList after we stop supporting go1.22.
}
// TypeParams is a light shim around t.TypeParams().
@@ -119,3 +120,8 @@ func Origin(t NamedOrAlias) NamedOrAlias {
}
return t
}
+
+// IsPackageLevel reports whether obj is a package-level symbol.
+func IsPackageLevel(obj types.Object) bool {
+ return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
+}
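IsPackageLevel is a small but handy predicate: an object is package level when its parent scope is its package's scope (universe objects have no package at all). A stand-alone sketch of the same check, with objects constructed by hand for the demonstration:

package main

import (
	"fmt"
	"go/types"
)

// isPackageLevel mirrors the new helper: an object is package level when its
// parent scope is its package's scope.
func isPackageLevel(obj types.Object) bool {
	return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
}

func main() {
	pkg := types.NewPackage("example.com/demo", "demo")

	global := types.NewVar(0, pkg, "Global", types.Typ[types.Int])
	pkg.Scope().Insert(global) // Insert sets the parent scope

	local := types.NewVar(0, pkg, "local", types.Typ[types.Int]) // never inserted into any scope

	fmt.Println(isPackageLevel(global)) // true
	fmt.Println(isPackageLevel(local))  // false
}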
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
new file mode 100644
index 00000000..e5da0495
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
+// this API that actually does something.
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+ _ VarKind = iota // (not meaningful)
+ PackageVar // a package-level variable
+ LocalVar // a local variable
+ RecvVar // a method receiver variable
+ ParamVar // a function parameter variable
+ ResultVar // a function result variable
+ FieldVar // a struct field
+)
+
+func (kind VarKind) String() string {
+ return [...]string{
+ 0: "VarKind(0)",
+ PackageVar: "PackageVar",
+ LocalVar: "LocalVar",
+ RecvVar: "RecvVar",
+ ParamVar: "ParamVar",
+ ResultVar: "ResultVar",
+ FieldVar: "FieldVar",
+ }[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
index 10669806..d272949c 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -9,62 +9,97 @@ import (
"go/ast"
"go/token"
"go/types"
- "strconv"
"strings"
)
-// ZeroString returns the string representation of the "zero" value of the type t.
-// This string can be used on the right-hand side of an assignment where the
-// left-hand side has that explicit type.
-// Exception: This does not apply to tuples. Their string representation is
-// informational only and cannot be used in an assignment.
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
// When assigning to a wider type (such as 'any'), it's the caller's
// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
// See [ZeroExpr] for a variant that returns an [ast.Expr].
-func ZeroString(t types.Type, qf types.Qualifier) string {
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
- return "false"
+ return "false", true
case t.Info()&types.IsNumeric != 0:
- return "0"
+ return "0", true
case t.Info()&types.IsString != 0:
- return `""`
+ return `""`, true
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
- return "nil"
+ return "nil", true
+ case t.Kind() == types.Invalid:
+ return "invalid", false
default:
- panic(fmt.Sprint("ZeroString for unexpected type:", t))
+ panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
}
- case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
- return "nil"
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return "nil", true
- case *types.Named, *types.Alias:
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return "invalid", false
+ }
+ return "nil", true
+
+ case *types.Named:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
- return types.TypeString(t, qf) + "{}"
+ return types.TypeString(t, qual) + "{}", true
default:
- return ZeroString(under, qf)
+ return ZeroString(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+ // A type parameter can have an alias, but an alias type's underlying type
+ // can never be a type parameter.
+ // Use types.Unalias to preserve the type parameter information instead of
+ // calling Underlying(), which would go right through the type parameter and
+ // return its underlying type, which is always an interface.
+ return ZeroString(types.Unalias(t), qual)
}
case *types.Array, *types.Struct:
- return types.TypeString(t, qf) + "{}"
+ return types.TypeString(t, qual) + "{}", true
case *types.TypeParam:
// Assumes func new is not shadowed.
- return "*new(" + types.TypeString(t, qf) + ")"
+ return "*new(" + types.TypeString(t, qual) + ")", true
case *types.Tuple:
// Tuples are not normal values.
// We currently format them as "(t[0], ..., t[n])". Could be something else.
+ isValid := true
components := make([]string, t.Len())
for i := 0; i < t.Len(); i++ {
- components[i] = ZeroString(t.At(i).Type(), qf)
+ comp, ok := ZeroString(t.At(i).Type(), qual)
+
+ components[i] = comp
+ isValid = isValid && ok
}
- return "(" + strings.Join(components, ", ") + ")"
+ return "(" + strings.Join(components, ", ") + ")", isValid
case *types.Union:
// Variables of these types cannot be created, so it makes
@@ -76,45 +111,72 @@ func ZeroString(t types.Type, qf types.Qualifier) string {
}
}
-// ZeroExpr returns the ast.Expr representation of the "zero" value of the type t.
-// ZeroExpr is defined for types that are suitable for variables.
-// It may panic for other types such as Tuple or Union.
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not be
+// used with Tuple or Union types. References to named types are qualified by an
+// appropriate (optional) qualifier function.
+//
// See [ZeroString] for a variant that returns a string.
-func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- switch t := typ.(type) {
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+ switch t := t.(type) {
case *types.Basic:
switch {
case t.Info()&types.IsBoolean != 0:
- return &ast.Ident{Name: "false"}
+ return &ast.Ident{Name: "false"}, true
case t.Info()&types.IsNumeric != 0:
- return &ast.BasicLit{Kind: token.INT, Value: "0"}
+ return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
case t.Info()&types.IsString != 0:
- return &ast.BasicLit{Kind: token.STRING, Value: `""`}
+ return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
case t.Kind() == types.UnsafePointer:
fallthrough
case t.Kind() == types.UntypedNil:
- return ast.NewIdent("nil")
+ return ast.NewIdent("nil"), true
+ case t.Kind() == types.Invalid:
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
default:
- panic(fmt.Sprint("ZeroExpr for unexpected type:", t))
+ panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
}
- case *types.Pointer, *types.Slice, *types.Interface, *types.Chan, *types.Map, *types.Signature:
- return ast.NewIdent("nil")
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return ast.NewIdent("nil"), true
- case *types.Named, *types.Alias:
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ }
+ return ast.NewIdent("nil"), true
+
+ case *types.Named:
switch under := t.Underlying().(type) {
case *types.Struct, *types.Array:
return &ast.CompositeLit{
- Type: TypeExpr(f, pkg, typ),
- }
+ Type: TypeExpr(t, qual),
+ }, true
default:
- return ZeroExpr(f, pkg, under)
+ return ZeroExpr(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(types.Unalias(t), qual)
}
case *types.Array, *types.Struct:
return &ast.CompositeLit{
- Type: TypeExpr(f, pkg, typ),
- }
+ Type: TypeExpr(t, qual),
+ }, true
case *types.TypeParam:
return &ast.StarExpr{ // *new(T)
@@ -125,7 +187,7 @@ func ZeroExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
ast.NewIdent(t.Obj().Name()),
},
},
- }
+ }, true
case *types.Tuple:
// Unlike ZeroString, there is no ast.Expr can express tuple by
@@ -157,16 +219,14 @@ func IsZeroExpr(expr ast.Expr) bool {
}
// TypeExpr returns syntax for the specified type. References to named types
-// from packages other than pkg are qualified by an appropriate package name, as
-// defined by the import environment of file.
+// are qualified by an appropriate (optional) qualifier function.
// It may panic for types such as Tuple or Union.
-func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
- switch t := typ.(type) {
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+ switch t := t.(type) {
case *types.Basic:
switch t.Kind() {
case types.UnsafePointer:
- // TODO(hxjiang): replace the implementation with types.Qualifier.
- return &ast.SelectorExpr{X: ast.NewIdent("unsafe"), Sel: ast.NewIdent("Pointer")}
+ return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
default:
return ast.NewIdent(t.Name())
}
@@ -174,7 +234,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
case *types.Pointer:
return &ast.UnaryExpr{
Op: token.MUL,
- X: TypeExpr(f, pkg, t.Elem()),
+ X: TypeExpr(t.Elem(), qual),
}
case *types.Array:
@@ -183,18 +243,18 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Kind: token.INT,
Value: fmt.Sprintf("%d", t.Len()),
},
- Elt: TypeExpr(f, pkg, t.Elem()),
+ Elt: TypeExpr(t.Elem(), qual),
}
case *types.Slice:
return &ast.ArrayType{
- Elt: TypeExpr(f, pkg, t.Elem()),
+ Elt: TypeExpr(t.Elem(), qual),
}
case *types.Map:
return &ast.MapType{
- Key: TypeExpr(f, pkg, t.Key()),
- Value: TypeExpr(f, pkg, t.Elem()),
+ Key: TypeExpr(t.Key(), qual),
+ Value: TypeExpr(t.Elem(), qual),
}
case *types.Chan:
@@ -204,14 +264,14 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
}
return &ast.ChanType{
Dir: dir,
- Value: TypeExpr(f, pkg, t.Elem()),
+ Value: TypeExpr(t.Elem(), qual),
}
case *types.Signature:
var params []*ast.Field
for i := 0; i < t.Params().Len(); i++ {
params = append(params, &ast.Field{
- Type: TypeExpr(f, pkg, t.Params().At(i).Type()),
+ Type: TypeExpr(t.Params().At(i).Type(), qual),
Names: []*ast.Ident{
{
Name: t.Params().At(i).Name(),
@@ -226,7 +286,7 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
var returns []*ast.Field
for i := 0; i < t.Results().Len(); i++ {
returns = append(returns, &ast.Field{
- Type: TypeExpr(f, pkg, t.Results().At(i).Type()),
+ Type: TypeExpr(t.Results().At(i).Type(), qual),
})
}
return &ast.FuncType{
@@ -238,23 +298,9 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
},
}
- case interface{ Obj() *types.TypeName }: // *types.{Alias,Named,TypeParam}
- switch t.Obj().Pkg() {
- case pkg, nil:
- return ast.NewIdent(t.Obj().Name())
- }
- pkgName := t.Obj().Pkg().Name()
-
- // TODO(hxjiang): replace the implementation with types.Qualifier.
- // If the file already imports the package under another name, use that.
- for _, cand := range f.Imports {
- if path, _ := strconv.Unquote(cand.Path.Value); path == t.Obj().Pkg().Path() {
- if cand.Name != nil && cand.Name.Name != "" {
- pkgName = cand.Name.Name
- }
- }
- }
- if pkgName == "." {
+ case *types.TypeParam:
+ pkgName := qual(t.Obj().Pkg())
+ if pkgName == "" || t.Obj().Pkg() == nil {
return ast.NewIdent(t.Obj().Name())
}
return &ast.SelectorExpr{
@@ -262,6 +308,36 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
Sel: ast.NewIdent(t.Obj().Name()),
}
+ // types.TypeParam also implements interface NamedOrAlias. To differentiate,
+ // case TypeParam needs to come before case NamedOrAlias.
+ // TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+ // NamedOrAlias.
+ case NamedOrAlias:
+ var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+ if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+ expr = &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: expr.(*ast.Ident),
+ }
+ }
+
+ // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+ // typesinternal.NamedOrAlias.
+ if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+ if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+ var indices []ast.Expr
+ for i := range typeArgs.Len() {
+ indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+ }
+ expr = &ast.IndexListExpr{
+ X: expr,
+ Indices: indices,
+ }
+ }
+ }
+
+ return expr
+
case *types.Struct:
return ast.NewIdent(t.String())
@@ -269,9 +345,43 @@ func TypeExpr(f *ast.File, pkg *types.Package, typ types.Type) ast.Expr {
return ast.NewIdent(t.String())
case *types.Union:
- // TODO(hxjiang): handle the union through syntax (~A | ... | ~Z).
- // Remove nil check when calling typesinternal.TypeExpr.
- return nil
+ if t.Len() == 0 {
+ panic("Union type should have at least one term")
+ }
+ // As in go/ast, the returned expression puts the last term in the
+ // Y field at the topmost level of the BinaryExpr.
+ // For union of type "float32 | float64 | int64", the structure looks
+ // similar to:
+ // {
+ // X: {
+ // X: float32,
+ // Op: |
+ // Y: float64,
+ // }
+ // Op: |,
+ // Y: int64,
+ // }
+ var union ast.Expr
+ for i := range t.Len() {
+ term := t.Term(i)
+ termExpr := TypeExpr(term.Type(), qual)
+ if term.Tilde() {
+ termExpr = &ast.UnaryExpr{
+ Op: token.TILDE,
+ X: termExpr,
+ }
+ }
+ if i == 0 {
+ union = termExpr
+ } else {
+ union = &ast.BinaryExpr{
+ X: union,
+ Op: token.OR,
+ Y: termExpr,
+ }
+ }
+ }
+ return union
case *types.Tuple:
panic("invalid input type types.Tuple")
diff --git a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
index 0d7823b3..d88162ff 100644
--- a/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
+++ b/vendor/gomodules.xyz/jsonpatch/v2/jsonpatch.go
@@ -70,12 +70,14 @@ func CreatePatch(a, b []byte) ([]Operation, error) {
}
var aI interface{}
var bI interface{}
- err := json.Unmarshal(a, &aI)
- if err != nil {
+ aDec := json.NewDecoder(bytes.NewReader(a))
+ aDec.UseNumber()
+ if err := aDec.Decode(&aI); err != nil {
return nil, errBadJSONDoc
}
- err = json.Unmarshal(b, &bI)
- if err != nil {
+ bDec := json.NewDecoder(bytes.NewReader(b))
+ bDec.UseNumber()
+ if err := bDec.Decode(&bI); err != nil {
return nil, errBadJSONDoc
}
return handleValues(aI, bI, "", []Operation{})
@@ -94,6 +96,11 @@ func matchesValue(av, bv interface{}) bool {
if ok && bt == at {
return true
}
+ case json.Number:
+ bt, ok := bv.(json.Number)
+ if ok && bt == at {
+ return true
+ }
case float64:
bt, ok := bv.(float64)
if ok && bt == at {
@@ -212,7 +219,7 @@ func handleValues(av, bv interface{}, p string, patch []Operation) ([]Operation,
if err != nil {
return nil, err
}
- case string, float64, bool:
+ case string, float64, bool, json.Number:
if !matchesValue(av, bv) {
patch = append(patch, NewOperation("replace", p, bv))
}
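
Aside: the decoder change above matters because json.Unmarshal into interface{} converts every number to float64, which silently rounds integers above 2^53; json.Decoder with UseNumber keeps them as json.Number strings so CreatePatch can compare them exactly. A small sketch of the behavior (illustrative, not from the patch):

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
)

func main() {
	raw := []byte(`{"id": 9007199254740993}`) // 2^53 + 1, not exactly representable as float64

	var v map[string]interface{}
	dec := json.NewDecoder(bytes.NewReader(raw))
	dec.UseNumber()
	if err := dec.Decode(&v); err != nil {
		panic(err)
	}
	fmt.Printf("%T %v\n", v["id"], v["id"]) // json.Number 9007199254740993
}
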
diff --git a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
index 24bc98ac..b5380505 100644
--- a/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
+++ b/vendor/google.golang.org/protobuf/encoding/prototext/decode.go
@@ -185,11 +185,6 @@ func (d decoder) unmarshalMessage(m protoreflect.Message, checkDelims bool) erro
} else if xtErr != nil && xtErr != protoregistry.NotFound {
return d.newError(tok.Pos(), "unable to resolve [%s]: %v", tok.RawString(), xtErr)
}
- if flags.ProtoLegacy {
- if fd != nil && fd.IsWeak() && fd.Message().IsPlaceholder() {
- fd = nil // reset since the weak reference is not linked in
- }
- }
// Handle unknown fields.
if fd == nil {
diff --git a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
index 7e87c760..669133d0 100644
--- a/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
+++ b/vendor/google.golang.org/protobuf/internal/encoding/tag/tag.go
@@ -26,7 +26,7 @@ var byteType = reflect.TypeOf(byte(0))
// The type is the underlying field type (e.g., a repeated field may be
// represented by []T, but the Go type passed in is just T).
// A list of enum value descriptors must be provided for enum fields.
-// This does not populate the Enum or Message (except for weak message).
+// This does not populate the Enum or Message.
//
// This function is a best effort attempt; parsing errors are ignored.
func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescriptors) protoreflect.FieldDescriptor {
@@ -109,9 +109,6 @@ func Unmarshal(tag string, goType reflect.Type, evs protoreflect.EnumValueDescri
}
case s == "packed":
f.L1.EditionFeatures.IsPacked = true
- case strings.HasPrefix(s, "weak="):
- f.L1.IsWeak = true
- f.L1.Message = filedesc.PlaceholderMessage(protoreflect.FullName(s[len("weak="):]))
case strings.HasPrefix(s, "def="):
// The default tag is special in that everything afterwards is the
// default regardless of the presence of commas.
@@ -183,9 +180,6 @@ func Marshal(fd protoreflect.FieldDescriptor, enumName string) string {
// the exact same semantics from the previous generator.
tag = append(tag, "json="+jsonName)
}
- if fd.IsWeak() {
- tag = append(tag, "weak="+string(fd.Message().FullName()))
- }
// The previous implementation does not tag extension fields as proto3,
// even when the field is defined in a proto3 file. Match that behavior
// for consistency.
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
index 378b826f..688aabe4 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc.go
@@ -19,7 +19,6 @@ import (
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/internal/strs"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// Edition is an Enum for proto2.Edition
@@ -275,7 +274,6 @@ type (
Kind protoreflect.Kind
StringName stringName
IsProto3Optional bool // promoted from google.protobuf.FieldDescriptorProto
- IsWeak bool // promoted from google.protobuf.FieldOptions
IsLazy bool // promoted from google.protobuf.FieldOptions
Default defaultValue
ContainingOneof protoreflect.OneofDescriptor // must be consistent with Message.Oneofs.Fields
@@ -369,7 +367,7 @@ func (fd *Field) IsPacked() bool {
return fd.L1.EditionFeatures.IsPacked
}
func (fd *Field) IsExtension() bool { return false }
-func (fd *Field) IsWeak() bool { return fd.L1.IsWeak }
+func (fd *Field) IsWeak() bool { return false }
func (fd *Field) IsLazy() bool { return fd.L1.IsLazy }
func (fd *Field) IsList() bool { return fd.Cardinality() == protoreflect.Repeated && !fd.IsMap() }
func (fd *Field) IsMap() bool { return fd.Message() != nil && fd.Message().IsMapEntry() }
@@ -396,11 +394,6 @@ func (fd *Field) Enum() protoreflect.EnumDescriptor {
return fd.L1.Enum
}
func (fd *Field) Message() protoreflect.MessageDescriptor {
- if fd.L1.IsWeak {
- if d, _ := protoregistry.GlobalFiles.FindDescriptorByName(fd.L1.Message.FullName()); d != nil {
- return d.(protoreflect.MessageDescriptor)
- }
- }
return fd.L1.Message
}
func (fd *Field) IsMapEntry() bool {
diff --git a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
index 67a51b32..d4c94458 100644
--- a/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/filedesc/desc_lazy.go
@@ -32,11 +32,6 @@ func (file *File) resolveMessages() {
for j := range md.L2.Fields.List {
fd := &md.L2.Fields.List[j]
- // Weak fields are resolved upon actual use.
- if fd.L1.IsWeak {
- continue
- }
-
// Resolve message field dependency.
switch fd.L1.Kind {
case protoreflect.EnumKind:
@@ -150,8 +145,6 @@ func (fd *File) unmarshalFull(b []byte) {
switch num {
case genid.FileDescriptorProto_PublicDependency_field_number:
fd.L2.Imports[v].IsPublic = true
- case genid.FileDescriptorProto_WeakDependency_field_number:
- fd.L2.Imports[v].IsWeak = true
}
case protowire.BytesType:
v, m := protowire.ConsumeBytes(b)
@@ -502,8 +495,6 @@ func (fd *Field) unmarshalOptions(b []byte) {
switch num {
case genid.FieldOptions_Packed_field_number:
fd.L1.EditionFeatures.IsPacked = protowire.DecodeBool(v)
- case genid.FieldOptions_Weak_field_number:
- fd.L1.IsWeak = protowire.DecodeBool(v)
case genid.FieldOptions_Lazy_field_number:
fd.L1.IsLazy = protowire.DecodeBool(v)
case FieldOptions_EnforceUTF8:
diff --git a/vendor/google.golang.org/protobuf/internal/filetype/build.go b/vendor/google.golang.org/protobuf/internal/filetype/build.go
index ba83fea4..e1b4130b 100644
--- a/vendor/google.golang.org/protobuf/internal/filetype/build.go
+++ b/vendor/google.golang.org/protobuf/internal/filetype/build.go
@@ -63,7 +63,7 @@ type Builder struct {
// message declarations in "flattened ordering".
//
// Dependencies are Go types for enums or messages referenced by
- // message fields (excluding weak fields), for parent extended messages of
+ // message fields, for parent extended messages of
// extension fields, for enums or messages referenced by extension fields,
// and for input and output messages referenced by service methods.
// Dependencies must come after declarations, but the ordering of
diff --git a/vendor/google.golang.org/protobuf/internal/flags/flags.go b/vendor/google.golang.org/protobuf/internal/flags/flags.go
index 58372dd3..a06ccabc 100644
--- a/vendor/google.golang.org/protobuf/internal/flags/flags.go
+++ b/vendor/google.golang.org/protobuf/internal/flags/flags.go
@@ -6,7 +6,7 @@
package flags
// ProtoLegacy specifies whether to enable support for legacy functionality
-// such as MessageSets, weak fields, and various other obscure behavior
+// such as MessageSets, and various other obscure behavior
// that is necessary to maintain backwards compatibility with proto1 or
// the pre-release variants of proto2 and proto3.
//
diff --git a/vendor/google.golang.org/protobuf/internal/genid/goname.go b/vendor/google.golang.org/protobuf/internal/genid/goname.go
index 693d2e9e..99bb95ba 100644
--- a/vendor/google.golang.org/protobuf/internal/genid/goname.go
+++ b/vendor/google.golang.org/protobuf/internal/genid/goname.go
@@ -11,15 +11,10 @@ const (
SizeCache_goname = "sizeCache"
SizeCacheA_goname = "XXX_sizecache"
- WeakFields_goname = "weakFields"
- WeakFieldsA_goname = "XXX_weak"
-
UnknownFields_goname = "unknownFields"
UnknownFieldsA_goname = "XXX_unrecognized"
ExtensionFields_goname = "extensionFields"
ExtensionFieldsA_goname = "XXX_InternalExtensions"
ExtensionFieldsB_goname = "XXX_extensions"
-
- WeakFieldPrefix_goname = "XXX_weak_"
)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
index 7c1f66c8..d14d7d93 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_field.go
@@ -5,15 +5,12 @@
package impl
import (
- "fmt"
"reflect"
- "sync"
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/errors"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/runtime/protoiface"
)
@@ -121,78 +118,6 @@ func (mi *MessageInfo) initOneofFieldCoders(od protoreflect.OneofDescriptor, si
}
}
-func makeWeakMessageFieldCoder(fd protoreflect.FieldDescriptor) pointerCoderFuncs {
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- })
- }
-
- return pointerCoderFuncs{
- size: func(p pointer, f *coderFieldInfo, opts marshalOptions) int {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return 0
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return sizeMessage(m, f.tagsize, opts)
- },
- marshal: func(b []byte, p pointer, f *coderFieldInfo, opts marshalOptions) ([]byte, error) {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return b, nil
- }
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- return appendMessage(b, m, f.wiretag, opts)
- },
- unmarshal: func(b []byte, p pointer, wtyp protowire.Type, f *coderFieldInfo, opts unmarshalOptions) (unmarshalOutput, error) {
- fs := p.WeakFields()
- m, ok := fs.get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- return unmarshalOutput{}, errUnknown
- }
- m = messageType.New().Interface()
- fs.set(f.num, m)
- }
- return consumeMessage(b, m, wtyp, opts)
- },
- isInit: func(p pointer, f *coderFieldInfo) error {
- m, ok := p.WeakFields().get(f.num)
- if !ok {
- return nil
- }
- return proto.CheckInitialized(m)
- },
- merge: func(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
- sm, ok := src.WeakFields().get(f.num)
- if !ok {
- return
- }
- dm, ok := dst.WeakFields().get(f.num)
- if !ok {
- lazyInit()
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v is not linked in", fd.Message().FullName()))
- }
- dm = messageType.New().Interface()
- dst.WeakFields().set(f.num, dm)
- }
- opts.Merge(dm, sm)
- },
- }
-}
-
func makeMessageFieldCoder(fd protoreflect.FieldDescriptor, ft reflect.Type) pointerCoderFuncs {
if mi := getMessageInfo(ft); mi != nil {
funcs := pointerCoderFuncs{
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
index fb35f0ba..229c6980 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_map.go
@@ -94,7 +94,7 @@ func sizeMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, opts marshalO
return 0
}
n := 0
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
key := mapi.conv.keyConv.PBValueOf(iter.Key()).MapKey()
keySize := mapi.keyFuncs.size(key.Value(), mapKeyTagSize, opts)
@@ -281,7 +281,7 @@ func appendMap(b []byte, mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo, o
if opts.Deterministic() {
return appendMapDeterministic(b, mapv, mapi, f, opts)
}
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
var err error
b = protowire.AppendVarint(b, f.wiretag)
@@ -328,7 +328,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
if !mi.needsInitCheck {
return nil
}
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
val := pointerOfValue(iter.Value())
if err := mi.checkInitializedPointer(val); err != nil {
@@ -336,7 +336,7 @@ func isInitMap(mapv reflect.Value, mapi *mapInfo, f *coderFieldInfo) error {
}
}
} else {
- iter := mapRange(mapv)
+ iter := mapv.MapRange()
for iter.Next() {
val := mapi.conv.valConv.PBValueOf(iter.Value())
if err := mapi.valFuncs.isInit(val); err != nil {
@@ -356,7 +356,7 @@ func mergeMap(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), iter.Value())
}
@@ -371,7 +371,7 @@ func mergeMapOfBytes(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
dstm.SetMapIndex(iter.Key(), reflect.ValueOf(append(emptyBuf[:], iter.Value().Bytes()...)))
}
@@ -386,7 +386,7 @@ func mergeMapOfMessage(dst, src pointer, f *coderFieldInfo, opts mergeOptions) {
if dstm.IsNil() {
dstm.Set(reflect.MakeMap(f.ft))
}
- iter := mapRange(srcm)
+ iter := srcm.MapRange()
for iter.Next() {
val := reflect.New(f.ft.Elem().Elem())
if f.mi != nil {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
deleted file mode 100644
index 4b15493f..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go111.go
+++ /dev/null
@@ -1,38 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.12
-// +build !go1.12
-
-package impl
-
-import "reflect"
-
-type mapIter struct {
- v reflect.Value
- keys []reflect.Value
-}
-
-// mapRange provides a less-efficient equivalent to
-// the Go 1.12 reflect.Value.MapRange method.
-func mapRange(v reflect.Value) *mapIter {
- return &mapIter{v: v}
-}
-
-func (i *mapIter) Next() bool {
- if i.keys == nil {
- i.keys = i.v.MapKeys()
- } else {
- i.keys = i.keys[1:]
- }
- return len(i.keys) > 0
-}
-
-func (i *mapIter) Key() reflect.Value {
- return i.keys[0]
-}
-
-func (i *mapIter) Value() reflect.Value {
- return i.v.MapIndex(i.keys[0])
-}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go b/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
deleted file mode 100644
index 0b31b66e..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_map_go112.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.12
-// +build go1.12
-
-package impl
-
-import "reflect"
-
-func mapRange(v reflect.Value) *reflect.MapIter { return v.MapRange() }
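
Aside: with Go 1.12 long past the module's minimum supported version, the two build-tagged mapRange shims above are deleted and the coders call reflect.Value.MapRange directly. For reference, the direct usage looks like this (illustrative sketch, not vendored code):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	m := map[string]int{"a": 1, "b": 2}
	iter := reflect.ValueOf(m).MapRange()
	for iter.Next() {
		fmt.Println(iter.Key(), iter.Value()) // iteration order is unspecified
	}
}
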
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
index 2f7b363e..f78b57b0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message.go
@@ -118,12 +118,9 @@ func (mi *MessageInfo) makeCoderMethods(t reflect.Type, si structInfo) {
},
}
case isOneof:
- fieldOffset = offsetOf(fs, mi.Exporter)
- case fd.IsWeak():
- fieldOffset = si.weakOffset
- funcs = makeWeakMessageFieldCoder(fd)
+ fieldOffset = offsetOf(fs)
default:
- fieldOffset = offsetOf(fs, mi.Exporter)
+ fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &preallocFields[i]
diff --git a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
index 88c16ae5..41c1f74e 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/codec_message_opaque.go
@@ -45,19 +45,16 @@ func (mi *MessageInfo) makeOpaqueCoderMethods(t reflect.Type, si opaqueStructInf
var childMessage *MessageInfo
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
- fieldOffset = offsetOf(fs, mi.Exporter)
- case fd.IsWeak():
- fieldOffset = si.weakOffset
- funcs = makeWeakMessageFieldCoder(fd)
+ fieldOffset = offsetOf(fs)
case fd.Message() != nil && !fd.IsMap():
- fieldOffset = offsetOf(fs, mi.Exporter)
+ fieldOffset = offsetOf(fs)
if fd.IsList() {
childMessage, funcs = makeOpaqueRepeatedMessageFieldCoder(fd, ft)
} else {
childMessage, funcs = makeOpaqueMessageFieldCoder(fd, ft)
}
default:
- fieldOffset = offsetOf(fs, mi.Exporter)
+ fieldOffset = offsetOf(fs)
childMessage, funcs = fieldCoder(fd, ft)
}
cf := &coderFieldInfo{
diff --git a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
index 304244a6..e4580b3a 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/convert_map.go
@@ -101,7 +101,7 @@ func (ms *mapReflect) Mutable(k protoreflect.MapKey) protoreflect.Value {
return v
}
func (ms *mapReflect) Range(f func(protoreflect.MapKey, protoreflect.Value) bool) {
- iter := mapRange(ms.v)
+ iter := ms.v.MapRange()
for iter.Next() {
k := ms.keyConv.PBValueOf(iter.Key()).MapKey()
v := ms.valConv.PBValueOf(iter.Value())
diff --git a/vendor/google.golang.org/protobuf/internal/impl/lazy.go b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
index e8fb6c35..c7de31e2 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/lazy.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/lazy.go
@@ -131,7 +131,7 @@ func (mi *MessageInfo) skipField(b []byte, f *coderFieldInfo, wtyp protowire.Typ
fmi := f.validation.mi
if fmi == nil {
fd := mi.Desc.Fields().ByNumber(f.num)
- if fd == nil || !fd.IsWeak() {
+ if fd == nil {
return out, ValidationUnknown
}
messageName := fd.Message().FullName()
diff --git a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
index bf0b6049..a51dffbe 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/legacy_message.go
@@ -310,12 +310,9 @@ func aberrantAppendField(md *filedesc.Message, goType reflect.Type, tag, tagKey,
fd.L0.Parent = md
fd.L0.Index = n
- if fd.L1.IsWeak || fd.L1.EditionFeatures.IsPacked {
+ if fd.L1.EditionFeatures.IsPacked {
fd.L1.Options = func() protoreflect.ProtoMessage {
opts := descopts.Field.ProtoReflect().New()
- if fd.L1.IsWeak {
- opts.Set(opts.Descriptor().Fields().ByName("weak"), protoreflect.ValueOfBool(true))
- }
if fd.L1.EditionFeatures.IsPacked {
opts.Set(opts.Descriptor().Fields().ByName("packed"), protoreflect.ValueOfBool(fd.L1.EditionFeatures.IsPacked))
}
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message.go b/vendor/google.golang.org/protobuf/internal/impl/message.go
index fa10a0f5..d50423dc 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message.go
@@ -14,7 +14,6 @@ import (
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
// MessageInfo provides protobuf related functionality for a given Go type
@@ -120,7 +119,6 @@ type (
var (
sizecacheType = reflect.TypeOf(SizeCache(0))
- weakFieldsType = reflect.TypeOf(WeakFields(nil))
unknownFieldsAType = reflect.TypeOf(unknownFieldsA(nil))
unknownFieldsBType = reflect.TypeOf(unknownFieldsB(nil))
extensionFieldsType = reflect.TypeOf(ExtensionFields(nil))
@@ -129,8 +127,6 @@ var (
type structInfo struct {
sizecacheOffset offset
sizecacheType reflect.Type
- weakOffset offset
- weakType reflect.Type
unknownOffset offset
unknownType reflect.Type
extensionOffset offset
@@ -148,7 +144,6 @@ type structInfo struct {
func (mi *MessageInfo) makeStructInfo(t reflect.Type) structInfo {
si := structInfo{
sizecacheOffset: invalidOffset,
- weakOffset: invalidOffset,
unknownOffset: invalidOffset,
extensionOffset: invalidOffset,
lazyOffset: invalidOffset,
@@ -165,28 +160,23 @@ fieldLoop:
switch f := t.Field(i); f.Name {
case genid.SizeCache_goname, genid.SizeCacheA_goname:
if f.Type == sizecacheType {
- si.sizecacheOffset = offsetOf(f, mi.Exporter)
+ si.sizecacheOffset = offsetOf(f)
si.sizecacheType = f.Type
}
- case genid.WeakFields_goname, genid.WeakFieldsA_goname:
- if f.Type == weakFieldsType {
- si.weakOffset = offsetOf(f, mi.Exporter)
- si.weakType = f.Type
- }
case genid.UnknownFields_goname, genid.UnknownFieldsA_goname:
if f.Type == unknownFieldsAType || f.Type == unknownFieldsBType {
- si.unknownOffset = offsetOf(f, mi.Exporter)
+ si.unknownOffset = offsetOf(f)
si.unknownType = f.Type
}
case genid.ExtensionFields_goname, genid.ExtensionFieldsA_goname, genid.ExtensionFieldsB_goname:
if f.Type == extensionFieldsType {
- si.extensionOffset = offsetOf(f, mi.Exporter)
+ si.extensionOffset = offsetOf(f)
si.extensionType = f.Type
}
case "lazyFields", "XXX_lazyUnmarshalInfo":
- si.lazyOffset = offsetOf(f, mi.Exporter)
+ si.lazyOffset = offsetOf(f)
case "XXX_presence":
- si.presenceOffset = offsetOf(f, mi.Exporter)
+ si.presenceOffset = offsetOf(f)
default:
for _, s := range strings.Split(f.Tag.Get("protobuf"), ",") {
if len(s) > 0 && strings.Trim(s, "0123456789") == "" {
@@ -256,9 +246,6 @@ func (mi *MessageInfo) Message(i int) protoreflect.MessageType {
mi.init()
fd := mi.Desc.Fields().Get(i)
switch {
- case fd.IsWeak():
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(fd.Message().FullName())
- return mt
case fd.IsMap():
return mapEntryType{fd.Message(), mi.fieldTypes[fd.Number()]}
default:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
index d407dd79..dd55e8e0 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_opaque.go
@@ -56,9 +56,6 @@ func opaqueInitHook(mi *MessageInfo) bool {
usePresence, _ := usePresenceForField(si, fd)
switch {
- case fd.IsWeak():
- // Weak fields are no different for opaque.
- fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
// Oneofs are no different for opaque.
fi = fieldInfoForOneof(fd, si.oneofsByName[fd.ContainingOneof().Name()], mi.Exporter, si.oneofWrappersByNumber[fd.Number()])
@@ -88,9 +85,7 @@ func opaqueInitHook(mi *MessageInfo) bool {
mi.oneofs = map[protoreflect.Name]*oneofInfo{}
for i := 0; i < mi.Desc.Oneofs().Len(); i++ {
od := mi.Desc.Oneofs().Get(i)
- if !od.IsSynthetic() {
- mi.oneofs[od.Name()] = makeOneofInfo(od, si.structInfo, mi.Exporter)
- }
+ mi.oneofs[od.Name()] = makeOneofInfoOpaque(mi, od, si.structInfo, mi.Exporter)
}
mi.denseFields = make([]*fieldInfo, fds.Len()*2)
@@ -119,12 +114,32 @@ func opaqueInitHook(mi *MessageInfo) bool {
return true
}
+func makeOneofInfoOpaque(mi *MessageInfo, od protoreflect.OneofDescriptor, si structInfo, x exporter) *oneofInfo {
+ oi := &oneofInfo{oneofDesc: od}
+ if od.IsSynthetic() {
+ fd := od.Fields().Get(0)
+ index, _ := presenceIndex(mi.Desc, fd)
+ oi.which = func(p pointer) protoreflect.FieldNumber {
+ if p.IsNil() {
+ return 0
+ }
+ if !mi.present(p, index) {
+ return 0
+ }
+ return od.Fields().Get(0).Number()
+ }
+ return oi
+ }
+ // Dispatch to non-opaque oneof implementation for non-synthetic oneofs.
+ return makeOneofInfo(od, si, x)
+}
+
func (mi *MessageInfo) fieldInfoForMapOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
if ft.Kind() != reflect.Map {
panic(fmt.Sprintf("invalid type: got %v, want map kind", ft))
}
- fieldOffset := offsetOf(fs, mi.Exporter)
+ fieldOffset := offsetOf(fs)
conv := NewConverter(ft, fd)
return fieldInfo{
fieldDesc: fd,
@@ -178,7 +193,7 @@ func (mi *MessageInfo) fieldInfoForScalarListOpaque(si opaqueStructInfo, fd prot
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(reflect.PtrTo(ft), fd)
- fieldOffset := offsetOf(fs, mi.Exporter)
+ fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
return fieldInfo{
fieldDesc: fd,
@@ -228,7 +243,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaque(si opaqueStructInfo, fd pro
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(ft, fd)
- fieldOffset := offsetOf(fs, mi.Exporter)
+ fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
fieldNumber := fd.Number()
return fieldInfo{
@@ -321,7 +336,7 @@ func (mi *MessageInfo) fieldInfoForMessageListOpaqueNoPresence(si opaqueStructIn
panic(fmt.Sprintf("invalid type: got %v, want slice kind", ft))
}
conv := NewConverter(ft, fd)
- fieldOffset := offsetOf(fs, mi.Exporter)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -393,7 +408,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref
deref = true
}
conv := NewConverter(ft, fd)
- fieldOffset := offsetOf(fs, mi.Exporter)
+ fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
var getter func(p pointer) protoreflect.Value
if !nullable {
@@ -462,7 +477,7 @@ func (mi *MessageInfo) fieldInfoForScalarOpaque(si opaqueStructInfo, fd protoref
func (mi *MessageInfo) fieldInfoForMessageOpaque(si opaqueStructInfo, fd protoreflect.FieldDescriptor, fs reflect.StructField) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
- fieldOffset := offsetOf(fs, mi.Exporter)
+ fieldOffset := offsetOf(fs)
index, _ := presenceIndex(mi.Desc, fd)
fieldNumber := fd.Number()
elemType := fs.Type.Elem()
@@ -602,8 +617,6 @@ func usePresenceForField(si opaqueStructInfo, fd protoreflect.FieldDescriptor) (
switch {
case fd.ContainingOneof() != nil && !fd.ContainingOneof().IsSynthetic():
return false, false
- case fd.IsWeak():
- return false, false
case fd.IsMap():
return false, false
case fd.Kind() == protoreflect.MessageKind || fd.Kind() == protoreflect.GroupKind:
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
index 31c19b54..0d20132f 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect.go
@@ -72,8 +72,6 @@ func (mi *MessageInfo) makeKnownFieldsFunc(si structInfo) {
fi = fieldInfoForMap(fd, fs, mi.Exporter)
case fd.IsList():
fi = fieldInfoForList(fd, fs, mi.Exporter)
- case fd.IsWeak():
- fi = fieldInfoForWeakMessage(fd, si.weakOffset)
case fd.Message() != nil:
fi = fieldInfoForMessage(fd, fs, mi.Exporter)
default:
@@ -219,9 +217,6 @@ func (mi *MessageInfo) makeFieldTypes(si structInfo) {
}
case fd.Message() != nil:
ft = fs.Type
- if fd.IsWeak() {
- ft = nil
- }
isMessage = true
}
if isMessage && ft != nil && ft.Kind() != reflect.Ptr {
diff --git a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
index a7406462..68d4ae32 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/message_reflect_field.go
@@ -8,11 +8,8 @@ import (
"fmt"
"math"
"reflect"
- "sync"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
)
type fieldInfo struct {
@@ -76,7 +73,7 @@ func fieldInfoForOneof(fd protoreflect.FieldDescriptor, fs reflect.StructField,
isMessage := fd.Message() != nil
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
// NOTE: The logic below intentionally assumes that oneof fields are
// well-formatted. That is, the oneof interface never contains a
@@ -152,7 +149,7 @@ func fieldInfoForMap(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -205,7 +202,7 @@ func fieldInfoForList(fd protoreflect.FieldDescriptor, fs reflect.StructField, x
conv := NewConverter(reflect.PtrTo(ft), fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -269,7 +266,7 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
conv := NewConverter(ft, fd)
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
// Generate specialized getter functions to avoid going through reflect.Value
if nullable {
@@ -332,85 +329,12 @@ func fieldInfoForScalar(fd protoreflect.FieldDescriptor, fs reflect.StructField,
}
}
-func fieldInfoForWeakMessage(fd protoreflect.FieldDescriptor, weakOffset offset) fieldInfo {
- if !flags.ProtoLegacy {
- panic("no support for proto1 weak fields")
- }
-
- var once sync.Once
- var messageType protoreflect.MessageType
- lazyInit := func() {
- once.Do(func() {
- messageName := fd.Message().FullName()
- messageType, _ = protoregistry.GlobalTypes.FindMessageByName(messageName)
- if messageType == nil {
- panic(fmt.Sprintf("weak message %v for field %v is not linked in", messageName, fd.FullName()))
- }
- })
- }
-
- num := fd.Number()
- return fieldInfo{
- fieldDesc: fd,
- has: func(p pointer) bool {
- if p.IsNil() {
- return false
- }
- _, ok := p.Apply(weakOffset).WeakFields().get(num)
- return ok
- },
- clear: func(p pointer) {
- p.Apply(weakOffset).WeakFields().clear(num)
- },
- get: func(p pointer) protoreflect.Value {
- lazyInit()
- if p.IsNil() {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- m, ok := p.Apply(weakOffset).WeakFields().get(num)
- if !ok {
- return protoreflect.ValueOfMessage(messageType.Zero())
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- set: func(p pointer, v protoreflect.Value) {
- lazyInit()
- m := v.Message()
- if m.Descriptor() != messageType.Descriptor() {
- if got, want := m.Descriptor().FullName(), messageType.Descriptor().FullName(); got != want {
- panic(fmt.Sprintf("field %v has mismatching message descriptor: got %v, want %v", fd.FullName(), got, want))
- }
- panic(fmt.Sprintf("field %v has mismatching message descriptor: %v", fd.FullName(), m.Descriptor().FullName()))
- }
- p.Apply(weakOffset).WeakFields().set(num, m.Interface())
- },
- mutable: func(p pointer) protoreflect.Value {
- lazyInit()
- fs := p.Apply(weakOffset).WeakFields()
- m, ok := fs.get(num)
- if !ok {
- m = messageType.New().Interface()
- fs.set(num, m)
- }
- return protoreflect.ValueOfMessage(m.ProtoReflect())
- },
- newMessage: func() protoreflect.Message {
- lazyInit()
- return messageType.New()
- },
- newField: func() protoreflect.Value {
- lazyInit()
- return protoreflect.ValueOfMessage(messageType.New())
- },
- }
-}
-
func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField, x exporter) fieldInfo {
ft := fs.Type
conv := NewConverter(ft, fd)
// TODO: Implement unsafe fast path?
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
return fieldInfo{
fieldDesc: fd,
has: func(p pointer) bool {
@@ -419,7 +343,7 @@ func fieldInfoForMessage(fd protoreflect.FieldDescriptor, fs reflect.StructField
}
rv := p.Apply(fieldOffset).AsValueOf(fs.Type).Elem()
if fs.Type.Kind() != reflect.Ptr {
- return !isZero(rv)
+ return !rv.IsZero()
}
return !rv.IsNil()
},
@@ -466,7 +390,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
oi := &oneofInfo{oneofDesc: od}
if od.IsSynthetic() {
fs := si.fieldsByNumber[od.Fields().Get(0).Number()]
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -479,7 +403,7 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
}
} else {
fs := si.oneofsByName[od.Name()]
- fieldOffset := offsetOf(fs, x)
+ fieldOffset := offsetOf(fs)
oi.which = func(p pointer) protoreflect.FieldNumber {
if p.IsNil() {
return 0
@@ -497,41 +421,3 @@ func makeOneofInfo(od protoreflect.OneofDescriptor, si structInfo, x exporter) *
}
return oi
}
-
-// isZero is identical to reflect.Value.IsZero.
-// TODO: Remove this when Go1.13 is the minimally supported Go version.
-func isZero(v reflect.Value) bool {
- switch v.Kind() {
- case reflect.Bool:
- return !v.Bool()
- case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
- return v.Int() == 0
- case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
- return v.Uint() == 0
- case reflect.Float32, reflect.Float64:
- return math.Float64bits(v.Float()) == 0
- case reflect.Complex64, reflect.Complex128:
- c := v.Complex()
- return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
- case reflect.Array:
- for i := 0; i < v.Len(); i++ {
- if !isZero(v.Index(i)) {
- return false
- }
- }
- return true
- case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
- return v.IsNil()
- case reflect.String:
- return v.Len() == 0
- case reflect.Struct:
- for i := 0; i < v.NumField(); i++ {
- if !isZero(v.Field(i)) {
- return false
- }
- }
- return true
- default:
- panic(&reflect.ValueError{Method: "reflect.Value.IsZero", Kind: v.Kind()})
- }
-}
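
Aside: the hand-rolled isZero helper above is dropped in favor of reflect.Value.IsZero, available since Go 1.13 and equivalent for the kinds used here. A minimal sketch of the replacement call (illustrative only):

package main

import (
	"fmt"
	"reflect"
)

type example struct {
	A int
	B string
}

func main() {
	fmt.Println(reflect.ValueOf(example{}).IsZero())     // true
	fmt.Println(reflect.ValueOf(example{A: 1}).IsZero()) // false
}
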
diff --git a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
index 041ebde2..62f8bf66 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/pointer_unsafe.go
@@ -22,7 +22,7 @@ type Pointer unsafe.Pointer
type offset uintptr
// offsetOf returns a field offset for the struct field.
-func offsetOf(f reflect.StructField, x exporter) offset {
+func offsetOf(f reflect.StructField) offset {
return offset(f.Offset)
}
@@ -111,7 +111,6 @@ func (p pointer) StringSlice() *[]string { return (*[]string)(p.p
func (p pointer) Bytes() *[]byte { return (*[]byte)(p.p) }
func (p pointer) BytesPtr() **[]byte { return (**[]byte)(p.p) }
func (p pointer) BytesSlice() *[][]byte { return (*[][]byte)(p.p) }
-func (p pointer) WeakFields() *weakFields { return (*weakFields)(p.p) }
func (p pointer) Extensions() *map[int32]ExtensionField { return (*map[int32]ExtensionField)(p.p) }
func (p pointer) LazyInfoPtr() **protolazy.XXX_lazyUnmarshalInfo {
return (**protolazy.XXX_lazyUnmarshalInfo)(p.p)
diff --git a/vendor/google.golang.org/protobuf/internal/impl/validate.go b/vendor/google.golang.org/protobuf/internal/impl/validate.go
index b534a3d6..7b2995dd 100644
--- a/vendor/google.golang.org/protobuf/internal/impl/validate.go
+++ b/vendor/google.golang.org/protobuf/internal/impl/validate.go
@@ -211,9 +211,7 @@ func newValidationInfo(fd protoreflect.FieldDescriptor, ft reflect.Type) validat
switch fd.Kind() {
case protoreflect.MessageKind:
vi.typ = validationTypeMessage
- if !fd.IsWeak() {
- vi.mi = getMessageInfo(ft)
- }
+ vi.mi = getMessageInfo(ft)
case protoreflect.GroupKind:
vi.typ = validationTypeGroup
vi.mi = getMessageInfo(ft)
@@ -320,26 +318,6 @@ State:
}
if f != nil {
vi = f.validation
- if vi.typ == validationTypeMessage && vi.mi == nil {
- // Probable weak field.
- //
- // TODO: Consider storing the results of this lookup somewhere
- // rather than recomputing it on every validation.
- fd := st.mi.Desc.Fields().ByNumber(num)
- if fd == nil || !fd.IsWeak() {
- break
- }
- messageName := fd.Message().FullName()
- messageType, err := protoregistry.GlobalTypes.FindMessageByName(messageName)
- switch err {
- case nil:
- vi.mi, _ = messageType.(*MessageInfo)
- case protoregistry.NotFound:
- vi.typ = validationTypeBytes
- default:
- return out, ValidationUnknown
- }
- }
break
}
// Possible extension field.
diff --git a/vendor/google.golang.org/protobuf/internal/impl/weak.go b/vendor/google.golang.org/protobuf/internal/impl/weak.go
deleted file mode 100644
index eb79a7ba..00000000
--- a/vendor/google.golang.org/protobuf/internal/impl/weak.go
+++ /dev/null
@@ -1,74 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package impl
-
-import (
- "fmt"
-
- "google.golang.org/protobuf/reflect/protoreflect"
- "google.golang.org/protobuf/reflect/protoregistry"
-)
-
-// weakFields adds methods to the exported WeakFields type for internal use.
-//
-// The exported type is an alias to an unnamed type, so methods can't be
-// defined directly on it.
-type weakFields WeakFields
-
-func (w weakFields) get(num protoreflect.FieldNumber) (protoreflect.ProtoMessage, bool) {
- m, ok := w[int32(num)]
- return m, ok
-}
-
-func (w *weakFields) set(num protoreflect.FieldNumber, m protoreflect.ProtoMessage) {
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
-
-func (w *weakFields) clear(num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) HasWeak(w WeakFields, num protoreflect.FieldNumber) bool {
- _, ok := w[int32(num)]
- return ok
-}
-
-func (Export) ClearWeak(w *WeakFields, num protoreflect.FieldNumber) {
- delete(*w, int32(num))
-}
-
-func (Export) GetWeak(w WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName) protoreflect.ProtoMessage {
- if m, ok := w[int32(num)]; ok {
- return m
- }
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- return mt.Zero().Interface()
-}
-
-func (Export) SetWeak(w *WeakFields, num protoreflect.FieldNumber, name protoreflect.FullName, m protoreflect.ProtoMessage) {
- if m != nil {
- mt, _ := protoregistry.GlobalTypes.FindMessageByName(name)
- if mt == nil {
- panic(fmt.Sprintf("message %v for weak field is not linked in", name))
- }
- if mt != m.ProtoReflect().Type() {
- panic(fmt.Sprintf("invalid message type for weak field: got %T, want %T", m, mt.Zero().Interface()))
- }
- }
- if m == nil || !m.ProtoReflect().IsValid() {
- delete(*w, int32(num))
- return
- }
- if *w == nil {
- *w = make(weakFields)
- }
- (*w)[int32(num)] = m
-}
diff --git a/vendor/google.golang.org/protobuf/internal/version/version.go b/vendor/google.golang.org/protobuf/internal/version/version.go
index 3018450d..01efc330 100644
--- a/vendor/google.golang.org/protobuf/internal/version/version.go
+++ b/vendor/google.golang.org/protobuf/internal/version/version.go
@@ -52,7 +52,7 @@ import (
const (
Major = 1
Minor = 36
- Patch = 1
+ Patch = 5
PreRelease = ""
)
diff --git a/vendor/google.golang.org/protobuf/proto/decode.go b/vendor/google.golang.org/protobuf/proto/decode.go
index a3b5e142..4cbf1aea 100644
--- a/vendor/google.golang.org/protobuf/proto/decode.go
+++ b/vendor/google.golang.org/protobuf/proto/decode.go
@@ -8,7 +8,6 @@ import (
"google.golang.org/protobuf/encoding/protowire"
"google.golang.org/protobuf/internal/encoding/messageset"
"google.golang.org/protobuf/internal/errors"
- "google.golang.org/protobuf/internal/flags"
"google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/internal/pragma"
"google.golang.org/protobuf/reflect/protoreflect"
@@ -172,10 +171,6 @@ func (o UnmarshalOptions) unmarshalMessageSlow(b []byte, m protoreflect.Message)
var err error
if fd == nil {
err = errUnknown
- } else if flags.ProtoLegacy {
- if fd.IsWeak() && fd.Message().IsPlaceholder() {
- err = errUnknown // weak referent is not linked in
- }
}
// Parse the field value.
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
index 69a05050..823dbf3b 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc.go
@@ -132,17 +132,11 @@ func (o FileOptions) New(fd *descriptorpb.FileDescriptorProto, r Resolver) (prot
}
f.L2.Imports[i].IsPublic = true
}
- for _, i := range fd.GetWeakDependency() {
- if !(0 <= i && int(i) < len(f.L2.Imports)) || f.L2.Imports[i].IsWeak {
- return nil, errors.New("invalid or duplicate weak import index: %d", i)
- }
- f.L2.Imports[i].IsWeak = true
- }
imps := importSet{f.Path(): true}
for i, path := range fd.GetDependency() {
imp := &f.L2.Imports[i]
f, err := r.FindFileByPath(path)
- if err == protoregistry.NotFound && (o.AllowUnresolvable || imp.IsWeak) {
+ if err == protoregistry.NotFound && o.AllowUnresolvable {
f = filedesc.PlaceholderFile(path)
} else if err != nil {
return nil, errors.New("could not resolve import %q: %v", path, err)
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
index ebcb4a8a..9da34998 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_init.go
@@ -149,7 +149,6 @@ func (r descsByName) initFieldsFromDescriptorProto(fds []*descriptorpb.FieldDesc
if opts := fd.GetOptions(); opts != nil {
opts = proto.Clone(opts).(*descriptorpb.FieldOptions)
f.L1.Options = func() protoreflect.ProtoMessage { return opts }
- f.L1.IsWeak = opts.GetWeak()
f.L1.IsLazy = opts.GetLazy()
if opts.Packed != nil {
f.L1.EditionFeatures.IsPacked = opts.GetPacked()
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
index f3cebab2..ff692436 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_resolve.go
@@ -43,7 +43,7 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
o.L1.Fields.List = append(o.L1.Fields.List, f)
}
- if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName()), f.IsWeak()); err != nil {
+ if f.L1.Kind, f.L1.Enum, f.L1.Message, err = r.findTarget(f.Kind(), f.Parent().FullName(), partialName(fd.GetTypeName())); err != nil {
return errors.New("message field %q cannot resolve type: %v", f.FullName(), err)
}
if f.L1.Kind == protoreflect.GroupKind && (f.IsMap() || f.IsMapEntry()) {
@@ -73,10 +73,10 @@ func (r *resolver) resolveMessageDependencies(ms []filedesc.Message, mds []*desc
func (r *resolver) resolveExtensionDependencies(xs []filedesc.Extension, xds []*descriptorpb.FieldDescriptorProto) (err error) {
for i, xd := range xds {
x := &xs[i]
- if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee()), false); err != nil {
+ if x.L1.Extendee, err = r.findMessageDescriptor(x.Parent().FullName(), partialName(xd.GetExtendee())); err != nil {
return errors.New("extension field %q cannot resolve extendee: %v", x.FullName(), err)
}
- if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName()), false); err != nil {
+ if x.L1.Kind, x.L2.Enum, x.L2.Message, err = r.findTarget(x.Kind(), x.Parent().FullName(), partialName(xd.GetTypeName())); err != nil {
return errors.New("extension field %q cannot resolve type: %v", x.FullName(), err)
}
if xd.DefaultValue != nil {
@@ -95,11 +95,11 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
s := &ss[i]
for j, md := range sd.GetMethod() {
m := &s.L2.Methods.List[j]
- m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()), false)
+ m.L1.Input, err = r.findMessageDescriptor(m.Parent().FullName(), partialName(md.GetInputType()))
if err != nil {
return errors.New("service method %q cannot resolve input: %v", m.FullName(), err)
}
- m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()), false)
+ m.L1.Output, err = r.findMessageDescriptor(s.FullName(), partialName(md.GetOutputType()))
if err != nil {
return errors.New("service method %q cannot resolve output: %v", m.FullName(), err)
}
@@ -111,16 +111,16 @@ func (r *resolver) resolveServiceDependencies(ss []filedesc.Service, sds []*desc
// findTarget finds an enum or message descriptor if k is an enum, message,
// group, or unknown. If unknown, and the name could be resolved, the kind
// returned kind is set based on the type of the resolved descriptor.
-func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
+func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName, ref partialName) (protoreflect.Kind, protoreflect.EnumDescriptor, protoreflect.MessageDescriptor, error) {
switch k {
case protoreflect.EnumKind:
- ed, err := r.findEnumDescriptor(scope, ref, isWeak)
+ ed, err := r.findEnumDescriptor(scope, ref)
if err != nil {
return 0, nil, nil, err
}
return k, ed, nil, nil
case protoreflect.MessageKind, protoreflect.GroupKind:
- md, err := r.findMessageDescriptor(scope, ref, isWeak)
+ md, err := r.findMessageDescriptor(scope, ref)
if err != nil {
return 0, nil, nil, err
}
@@ -129,7 +129,7 @@ func (r *resolver) findTarget(k protoreflect.Kind, scope protoreflect.FullName,
// Handle unspecified kinds (possible with parsers that operate
// on a per-file basis without knowledge of dependencies).
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return k, filedesc.PlaceholderEnum(ref.FullName()), filedesc.PlaceholderMessage(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return 0, nil, nil, errors.New("%q not found", ref.FullName())
@@ -206,9 +206,9 @@ func (r *resolver) findDescriptor(scope protoreflect.FullName, ref partialName)
}
}
-func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.EnumDescriptor, error) {
+func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.EnumDescriptor, error) {
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return filedesc.PlaceholderEnum(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return nil, errors.New("%q not found", ref.FullName())
@@ -222,9 +222,9 @@ func (r *resolver) findEnumDescriptor(scope protoreflect.FullName, ref partialNa
return ed, nil
}
-func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName, isWeak bool) (protoreflect.MessageDescriptor, error) {
+func (r *resolver) findMessageDescriptor(scope protoreflect.FullName, ref partialName) (protoreflect.MessageDescriptor, error) {
d, err := r.findDescriptor(scope, ref)
- if err == protoregistry.NotFound && (r.allowUnresolvable || isWeak) {
+ if err == protoregistry.NotFound && r.allowUnresolvable {
return filedesc.PlaceholderMessage(ref.FullName()), nil
} else if err == protoregistry.NotFound {
return nil, errors.New("%q not found", ref.FullName())
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
index 6de31c2e..c343d922 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/desc_validate.go
@@ -149,12 +149,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
return errors.New("message field %q under proto3 optional semantics must be within a single element oneof", f.FullName())
}
}
- if f.IsWeak() && !flags.ProtoLegacy {
- return errors.New("message field %q is a weak field, which is a legacy proto1 feature that is no longer supported", f.FullName())
- }
- if f.IsWeak() && (!f.HasPresence() || !isOptionalMessage(f) || f.ContainingOneof() != nil) {
- return errors.New("message field %q may only be weak for an optional message", f.FullName())
- }
if f.IsPacked() && !isPackable(f) {
return errors.New("message field %q is not packable", f.FullName())
}
@@ -199,9 +193,6 @@ func validateMessageDeclarations(file *filedesc.File, ms []filedesc.Message, mds
if f.Cardinality() != protoreflect.Optional {
return errors.New("message field %q belongs in a oneof and must be optional", f.FullName())
}
- if f.IsWeak() {
- return errors.New("message field %q belongs in a oneof and must not be a weak reference", f.FullName())
- }
}
}
@@ -254,9 +245,6 @@ func validateExtensionDeclarations(f *filedesc.File, xs []filedesc.Extension, xd
return errors.New("extension field %q has an invalid number: %d", x.FullName(), x.Number())
}
}
- if xd.GetOptions().GetWeak() {
- return errors.New("extension field %q cannot be a weak reference", x.FullName())
- }
if x.IsPacked() && !isPackable(x) {
return errors.New("extension field %q is not packable", x.FullName())
}
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
index bf0a0ccd..697a61b2 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/editions.go
@@ -11,6 +11,7 @@ import (
"google.golang.org/protobuf/internal/editiondefaults"
"google.golang.org/protobuf/internal/filedesc"
+ "google.golang.org/protobuf/internal/genid"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
@@ -125,16 +126,43 @@ func mergeEditionFeatures(parentDesc protoreflect.Descriptor, child *descriptorp
parentFS.IsJSONCompliant = *jf == descriptorpb.FeatureSet_ALLOW
}
- if goFeatures, ok := proto.GetExtension(child, gofeaturespb.E_Go).(*gofeaturespb.GoFeatures); ok && goFeatures != nil {
- if luje := goFeatures.LegacyUnmarshalJsonEnum; luje != nil {
- parentFS.GenerateLegacyUnmarshalJSON = *luje
- }
- if sep := goFeatures.StripEnumPrefix; sep != nil {
- parentFS.StripEnumPrefix = int(*sep)
- }
- if al := goFeatures.ApiLevel; al != nil {
- parentFS.APILevel = int(*al)
- }
+ // We must not use proto.GetExtension(child, gofeaturespb.E_Go)
+ // because that only works for messages we generated, but not for
+ // dynamicpb messages. See golang/protobuf#1669.
+ //
+ // Further, we harden this code against adversarial inputs: a
+ // service which accepts descriptors from a possibly malicious
+ // source shouldn't crash.
+ goFeatures := child.ProtoReflect().Get(gofeaturespb.E_Go.TypeDescriptor())
+ if !goFeatures.IsValid() {
+ return parentFS
+ }
+ gf, ok := goFeatures.Interface().(protoreflect.Message)
+ if !ok {
+ return parentFS
+ }
+ // gf.Interface() could be *dynamicpb.Message or *gofeaturespb.GoFeatures.
+ fields := gf.Descriptor().Fields()
+
+ if fd := fields.ByNumber(genid.GoFeatures_LegacyUnmarshalJsonEnum_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.BoolKind &&
+ gf.Has(fd) {
+ parentFS.GenerateLegacyUnmarshalJSON = gf.Get(fd).Bool()
+ }
+
+ if fd := fields.ByNumber(genid.GoFeatures_StripEnumPrefix_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.EnumKind &&
+ gf.Has(fd) {
+ parentFS.StripEnumPrefix = int(gf.Get(fd).Enum())
+ }
+
+ if fd := fields.ByNumber(genid.GoFeatures_ApiLevel_field_number); fd != nil &&
+ !fd.IsList() &&
+ fd.Kind() == protoreflect.EnumKind &&
+ gf.Has(fd) {
+ parentFS.APILevel = int(gf.Get(fd).Enum())
}
return parentFS
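
Aside: instead of proto.GetExtension, which only yields a concrete *gofeaturespb.GoFeatures for messages generated against that type, the code above reads the extension through protoreflect and checks each field's presence, cardinality, and kind before trusting it, so dynamicpb messages and malformed descriptors are handled without panicking. A sketch of the same defensive lookup pattern on a known message (illustrative; assumes FieldOptions.packed is field number 2):

package main

import (
	"fmt"

	"google.golang.org/protobuf/proto"
	"google.golang.org/protobuf/reflect/protoreflect"
	"google.golang.org/protobuf/types/descriptorpb"
)

func main() {
	opts := &descriptorpb.FieldOptions{Packed: proto.Bool(true)}
	m := opts.ProtoReflect()

	// Only read the field if it exists with the expected shape.
	fd := m.Descriptor().Fields().ByNumber(2)
	if fd != nil && !fd.IsList() && fd.Kind() == protoreflect.BoolKind && m.Has(fd) {
		fmt.Println("packed:", m.Get(fd).Bool())
	}
}
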
diff --git a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
index a5de8d40..9b880aa8 100644
--- a/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
+++ b/vendor/google.golang.org/protobuf/reflect/protodesc/proto.go
@@ -32,9 +32,6 @@ func ToFileDescriptorProto(file protoreflect.FileDescriptor) *descriptorpb.FileD
if imp.IsPublic {
p.PublicDependency = append(p.PublicDependency, int32(i))
}
- if imp.IsWeak {
- p.WeakDependency = append(p.WeakDependency, int32(i))
- }
}
for i, locs := 0, file.SourceLocations(); i < locs.Len(); i++ {
loc := locs.Get(i)
diff --git a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
index cd8fadba..cd7fbc87 100644
--- a/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
+++ b/vendor/google.golang.org/protobuf/reflect/protoreflect/type.go
@@ -68,7 +68,7 @@ type Descriptor interface {
// dependency is not resolved, in which case only name information is known.
//
// Placeholder types may only be returned by the following accessors
- // as a result of unresolved dependencies or weak imports:
+ // as a result of unresolved dependencies:
//
// ╔═══════════════════════════════════╤═════════════════════╗
// ║ Accessor │ Descriptor ║
@@ -168,11 +168,7 @@ type FileImport struct {
// The current file and the imported file must be within proto package.
IsPublic bool
- // IsWeak reports whether this is a weak import, which does not impose
- // a direct dependency on the target file.
- //
- // Weak imports are a legacy proto1 feature. Equivalent behavior is
- // achieved using proto2 extension fields or proto3 Any messages.
+ // Deprecated: support for weak fields has been removed.
IsWeak bool
}
@@ -325,9 +321,7 @@ type FieldDescriptor interface {
// specified in the source .proto file.
HasOptionalKeyword() bool
- // IsWeak reports whether this is a weak field, which does not impose a
- // direct dependency on the target type.
- // If true, then Message returns a placeholder type.
+ // Deprecated: support for weak fields has been removed.
IsWeak() bool
// IsPacked reports whether repeated primitive numeric kinds should be
diff --git a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
index a551e7ae..a5163376 100644
--- a/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
+++ b/vendor/google.golang.org/protobuf/types/descriptorpb/descriptor.pb.go
@@ -46,6 +46,7 @@ import (
protoimpl "google.golang.org/protobuf/runtime/protoimpl"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
// The full set of known editions.
@@ -4360,7 +4361,7 @@ func (x *GeneratedCodeInfo_Annotation) GetSemantic() GeneratedCodeInfo_Annotatio
var File_google_protobuf_descriptor_proto protoreflect.FileDescriptor
-var file_google_protobuf_descriptor_proto_rawDesc = []byte{
+var file_google_protobuf_descriptor_proto_rawDesc = string([]byte{
0x0a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x64, 0x65, 0x73, 0x63, 0x72, 0x69, 0x70, 0x74, 0x6f, 0x72, 0x2e, 0x70, 0x72, 0x6f,
0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
@@ -5130,16 +5131,16 @@ var file_google_protobuf_descriptor_proto_rawDesc = []byte{
0x70, 0x62, 0xf8, 0x01, 0x01, 0xa2, 0x02, 0x03, 0x47, 0x50, 0x42, 0xaa, 0x02, 0x1a, 0x47, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x52, 0x65,
0x66, 0x6c, 0x65, 0x63, 0x74, 0x69, 0x6f, 0x6e,
-}
+})
var (
file_google_protobuf_descriptor_proto_rawDescOnce sync.Once
- file_google_protobuf_descriptor_proto_rawDescData = file_google_protobuf_descriptor_proto_rawDesc
+ file_google_protobuf_descriptor_proto_rawDescData []byte
)
func file_google_protobuf_descriptor_proto_rawDescGZIP() []byte {
file_google_protobuf_descriptor_proto_rawDescOnce.Do(func() {
- file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_descriptor_proto_rawDescData)
+ file_google_protobuf_descriptor_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)))
})
return file_google_protobuf_descriptor_proto_rawDescData
}
@@ -5292,7 +5293,7 @@ func file_google_protobuf_descriptor_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_descriptor_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_descriptor_proto_rawDesc), len(file_google_protobuf_descriptor_proto_rawDesc)),
NumEnums: 17,
NumMessages: 33,
NumExtensions: 0,
@@ -5304,7 +5305,6 @@ func file_google_protobuf_descriptor_proto_init() {
MessageInfos: file_google_protobuf_descriptor_proto_msgTypes,
}.Build()
File_google_protobuf_descriptor_proto = out.File
- file_google_protobuf_descriptor_proto_rawDesc = nil
file_google_protobuf_descriptor_proto_goTypes = nil
file_google_protobuf_descriptor_proto_depIdxs = nil
}
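The descriptor.pb.go hunks above change the raw descriptor from a []byte variable to a string and then recover a byte-slice view on demand via unsafe.Slice(unsafe.StringData(...)); because the string is immutable, the generated code no longer nils out the variable after registration. A minimal, self-contained sketch of that zero-copy string-to-[]byte conversion, using only the standard library (variable and function names here are illustrative):

package main

import (
	"fmt"
	"unsafe"
)

// bytesView returns a []byte that aliases the string's backing memory
// without copying. The result must be treated as read-only: writing to it
// is undefined behavior, since Go strings are immutable.
func bytesView(s string) []byte {
	if len(s) == 0 {
		return nil
	}
	return unsafe.Slice(unsafe.StringData(s), len(s))
}

func main() {
	raw := string([]byte{0x0a, 0x19, 0x67, 0x6f}) // tiny stand-in for a rawDesc blob
	b := bytesView(raw)
	fmt.Printf("%d bytes, first byte %#x\n", len(b), b[0])
}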
diff --git a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
index e0b72eaf..28d24bad 100644
--- a/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
+++ b/vendor/google.golang.org/protobuf/types/gofeaturespb/go_features.pb.go
@@ -16,6 +16,7 @@ import (
descriptorpb "google.golang.org/protobuf/types/descriptorpb"
reflect "reflect"
sync "sync"
+ unsafe "unsafe"
)
type GoFeatures_APILevel int32
@@ -227,7 +228,7 @@ var (
var File_google_protobuf_go_features_proto protoreflect.FileDescriptor
-var file_google_protobuf_go_features_proto_rawDesc = []byte{
+var file_google_protobuf_go_features_proto_rawDesc = string([]byte{
0x0a, 0x21, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x67, 0x6f, 0x5f, 0x66, 0x65, 0x61, 0x74, 0x75, 0x72, 0x65, 0x73, 0x2e, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x12, 0x02, 0x70, 0x62, 0x1a, 0x20, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f,
@@ -283,16 +284,16 @@ var file_google_protobuf_go_features_proto_rawDesc = []byte{
0x67, 0x6f, 0x6c, 0x61, 0x6e, 0x67, 0x2e, 0x6f, 0x72, 0x67, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2f, 0x74, 0x79, 0x70, 0x65, 0x73, 0x2f, 0x67, 0x6f, 0x66, 0x65, 0x61, 0x74,
0x75, 0x72, 0x65, 0x73, 0x70, 0x62,
-}
+})
var (
file_google_protobuf_go_features_proto_rawDescOnce sync.Once
- file_google_protobuf_go_features_proto_rawDescData = file_google_protobuf_go_features_proto_rawDesc
+ file_google_protobuf_go_features_proto_rawDescData []byte
)
func file_google_protobuf_go_features_proto_rawDescGZIP() []byte {
file_google_protobuf_go_features_proto_rawDescOnce.Do(func() {
- file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_go_features_proto_rawDescData)
+ file_google_protobuf_go_features_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)))
})
return file_google_protobuf_go_features_proto_rawDescData
}
@@ -326,7 +327,7 @@ func file_google_protobuf_go_features_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_go_features_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_go_features_proto_rawDesc), len(file_google_protobuf_go_features_proto_rawDesc)),
NumEnums: 2,
NumMessages: 1,
NumExtensions: 1,
@@ -339,7 +340,6 @@ func file_google_protobuf_go_features_proto_init() {
ExtensionInfos: file_google_protobuf_go_features_proto_extTypes,
}.Build()
File_google_protobuf_go_features_proto = out.File
- file_google_protobuf_go_features_proto_rawDesc = nil
file_google_protobuf_go_features_proto_goTypes = nil
file_google_protobuf_go_features_proto_depIdxs = nil
}
diff --git a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
index 191552cc..497da66e 100644
--- a/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
+++ b/vendor/google.golang.org/protobuf/types/known/anypb/any.pb.go
@@ -122,6 +122,7 @@ import (
reflect "reflect"
strings "strings"
sync "sync"
+ unsafe "unsafe"
)
// `Any` contains an arbitrary serialized protocol buffer message along with a
@@ -411,7 +412,7 @@ func (x *Any) GetValue() []byte {
var File_google_protobuf_any_proto protoreflect.FileDescriptor
-var file_google_protobuf_any_proto_rawDesc = []byte{
+var file_google_protobuf_any_proto_rawDesc = string([]byte{
0x0a, 0x19, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2f, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75,
0x66, 0x2f, 0x61, 0x6e, 0x79, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x12, 0x0f, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x22, 0x36, 0x0a, 0x03,
@@ -427,16 +428,16 @@ var file_google_protobuf_any_proto_rawDesc = []byte{
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x50, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x57, 0x65,
0x6c, 0x6c, 0x4b, 0x6e, 0x6f, 0x77, 0x6e, 0x54, 0x79, 0x70, 0x65, 0x73, 0x62, 0x06, 0x70, 0x72,
0x6f, 0x74, 0x6f, 0x33,
-}
+})
var (
file_google_protobuf_any_proto_rawDescOnce sync.Once
- file_google_protobuf_any_proto_rawDescData = file_google_protobuf_any_proto_rawDesc
+ file_google_protobuf_any_proto_rawDescData []byte
)
func file_google_protobuf_any_proto_rawDescGZIP() []byte {
file_google_protobuf_any_proto_rawDescOnce.Do(func() {
- file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(file_google_protobuf_any_proto_rawDescData)
+ file_google_protobuf_any_proto_rawDescData = protoimpl.X.CompressGZIP(unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)))
})
return file_google_protobuf_any_proto_rawDescData
}
@@ -462,7 +463,7 @@ func file_google_protobuf_any_proto_init() {
out := protoimpl.TypeBuilder{
File: protoimpl.DescBuilder{
GoPackagePath: reflect.TypeOf(x{}).PkgPath(),
- RawDescriptor: file_google_protobuf_any_proto_rawDesc,
+ RawDescriptor: unsafe.Slice(unsafe.StringData(file_google_protobuf_any_proto_rawDesc), len(file_google_protobuf_any_proto_rawDesc)),
NumEnums: 0,
NumMessages: 1,
NumExtensions: 0,
@@ -473,7 +474,6 @@ func file_google_protobuf_any_proto_init() {
MessageInfos: file_google_protobuf_any_proto_msgTypes,
}.Build()
File_google_protobuf_any_proto = out.File
- file_google_protobuf_any_proto_rawDesc = nil
file_google_protobuf_any_proto_goTypes = nil
file_google_protobuf_any_proto_depIdxs = nil
}
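The same rewrite is applied to go_features.pb.go and any.pb.go above. One detail shared by all three files is the rawDescGZIP accessor: the gzip-compressed descriptor is now built lazily into a separate []byte, guarded by a sync.Once, instead of overwriting the raw descriptor in place. A rough, self-contained sketch of that pattern, with compressGZIP standing in for protoimpl.X.CompressGZIP (all names here are illustrative):

package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"sync"
)

var (
	rawDesc = "example descriptor bytes" // kept uncompressed for reflection
	gzOnce  sync.Once
	gzData  []byte // filled in lazily on first use
)

// rawDescGZIP mirrors the generated accessors: compress at most once, reuse after.
func rawDescGZIP() []byte {
	gzOnce.Do(func() {
		gzData = compressGZIP([]byte(rawDesc))
	})
	return gzData
}

// compressGZIP is a stand-in for protoimpl.X.CompressGZIP.
func compressGZIP(in []byte) []byte {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write(in)
	_ = zw.Close()
	return buf.Bytes()
}

func main() {
	fmt.Println(len(rawDescGZIP()), "compressed bytes")
}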
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 44331d0b..75c0ca6a 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -1,7 +1,7 @@
# github.com/blang/semver v3.5.1+incompatible
## explicit
github.com/blang/semver
-# github.com/containernetworking/cni v1.2.3
+# github.com/containernetworking/cni v1.3.0
## explicit; go 1.21
github.com/containernetworking/cni/pkg/ns
github.com/containernetworking/cni/pkg/skel
@@ -13,7 +13,7 @@ github.com/containernetworking/cni/pkg/types/create
github.com/containernetworking/cni/pkg/types/internal
github.com/containernetworking/cni/pkg/utils
github.com/containernetworking/cni/pkg/version
-# github.com/containernetworking/plugins v1.6.1
+# github.com/containernetworking/plugins v1.6.2
## explicit; go 1.23
github.com/containernetworking/plugins/pkg/ns
github.com/containernetworking/plugins/pkg/testutils
@@ -26,15 +26,15 @@ github.com/emicklei/go-restful/v3
github.com/emicklei/go-restful/v3/log
# github.com/evanphx/json-patch v5.6.0+incompatible
## explicit
-# github.com/fsnotify/fsnotify v1.8.0
+# github.com/fsnotify/fsnotify v1.9.0
## explicit; go 1.17
github.com/fsnotify/fsnotify
github.com/fsnotify/fsnotify/internal
# github.com/fxamacker/cbor/v2 v2.7.0
## explicit; go 1.17
github.com/fxamacker/cbor/v2
-# github.com/go-co-op/gocron/v2 v2.12.4
-## explicit; go 1.20
+# github.com/go-co-op/gocron/v2 v2.16.1
+## explicit; go 1.21.0
github.com/go-co-op/gocron/v2
# github.com/go-logr/logr v1.4.2
## explicit; go 1.18
@@ -66,8 +66,8 @@ github.com/google/gnostic-models/extensions
github.com/google/gnostic-models/jsonschema
github.com/google/gnostic-models/openapiv2
github.com/google/gnostic-models/openapiv3
-# github.com/google/go-cmp v0.6.0
-## explicit; go 1.13
+# github.com/google/go-cmp v0.7.0
+## explicit; go 1.21
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
@@ -83,8 +83,8 @@ github.com/google/uuid
# github.com/imdario/mergo v0.3.16
## explicit; go 1.13
github.com/imdario/mergo
-# github.com/jonboulle/clockwork v0.4.0
-## explicit; go 1.15
+# github.com/jonboulle/clockwork v0.5.0
+## explicit; go 1.21
github.com/jonboulle/clockwork
# github.com/josharian/intern v1.0.0
## explicit; go 1.5
@@ -92,7 +92,7 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.0
+# github.com/k8snetworkplumbingwg/network-attachment-definition-client v1.7.6
## explicit; go 1.21
github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io
github.com/k8snetworkplumbingwg/network-attachment-definition-client/pkg/apis/k8s.cni.cncf.io/v1
@@ -150,8 +150,8 @@ github.com/onsi/ginkgo/reporters/stenographer
github.com/onsi/ginkgo/reporters/stenographer/support/go-colorable
github.com/onsi/ginkgo/reporters/stenographer/support/go-isatty
github.com/onsi/ginkgo/types
-# github.com/onsi/gomega v1.36.2
-## explicit; go 1.22.0
+# github.com/onsi/gomega v1.37.0
+## explicit; go 1.23.0
github.com/onsi/gomega
github.com/onsi/gomega/format
github.com/onsi/gomega/internal
@@ -178,12 +178,7 @@ github.com/vishvananda/netns
# github.com/x448/float16 v0.8.4
## explicit; go 1.11
github.com/x448/float16
-# golang.org/x/exp v0.0.0-20240613232115-7f521ea00fb8
-## explicit; go 1.20
-golang.org/x/exp/constraints
-golang.org/x/exp/maps
-golang.org/x/exp/slices
-# golang.org/x/mod v0.22.0
+# golang.org/x/mod v0.23.0
## explicit; go 1.22.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
@@ -237,10 +232,10 @@ golang.org/x/text/secure/bidirule
golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
-# golang.org/x/time v0.8.0
-## explicit; go 1.18
+# golang.org/x/time v0.11.0
+## explicit; go 1.23.0
golang.org/x/time/rate
-# golang.org/x/tools v0.28.0
+# golang.org/x/tools v0.30.0
## explicit; go 1.22.0
golang.org/x/tools/go/ast/astutil
golang.org/x/tools/go/gcexportdata
@@ -264,10 +259,10 @@ golang.org/x/tools/internal/stdlib
golang.org/x/tools/internal/typeparams
golang.org/x/tools/internal/typesinternal
golang.org/x/tools/internal/versions
-# gomodules.xyz/jsonpatch/v2 v2.4.0
+# gomodules.xyz/jsonpatch/v2 v2.5.0
## explicit; go 1.20
gomodules.xyz/jsonpatch/v2
-# google.golang.org/protobuf v1.36.1
+# google.golang.org/protobuf v1.36.5
## explicit; go 1.21
google.golang.org/protobuf/encoding/prototext
google.golang.org/protobuf/encoding/protowire