Mirror of https://github.com/kubevirt/containerized-data-importer.git, synced 2025-06-03 06:30:22 +00:00
Enable nakedret linter (#3178)
* Enable nakedret linter

  Checks that functions with naked returns are not longer than a maximum
  size (can be zero). https://github.com/alexkohler/nakedret

  Relevant from the Go style guide:

  > Naked returns are okay if the function is a handful of lines. Once
  > it's a medium sized function, be explicit with your return values.

  https://go.dev/wiki/CodeReviewComments#named-result-parameters

  I think a naked return is never easier to read than a regular return,
  so I set the linter to always complain about it. Disagreements are
  welcome.

  Signed-off-by: Edu Gómez Escandell <egomez@redhat.com>

* Refactor functions with naked returns

  Signed-off-by: Edu Gómez Escandell <egomez@redhat.com>

---------

Signed-off-by: Edu Gómez Escandell <egomez@redhat.com>
This commit is contained in:
parent 475a70c8cd
commit 213040a441
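For context, a minimal sketch of the pattern this commit removes (hypothetical code, not taken from the diff): a function using named results with naked returns, next to the explicit form the refactor prefers.

package example

import "strings"

// splitNaked uses named results and naked returns. With nakedret's
// max-func-lines set to 0 (see the config change below), both bare
// "return" statements are reported, however short the function is.
func splitNaked(s string) (key, value string) {
	parts := strings.SplitN(s, "=", 2)
	if len(parts) != 2 {
		return // implicitly "return key, value"
	}
	key, value = parts[0], parts[1]
	return
}

// splitExplicit is the shape the refactor moves toward: identical
// logic, but every exit names its return values.
func splitExplicit(s string) (string, string) {
	parts := strings.SplitN(s, "=", 2)
	if len(parts) != 2 {
		return "", ""
	}
	return parts[0], parts[1]
}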
@@ -6,6 +6,7 @@ linters:
   - ginkgolinter
   - goimports
   - misspell
+  - nakedret
   - unconvert
 
 issues:
@@ -13,8 +14,10 @@ issues:
   max-same-issues: 0
 
 linters-settings:
-  ginkgolinter:
-    forbid-focus-container: true
   errorlint:
     # We want to allow for usage of %v to avoid leaking implementation details
     errorf: false
+  ginkgolinter:
+    forbid-focus-container: true
+  nakedret:
+    max-func-lines: 0
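A note on the settings above: max-func-lines is nakedret's threshold for how long a function may be before its naked returns are reported; setting it to 0 makes the linter flag every naked return regardless of function length, matching the intent stated in the commit message. With the linter enabled in the golangci configuration, a plain golangci-lint run invocation surfaces the findings that the refactoring below addresses.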
@@ -33,15 +33,20 @@ type execReader struct {
 	stderr io.ReadCloser
 }
 
-func (er *execReader) Read(p []byte) (n int, err error) {
-	n, err = er.stdout.Read(p)
-	if errors.Is(err, io.EOF) {
-		if err2 := er.cmd.Wait(); err2 != nil {
-			errBytes, _ := io.ReadAll(er.stderr)
-			klog.Fatalf("Subprocess did not execute successfully, result is: %q\n%s", er.cmd.ProcessState.ExitCode(), string(errBytes))
-		}
+func (er *execReader) Read(p []byte) (int, error) {
+	n, err := er.stdout.Read(p)
+	if err == nil {
+		return n, nil
+	} else if !errors.Is(err, io.EOF) {
+		return n, err
 	}
-	return
+
+	if err := er.cmd.Wait(); err != nil {
+		errBytes, _ := io.ReadAll(er.stderr)
+		klog.Fatalf("Subprocess did not execute successfully, result is: %q\n%s", er.cmd.ProcessState.ExitCode(), string(errBytes))
+	}
+
+	return n, io.EOF
 }
 
 func (er *execReader) Close() error {
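The refactored Read ends with an explicit return n, io.EOF rather than a naked return. This matters because of the io.Reader contract: a single Read call may return n > 0 together with io.EOF, so callers must consume those bytes before acting on the error. A minimal sketch of a conforming caller (hypothetical, not part of this diff):

package example

import "io"

// drain reads r until EOF and reports how many bytes were seen.
// Per the io.Reader contract a single Read may return n > 0 along
// with io.EOF, so the bytes are counted before the error is
// inspected; this is the same reason execReader.Read returns
// "n, io.EOF" explicitly above.
func drain(r io.Reader) (int, error) {
	buf := make([]byte, 512)
	total := 0
	for {
		n, err := r.Read(buf)
		total += n // account for the bytes first
		if err == io.EOF {
			return total, nil // clean end of stream
		}
		if err != nil {
			return total, err
		}
	}
}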
@@ -194,23 +199,25 @@ func newTarReader(preallocation bool) (io.ReadCloser, error) {
 	return &execReader{cmd: cmd, stdout: stdout, stderr: io.NopCloser(&stderr)}, nil
 }
 
-func getInputStream(preallocation bool) (rc io.ReadCloser) {
-	var err error
+func getInputStream(preallocation bool) io.ReadCloser {
 	switch contentType {
 	case "filesystem-clone":
-		rc, err = newTarReader(preallocation)
+		rc, err := newTarReader(preallocation)
 		if err != nil {
 			klog.Fatalf("Error creating tar reader for %q: %+v", mountPoint, err)
 		}
+		return rc
 	case "blockdevice-clone":
-		rc, err = os.Open(mountPoint)
+		rc, err := os.Open(mountPoint)
 		if err != nil {
 			klog.Fatalf("Error opening block device %q: %+v", mountPoint, err)
 		}
+		return rc
 	default:
 		klog.Fatalf("Invalid content-type %q", contentType)
 	}
-	return
+
+	return nil
 }
 
 func main() {
@@ -184,16 +184,19 @@ func writeData(reader io.ReadCloser, file *os.File, imageID string, progress *pr
 	return nil
 }
 
-func getAuthType() (authType clientconfig.AuthType, err error) {
-	if configuredAuthType := getStringFromSecret(authTypeString); configuredAuthType == "" {
-		authType = clientconfig.AuthPassword
-	} else if supportedAuthType, found := supportedAuthTypes[configuredAuthType]; found {
-		authType = supportedAuthType
-	} else {
-		err = errors.New(unsupportedAuthTypeErrStr)
-		klog.Fatal(err.Error(), "authType", configuredAuthType)
+func getAuthType() (clientconfig.AuthType, error) {
+	configuredAuthType := getStringFromSecret(authTypeString)
+	if configuredAuthType == "" {
+		return clientconfig.AuthPassword, nil
 	}
-	return
+
+	if supportedAuthType, found := supportedAuthTypes[configuredAuthType]; found {
+		return supportedAuthType, nil
+	}
+
+	err := errors.New(unsupportedAuthTypeErrStr)
+	klog.Fatal(err.Error(), "authType", configuredAuthType)
+	return clientconfig.AuthType(""), err
 }
 
 func getStringFromSecret(key string) string {
@@ -216,7 +219,7 @@ func getBoolFromSecret(key string) bool {
 	return false
 }
 
-func getProviderClient(identityEndpoint string) (provider *gophercloud.ProviderClient, err error) {
+func getProviderClient(identityEndpoint string) (*gophercloud.ProviderClient, error) {
 
 	authInfo := &clientconfig.AuthInfo{
 		AuthURL: identityEndpoint,
@@ -233,10 +236,10 @@ func getProviderClient(identityEndpoint string) (provider *gophercloud.ProviderC
 	}
 
-	var authType clientconfig.AuthType
-	authType, err = getAuthType()
+	authType, err := getAuthType()
 	if err != nil {
 		klog.Fatal(err.Error())
-		return
+		return nil, err
 	}
 
 	switch authType {
@@ -256,7 +259,7 @@ func getProviderClient(identityEndpoint string) (provider *gophercloud.ProviderC
 	identityURL, err := url.Parse(identityEndpoint)
 	if err != nil {
 		klog.Fatal(err.Error())
-		return
+		return nil, err
 	}
 
 	var TLSClientConfig *tls.Config
@@ -273,7 +276,7 @@ func getProviderClient(identityEndpoint string) (provider *gophercloud.ProviderC
 		if !ok {
 			err = errors.New(malformedCAErrStr)
 			klog.Fatal(err.Error())
-			return
+			return nil, err
 		}
 		TLSClientConfig = &tls.Config{RootCAs: roots}
 	}
@@ -281,10 +284,10 @@ func getProviderClient(identityEndpoint string) (provider *gophercloud.ProviderC
 		}
 	}
 
-	provider, err = openstack.NewClient(identityEndpoint)
+	provider, err := openstack.NewClient(identityEndpoint)
 	if err != nil {
 		klog.Fatal(err.Error())
-		return
+		return nil, err
 	}
 
 	provider.HTTPClient.Transport = &http.Transport{
@@ -308,13 +311,13 @@ func getProviderClient(identityEndpoint string) (provider *gophercloud.ProviderC
 	opts, err := clientconfig.AuthOptions(clientOpts)
 	if err != nil {
 		klog.Fatal(err.Error())
-		return
+		return nil, err
 	}
 
 	err = openstack.Authenticate(provider, *opts)
 	if err != nil {
 		klog.Fatal(err.Error())
-		return
+		return nil, err
 	}
-	return
+	return provider, nil
 }
@@ -730,17 +730,15 @@ func ParseCloneRequestAnnotation(pvc *corev1.PersistentVolumeClaim) (exists bool
 	var ann string
 	ann, exists = pvc.Annotations[cc.AnnCloneRequest]
 	if !exists {
-		return
+		return false, "", ""
 	}
 
 	sp := strings.Split(ann, "/")
 	if len(sp) != 2 {
-		exists = false
-		return
+		return false, "", ""
 	}
 
-	namespace, name = sp[0], sp[1]
-	return
+	return true, sp[0], sp[1]
 }
 
 // ValidateCanCloneSourceAndTargetContentType validates the pvcs passed has the same content type.
@@ -260,20 +260,21 @@ func (p *Planner) watchSnapshots(ctx context.Context, log logr.Logger) error {
 func (p *Planner) watchOwned(log logr.Logger, obj client.Object) error {
 	objList := p.RootObjectType.DeepCopyObject().(client.ObjectList)
 	if err := p.Controller.Watch(source.Kind(p.GetCache(), obj), handler.EnqueueRequestsFromMapFunc(
-		func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+		func(ctx context.Context, obj client.Object) []reconcile.Request {
 			uid, ok := obj.GetLabels()[p.OwnershipLabel]
 			if !ok {
-				return
+				return nil
 			}
 			matchingFields := client.MatchingFields{
 				p.UIDField: uid,
 			}
 			if err := p.Client.List(ctx, objList, matchingFields); err != nil {
 				log.Error(err, "Unable to list resource", "matchingFields", matchingFields)
-				return
+				return nil
 			}
 			sv := reflect.ValueOf(objList).Elem()
 			iv := sv.FieldByName("Items")
+			var reqs []reconcile.Request
 			for i := 0; i < iv.Len(); i++ {
 				o := iv.Index(i).Addr().Interface().(client.Object)
 				reqs = append(reqs, reconcile.Request{
@@ -283,7 +284,7 @@ func (p *Planner) watchOwned(log logr.Logger, obj client.Object) error {
 				},
 			})
 			}
-			return
+			return reqs
 		}),
 	); err != nil {
 		return err
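Several hunks below apply the same transformation to controller-runtime mapper functions passed to handler.EnqueueRequestsFromMapFunc: the named reqs result becomes a locally declared slice, early exits become return nil, and the final naked return becomes return reqs. A condensed sketch of the before and after shape (the guard condition is hypothetical, not from this diff):

package example

import (
	"context"

	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/reconcile"
)

// Before: named result, accumulated implicitly, naked returns.
//
//	func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
//		if obj.GetName() == "" {
//			return // returns the (possibly nil) named slice
//		}
//		reqs = append(reqs, reconcile.Request{})
//		return
//	}

// After: the slice is declared locally and every exit is explicit.
func mapFn(ctx context.Context, obj client.Object) []reconcile.Request {
	if obj.GetName() == "" { // hypothetical early-exit guard
		return nil
	}
	var reqs []reconcile.Request
	reqs = append(reqs, reconcile.Request{})
	return reqs
}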
@@ -1501,16 +1501,14 @@ func NewImportDataVolume(name string) *cdiv1.DataVolume {
 func GetCloneSourceInfo(dv *cdiv1.DataVolume) (sourceType, sourceName, sourceNamespace string) {
 	// Cloning sources are mutually exclusive
 	if dv.Spec.Source.PVC != nil {
-		sourceType = "pvc"
-		sourceName = dv.Spec.Source.PVC.Name
-		sourceNamespace = dv.Spec.Source.PVC.Namespace
-	} else if dv.Spec.Source.Snapshot != nil {
-		sourceType = "snapshot"
-		sourceName = dv.Spec.Source.Snapshot.Name
-		sourceNamespace = dv.Spec.Source.Snapshot.Namespace
+		return "pvc", dv.Spec.Source.PVC.Name, dv.Spec.Source.PVC.Namespace
 	}
 
-	return
+	if dv.Spec.Source.Snapshot != nil {
+		return "snapshot", dv.Spec.Source.Snapshot.Name, dv.Spec.Source.Snapshot.Namespace
+	}
+
+	return "", "", ""
 }
 
 // IsWaitForFirstConsumerEnabled tells us if we should respect "real" WFFC behavior or just let our worker pods randomly spawn
@@ -978,29 +978,30 @@ func addDataImportCronControllerWatches(mgr manager.Manager, c controller.Contro
 		return nil
 	}
 
-	mapStorageProfileToCron := func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+	mapStorageProfileToCron := func(ctx context.Context, obj client.Object) []reconcile.Request {
 		// TODO: Get rid of this after at least one version; use indexer on storage class annotation instead
 		// Otherwise we risk losing the storage profile event
 		var crons cdiv1.DataImportCronList
 		if err := mgr.GetClient().List(ctx, &crons); err != nil {
 			c.GetLogger().Error(err, "Unable to list DataImportCrons")
-			return
+			return nil
 		}
 		// Storage profiles are 1:1 to storage classes
 		scName := obj.GetName()
+		var reqs []reconcile.Request
 		for _, cron := range crons.Items {
 			dataVolume := cron.Spec.Template
 			explicitScName := cc.GetStorageClassFromDVSpec(&dataVolume)
 			templateSc, err := cc.GetStorageClassByNameWithVirtFallback(ctx, mgr.GetClient(), explicitScName, dataVolume.Spec.ContentType)
 			if err != nil || templateSc == nil {
 				c.GetLogger().Error(err, "Unable to get storage class", "templateSc", templateSc)
-				return
+				return reqs
 			}
 			if templateSc.Name == scName {
 				reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: cron.Namespace, Name: cron.Name}})
 			}
 		}
-		return
+		return reqs
 	}
 
 	if err := c.Watch(source.Kind(mgr.GetCache(), &cdiv1.DataVolume{}),
@@ -239,10 +239,9 @@ func addDataSourceControllerWatches(mgr manager.Manager, c controller.Controller
 		return err
 	}
 
-	mapToDataSource := func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
-		reqs = appendMatchingDataSourceRequests(ctx, dataSourcePvcField, obj, reqs)
-		reqs = appendMatchingDataSourceRequests(ctx, dataSourceSnapshotField, obj, reqs)
-		return
+	mapToDataSource := func(ctx context.Context, obj client.Object) []reconcile.Request {
+		reqs := appendMatchingDataSourceRequests(ctx, dataSourcePvcField, obj, nil)
+		return appendMatchingDataSourceRequests(ctx, dataSourceSnapshotField, obj, reqs)
 	}
 
 	if err := c.Watch(source.Kind(mgr.GetCache(), &cdiv1.DataVolume{}),
@@ -551,17 +551,18 @@ func addCloneWithoutSourceWatch(mgr manager.Manager, datavolumeController contro
 	}
 
 	// Function to reconcile DVs that match the selected fields
-	dataVolumeMapper := func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+	dataVolumeMapper := func(ctx context.Context, obj client.Object) []reconcile.Request {
 		dvList := &cdiv1.DataVolumeList{}
 		namespacedName := types.NamespacedName{Namespace: obj.GetNamespace(), Name: obj.GetName()}
 		matchingFields := client.MatchingFields{indexingKey: namespacedName.String()}
 		if err := mgr.GetClient().List(ctx, dvList, matchingFields); err != nil {
-			return
+			return nil
 		}
+		var reqs []reconcile.Request
 		for _, dv := range dvList.Items {
 			reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}})
 		}
-		return
+		return reqs
 	}
 
 	if err := datavolumeController.Watch(source.Kind(mgr.GetCache(), typeToWatch),
@@ -311,17 +311,18 @@ func addDataVolumeControllerCommonWatches(mgr manager.Manager, dataVolumeControl
 	// Watch for SC updates and reconcile the DVs waiting for default SC
 	// Relevant only when the DV StorageSpec has no AccessModes set and no matching StorageClass yet, so PVC cannot be created (test_id:9922)
 	if err := dataVolumeController.Watch(source.Kind(mgr.GetCache(), &storagev1.StorageClass{}), handler.EnqueueRequestsFromMapFunc(
-		func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+		func(ctx context.Context, obj client.Object) []reconcile.Request {
 			dvList := &cdiv1.DataVolumeList{}
 			if err := mgr.GetClient().List(ctx, dvList, client.MatchingFields{dvPhaseField: ""}); err != nil {
-				return
+				return nil
 			}
+			var reqs []reconcile.Request
 			for _, dv := range dvList.Items {
 				if getDataVolumeOp(ctx, mgr.GetLogger(), &dv, mgr.GetClient()) == op {
 					reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Name: dv.Name, Namespace: dv.Namespace}})
 				}
 			}
-			return
+			return reqs
 		},
 	),
 ); err != nil {
@@ -331,12 +332,13 @@ func addDataVolumeControllerCommonWatches(mgr manager.Manager, dataVolumeControl
 	// Watch for PV updates to reconcile the DVs waiting for available PV
 	// Relevant only when the DV StorageSpec has no AccessModes set and no matching StorageClass yet, so PVC cannot be created (test_id:9924,9925)
 	if err := dataVolumeController.Watch(source.Kind(mgr.GetCache(), &corev1.PersistentVolume{}), handler.EnqueueRequestsFromMapFunc(
-		func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+		func(ctx context.Context, obj client.Object) []reconcile.Request {
 			pv := obj.(*corev1.PersistentVolume)
 			dvList := &cdiv1.DataVolumeList{}
 			if err := mgr.GetClient().List(ctx, dvList, client.MatchingFields{dvPhaseField: ""}); err != nil {
-				return
+				return nil
 			}
+			var reqs []reconcile.Request
 			for _, dv := range dvList.Items {
 				storage := dv.Spec.Storage
 				if storage != nil &&
@@ -347,7 +349,7 @@ func addDataVolumeControllerCommonWatches(mgr manager.Manager, dataVolumeControl
 					reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Name: dv.Name, Namespace: dv.Namespace}})
 				}
 			}
-			return
+			return reqs
 		},
 	),
 ); err != nil {
@@ -152,17 +152,18 @@ func addDataSourceWatch(mgr manager.Manager, c controller.Controller) error {
 		return err
 	}
 
-	mapToDataVolume := func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+	mapToDataVolume := func(ctx context.Context, obj client.Object) []reconcile.Request {
 		var dvs cdiv1.DataVolumeList
 		matchingFields := client.MatchingFields{dvDataSourceField: getKey(obj.GetNamespace(), obj.GetName())}
 		if err := mgr.GetClient().List(ctx, &dvs, matchingFields); err != nil {
 			c.GetLogger().Error(err, "Unable to list DataVolumes", "matchingFields", matchingFields)
-			return
+			return nil
 		}
+		var reqs []reconcile.Request
 		for _, dv := range dvs.Items {
 			reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Namespace: dv.Namespace, Name: dv.Name}})
 		}
-		return
+		return reqs
 	}
 
 	if err := c.Watch(source.Kind(mgr.GetCache(), &cdiv1.DataSource{}),
@@ -421,19 +421,20 @@ func addStorageProfileControllerWatches(mgr manager.Manager, c controller.Contro
 		return err
 	}
 
-	mapSnapshotClassToProfile := func(ctx context.Context, obj client.Object) (reqs []reconcile.Request) {
+	mapSnapshotClassToProfile := func(ctx context.Context, obj client.Object) []reconcile.Request {
 		var scList storagev1.StorageClassList
 		if err := mgr.GetClient().List(ctx, &scList); err != nil {
 			c.GetLogger().Error(err, "Unable to list StorageClasses")
-			return
+			return nil
 		}
 		vsc := obj.(*snapshotv1.VolumeSnapshotClass)
+		var reqs []reconcile.Request
 		for _, sc := range scList.Items {
 			if sc.Provisioner == vsc.Driver {
 				reqs = append(reqs, reconcile.Request{NamespacedName: types.NamespacedName{Name: sc.Name}})
 			}
 		}
-		return
+		return reqs
 	}
 	if err := mgr.GetClient().List(context.TODO(), &snapshotv1.VolumeSnapshotClassList{}, &client.ListOptions{Limit: 1}); err != nil {
 		if meta.IsNoMatchError(err) {
@@ -485,7 +485,7 @@ func mockExecFunction(output, errString string, expectedLimits *system.ProcessLi
 			err = errors.New(errString)
 		}
 
-		return
+		return bytes, err
 	}
 }
@@ -502,7 +502,7 @@ func mockExecFunctionStrict(output, errString string, expectedLimits *system.Pro
 			err = errors.New(errString)
 		}
 
-		return
+		return bytes, err
 	}
 }
@@ -525,7 +525,7 @@ func mockExecFunctionTwoCalls(output, errString string, expectedLimits *system.P
 			err = errors.New(errString)
 		}
 
-		return
+		return bytes, err
 	}
 }
@@ -244,7 +244,7 @@ func WriteTerminationMessageToFile(file, message string) error {
 }
 
 // CopyDir copies a dir from one location to another.
-func CopyDir(source string, dest string) (err error) {
+func CopyDir(source string, dest string) error {
 	// get properties of source dir
 	sourceinfo, err := os.Stat(source)
 	if err != nil {
@@ -278,7 +278,7 @@ func CopyDir(source string, dest string) (err error) {
 			}
 		}
 	}
-	return
+	return err
 }
 
 // LinkFile symlinks the source to the target
|
@ -762,16 +762,16 @@ type limitThenErrorReader struct {
|
||||
n int64 // max bytes remaining
|
||||
}
|
||||
|
||||
func (l *limitThenErrorReader) Read(p []byte) (n int, err error) {
|
||||
func (l *limitThenErrorReader) Read(p []byte) (int, error) {
|
||||
if l.n <= 0 {
|
||||
return 0, ErrTestFake // EOF
|
||||
}
|
||||
if int64(len(p)) > l.n {
|
||||
p = p[0:l.n]
|
||||
}
|
||||
n, err = l.r.Read(p)
|
||||
n, err := l.r.Read(p)
|
||||
l.n -= int64(n)
|
||||
return
|
||||
return n, err
|
||||
}
|
||||
|
||||
func startUploadProxyPortForward(f *framework.Framework) (string, *exec.Cmd, error) {
|
||||
|