Use unnamed parameter for unused lambda parameters (#14308)
With the version bump, the ecj compiler spams editors with hundreds of unused-parameter warnings, because the language now makes it possible to mark a parameter as unused.

Synchronize the ecj configuration file with the latest Eclipse defaults to make sure we aren't missing anything else, then replace every unused lambda parameter with the unnamed variable _. Unused-variable analysis can be really helpful for maintaining the code.

See https://openjdk.org/jeps/456 for more information.
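For readers unfamiliar with the feature, here is a minimal sketch (not taken from the commit) of the pattern applied throughout the diff: a lambda parameter that is declared but never read is rewritten as the unnamed variable _ from JEP 456, which documents the intent and satisfies ecj's new unusedLambdaParameter check. Compiling it requires a JDK where JEP 456 is final (JDK 22 or newer).

import java.util.HashMap;
import java.util.Map;

public class UnnamedLambdaParamDemo {
  public static void main(String[] args) {
    Map<String, Integer> counts = new HashMap<>();

    // Before: the parameter is named but the lambda body never reads it;
    // ecj reports this once unusedLambdaParameter is set to error.
    counts.computeIfAbsent("lucene", key -> 0);

    // After: the unnamed variable _ states explicitly that the argument is ignored.
    counts.computeIfAbsent("search", _ -> 0);

    System.out.println(counts); // e.g. {search=0, lucene=0}; HashMap order is unspecified
  }
}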
rmuir authored Feb 27, 2025
1 parent fcffbe1 commit ab453b9
Showing 135 changed files with 304 additions and 320 deletions.
8 changes: 8 additions & 0 deletions gradle/validation/ecj-lint/ecj.javadocs.prefs
@@ -1,13 +1,18 @@
eclipse.preferences.version=1
org.eclipse.jdt.core.builder.annotationPath.allLocations=enabled
org.eclipse.jdt.core.compiler.annotation.inheritNullAnnotations=disabled
org.eclipse.jdt.core.compiler.annotation.missingNonNullByDefaultAnnotation=ignore
org.eclipse.jdt.core.compiler.annotation.nonnull=org.eclipse.jdt.annotation.NonNull
org.eclipse.jdt.core.compiler.annotation.nonnull.secondary=
org.eclipse.jdt.core.compiler.annotation.nonnullbydefault=org.eclipse.jdt.annotation.NonNullByDefault
org.eclipse.jdt.core.compiler.annotation.nonnullbydefault.secondary=
org.eclipse.jdt.core.compiler.annotation.notowning=org.eclipse.jdt.annotation.NotOwning
org.eclipse.jdt.core.compiler.annotation.nullable=org.eclipse.jdt.annotation.Nullable
org.eclipse.jdt.core.compiler.annotation.nullable.secondary=
org.eclipse.jdt.core.compiler.annotation.nullanalysis=disabled
org.eclipse.jdt.core.compiler.annotation.owning=org.eclipse.jdt.annotation.Owning
# TODO: look into it and see if it can provide value, lots of errors
org.eclipse.jdt.core.compiler.annotation.resourceanalysis=disabled
org.eclipse.jdt.core.compiler.codegen.inlineJsrBytecode=enabled
org.eclipse.jdt.core.compiler.codegen.methodParameters=do not generate
org.eclipse.jdt.core.compiler.codegen.targetPlatform=23
@@ -46,8 +51,10 @@ org.eclipse.jdt.core.compiler.problem.forbiddenReference=error
org.eclipse.jdt.core.compiler.problem.hiddenCatchBlock=error
org.eclipse.jdt.core.compiler.problem.includeNullInfoFromAsserts=disabled
org.eclipse.jdt.core.compiler.problem.incompatibleNonInheritedInterfaceMethod=error
org.eclipse.jdt.core.compiler.problem.incompatibleOwningContract=error
org.eclipse.jdt.core.compiler.problem.incompleteEnumSwitch=error
org.eclipse.jdt.core.compiler.problem.indirectStaticAccess=ignore
org.eclipse.jdt.core.compiler.problem.insufficientResourceAnalysis=error
org.eclipse.jdt.core.compiler.problem.invalidJavadoc=error
org.eclipse.jdt.core.compiler.problem.invalidJavadocTags=enabled
org.eclipse.jdt.core.compiler.problem.invalidJavadocTagsDeprecatedRef=disabled
@@ -130,6 +137,7 @@ org.eclipse.jdt.core.compiler.problem.unusedDeclaredThrownExceptionWhenOverridin
org.eclipse.jdt.core.compiler.problem.unusedExceptionParameter=error
org.eclipse.jdt.core.compiler.problem.unusedImport=error
org.eclipse.jdt.core.compiler.problem.unusedLabel=error
org.eclipse.jdt.core.compiler.problem.unusedLambdaParameter=error
org.eclipse.jdt.core.compiler.problem.unusedLocal=error
org.eclipse.jdt.core.compiler.problem.unusedObjectAllocation=ignore
org.eclipse.jdt.core.compiler.problem.unusedParameter=ignore
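Note the contrast between the last two settings: unusedParameter stays at ignore while the new unusedLambdaParameter is an error. A hypothetical snippet (not from the commit) illustrating how the two differ under this configuration:

import java.util.function.IntUnaryOperator;

class ParamWarningsDemo {
  // unusedParameter=ignore: an unused method parameter such as unusedFlag is not reported.
  static int length(String text, boolean unusedFlag) {
    return text.length();
  }

  // unusedLambdaParameter=error: a named-but-unread lambda parameter (e.g. "x -> 0")
  // would now fail the ecj lint; declaring it unnamed keeps the check happy.
  static final IntUnaryOperator ALWAYS_ZERO = _ -> 0;
}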
@@ -202,9 +202,9 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
return bytes;
});
put(Random.class, random -> new Random(random.nextLong()));
put(Version.class, random -> Version.LATEST);
put(Version.class, _ -> Version.LATEST);
put(AttributeFactory.class, BaseTokenStreamTestCase::newAttributeFactory);
put(AttributeSource.class, random -> null); // force IAE/NPE
put(AttributeSource.class, _ -> null); // force IAE/NPE
put(
Set.class,
random -> {
@@ -242,19 +242,17 @@ public class TestRandomChains extends BaseTokenStreamTestCase {
});
// TODO: don't want to make the exponentially slow ones Dawid documents
// in TestPatternReplaceFilter, so dont use truly random patterns (for now)
put(Pattern.class, random -> Pattern.compile("a"));
put(Pattern.class, _ -> Pattern.compile("a"));
put(
Pattern[].class,
random ->
new Pattern[] {Pattern.compile("([a-z]+)"), Pattern.compile("([0-9]+)")});
_ -> new Pattern[] {Pattern.compile("([a-z]+)"), Pattern.compile("([0-9]+)")});
put(
PayloadEncoder.class,
random ->
new IdentityEncoder()); // the other encoders will throw exceptions if tokens
_ -> new IdentityEncoder()); // the other encoders will throw exceptions if tokens
// arent numbers?
put(
Dictionary.class,
random -> {
_ -> {
// TODO: make nastier
InputStream affixStream =
TestRandomChains.class.getResourceAsStream("simple.aff");
@@ -270,7 +268,7 @@ });
});
put(
HyphenationTree.class,
random -> {
_ -> {
// TODO: make nastier
try {
InputSource is =
@@ -509,14 +507,14 @@ private String randomNonEmptyString(Random random) {
put(
JapaneseTokenizer.Mode.class,
random -> jaTokModes[random.nextInt(jaTokModes.length)]);
put(org.apache.lucene.analysis.ja.dict.UserDictionary.class, random -> null);
put(org.apache.lucene.analysis.ja.dict.UserDictionary.class, _ -> null);

// Nori:
final var koComplFilterModes = KoreanTokenizer.DecompoundMode.values();
put(
KoreanTokenizer.DecompoundMode.class,
random -> koComplFilterModes[random.nextInt(koComplFilterModes.length)]);
put(org.apache.lucene.analysis.ko.dict.UserDictionary.class, random -> null);
put(org.apache.lucene.analysis.ko.dict.UserDictionary.class, _ -> null);

// Phonetic:
final var bmNameTypes = org.apache.commons.codec.language.bm.NameType.values();
Expand Down Expand Up @@ -553,9 +551,7 @@ private String randomNonEmptyString(Random random) {
});

// Stempel
put(
StempelStemmer.class,
random -> new StempelStemmer(PolishAnalyzer.getDefaultTable()));
put(StempelStemmer.class, _ -> new StempelStemmer(PolishAnalyzer.getDefaultTable()));
}
});

@@ -812,7 +812,7 @@ private void parseAffix(
affixArg = new StringBuilder(affixArg).reverse().toString();
}

affixes.computeIfAbsent(affixArg, __ -> new IntArrayList()).add(currentAffix);
affixes.computeIfAbsent(affixArg, _ -> new IntArrayList()).add(currentAffix);
currentAffix++;
}
}
@@ -24,7 +24,7 @@
* @see NGramFragmentChecker
*/
public interface FragmentChecker {
FragmentChecker EVERYTHING_POSSIBLE = (word, start, end) -> false;
FragmentChecker EVERYTHING_POSSIBLE = (_, _, _) -> false;

/**
* Check if the given word range intersects any fragment which is impossible in the current
@@ -171,7 +171,7 @@ Root<CharsRef> findStem(
offset,
length,
context,
(stem, formID, morphDataId, outerPrefix, innerPrefix, outerSuffix, innerSuffix) -> {
(stem, formID, _, _, _, _, _) -> {
if (!acceptCase(toCheck, formID, stem)) {
return dictionary.hasFlag(formID, Dictionary.HIDDEN_FLAG);
}
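As the hunks in this file show, named and unnamed parameters can be mixed in one lambda, keeping names only for what the body actually reads. A standalone sketch of the idea (the SevenArgCallback interface below is invented for illustration and is not Lucene's RootProcessor):

@FunctionalInterface
interface SevenArgCallback {
  boolean process(String stem, int formID, int morphDataId,
      int outerPrefix, int innerPrefix, int outerSuffix, int innerSuffix);
}

class MixedParamsDemo {
  // Only the parameters the body reads keep their names; every ignored one becomes _.
  static final SevenArgCallback STEM_FILTER =
      (stem, formID, _, _, _, _, _) -> stem.length() > 2 && formID >= 0;
}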
@@ -478,7 +478,7 @@ private boolean checkLastCompoundPart(
words.add(ref);

Stemmer.RootProcessor stopOnMatching =
(stem, formID, morphDataId, outerPrefix, innerPrefix, outerSuffix, innerSuffix) -> {
(_, formID, _, _, _, _, _) -> {
ref.ints[0] = formID;
for (CompoundRule r : dictionary.compoundRules) {
if (r.fullyMatches(words)) {
@@ -68,7 +68,7 @@ public List<CharsRef> stem(char[] word, int length) {
analyze(
word,
length,
(stem, formID, morphDataId, outerPrefix, innerPrefix, outerSuffix, innerSuffix) -> {
(stem, _, morphDataId, _, _, _, _) -> {
list.add(newStem(stem, morphDataId));
return true;
});
@@ -98,7 +98,7 @@ void analyze(char[] word, int length, RootProcessor processor) {
WordCase wordCase = caseOf(word, length);
if (wordCase == WordCase.UPPER || wordCase == WordCase.TITLE) {
CaseVariationProcessor variationProcessor =
(variant, varLength, originalCase) ->
(variant, varLength, _) ->
doStem(variant, 0, varLength, WordContext.SIMPLE_WORD, processor);
varyCase(word, length, wordCase, variationProcessor);
}
@@ -172,7 +172,7 @@ Root<CharsRef> findStem(
return compoundCache
.computeIfAbsent(
new String(chars, offset, length),
__ ->
_ ->
Optional.ofNullable(super.findStem(chars, offset, length, null, context)))
.orElse(null);
}
@@ -402,7 +402,7 @@ private class WordCompressor {

for (String word : words) {
checkCanceled.run();
stemToPossibleFlags.computeIfAbsent(word, __ -> new LinkedHashSet<>());
stemToPossibleFlags.computeIfAbsent(word, _ -> new LinkedHashSet<>());
var processor =
new Stemmer.StemCandidateProcessor(WordContext.SIMPLE_WORD) {
@Override
@@ -427,14 +427,14 @@ boolean processStemCandidate(
|| allGenerated(swf).stream().noneMatch(forbidden::contains)) {
registerStem(candidate);
stemToPossibleFlags
.computeIfAbsent(candidate, __ -> new LinkedHashSet<>())
.computeIfAbsent(candidate, _ -> new LinkedHashSet<>())
.add(flagSet);
}
return true;
}

void registerStem(String stem) {
stemsToForms.computeIfAbsent(stem, __ -> new LinkedHashSet<>()).add(word);
stemsToForms.computeIfAbsent(stem, _ -> new LinkedHashSet<>()).add(word);
}
};
processor.registerStem(word);
@@ -138,7 +138,7 @@ private void handleWrappedFilterArgs(Map<String, String> args) {
String filterName = splitKey.get(0).toLowerCase(Locale.ROOT);
if (wrappedFilterArgs.containsKey(filterName)) { // Skip if not in "wrappedFilter" arg
Map<String, String> filterArgs =
wrappedFilterArgs.computeIfAbsent(filterName, k -> new HashMap<>());
wrappedFilterArgs.computeIfAbsent(filterName, _ -> new HashMap<>());
String argKey = splitKey.get(1);
filterArgs.put(
argKey, argValue); // argKey is guaranteed unique, don't need to check for duplicates
@@ -121,8 +121,8 @@ public void testMaxPrologueNeeded() throws Exception {
switch (firstWord) {
case "SET":
case "FLAG":
local.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
global.computeIfAbsent(firstWord, (k) -> new ArrayList<>()).add(is.position());
local.computeIfAbsent(firstWord, (_) -> new ArrayList<>()).add(is.position());
global.computeIfAbsent(firstWord, (_) -> new ArrayList<>()).add(is.position());
break;
}
}
@@ -231,7 +231,7 @@ public Query rewrite(IndexSearcher indexSearcher) throws IOException {
ScoreTerm st = q.pop();
if (st != null) {
ArrayList<ScoreTerm> l =
variantQueries.computeIfAbsent(st.fuzziedSourceTerm, k -> new ArrayList<>());
variantQueries.computeIfAbsent(st.fuzziedSourceTerm, _ -> new ArrayList<>());
l.add(st);
}
}
@@ -386,7 +386,7 @@ private void combineSegmentsFields(
segmentTerms.fieldTermStatesMap.entrySet()) {
List<SegmentPostings> segmentPostingsList =
fieldPostingsMap.computeIfAbsent(
fieldTermState.getKey(), k -> new ArrayList<>(groupedSegmentTerms.size()));
fieldTermState.getKey(), _ -> new ArrayList<>(groupedSegmentTerms.size()));
segmentPostingsList.add(
new SegmentPostings(
segmentTerms.segmentIndex,
@@ -134,7 +134,7 @@ public void addTerm(Term term, int docIDUpto) {
void addNumericUpdate(NumericDocValuesUpdate update, int docIDUpto) {
FieldUpdatesBuffer buffer =
fieldUpdates.computeIfAbsent(
update.field, k -> new FieldUpdatesBuffer(fieldUpdatesBytesUsed, update, docIDUpto));
update.field, _ -> new FieldUpdatesBuffer(fieldUpdatesBytesUsed, update, docIDUpto));
if (update.hasValue) {
buffer.addUpdate(update.term, update.getValue(), docIDUpto);
} else {
@@ -146,7 +146,7 @@ void addNumericUpdate(NumericDocValuesUpdate update, int docIDUpto) {
void addBinaryUpdate(BinaryDocValuesUpdate update, int docIDUpto) {
FieldUpdatesBuffer buffer =
fieldUpdates.computeIfAbsent(
update.field, k -> new FieldUpdatesBuffer(fieldUpdatesBytesUsed, update, docIDUpto));
update.field, _ -> new FieldUpdatesBuffer(fieldUpdatesBytesUsed, update, docIDUpto));
if (update.hasValue) {
buffer.addUpdate(update.term, update.getValue(), docIDUpto);
} else {
@@ -211,7 +211,7 @@ void put(Term term, int value) {
BytesRefIntMap hash =
deleteTerms.computeIfAbsent(
term.field,
k -> {
_ -> {
bytesUsed.addAndGet(RamUsageEstimator.sizeOf(term.field));
return new BytesRefIntMap(pool, bytesUsed);
});
@@ -215,7 +215,7 @@ synchronized void abort() throws IOException {
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "abort");
}
for (final DocumentsWriterPerThread perThread : perThreadPool.filterAndLock(x -> true)) {
for (final DocumentsWriterPerThread perThread : perThreadPool.filterAndLock(_ -> true)) {
try {
abortDocumentsWriterPerThread(perThread);
} finally {
@@ -291,7 +291,7 @@ synchronized Closeable lockAndAbortAll() throws IOException {
try {
deleteQueue.clear();
perThreadPool.lockNewWriters();
writers.addAll(perThreadPool.filterAndLock(x -> true));
writers.addAll(perThreadPool.filterAndLock(_ -> true));
for (final DocumentsWriterPerThread perThread : writers) {
assert perThread.isHeldByCurrentThread();
abortDocumentsWriterPerThread(perThread);
@@ -177,7 +177,7 @@ public void addClosedListener(ClosedListener listener) {
// here we wrap the listener and call it with our cache key
// this is important since this key will be used to cache the reader and otherwise we won't
// free caches etc.
delegate.addClosedListener(unused -> listener.onClose(cacheKey));
delegate.addClosedListener(_ -> listener.onClose(cacheKey));
}
}
}
@@ -86,7 +86,7 @@ public FrozenBufferedUpdates(
: "segment private packet should only have del queries";

PrefixCodedTerms.Builder builder = new PrefixCodedTerms.Builder();
updates.deleteTerms.forEachOrdered((term, doc) -> builder.add(term));
updates.deleteTerms.forEachOrdered((term, _) -> builder.add(term));
deleteTerms = builder.finish();

deleteQueries = new Query[updates.deleteQueries.size()];
6 changes: 3 additions & 3 deletions lucene/core/src/java/org/apache/lucene/index/IndexWriter.java
@@ -3333,7 +3333,7 @@ public void abortPendingMerges() throws IOException {
infoStream.message("IW", "now abort pending addIndexes merge");
}
merge.setAborted();
merge.close(false, false, mr -> {});
merge.close(false, false, _ -> {});
onMergeFinished(merge);
});
pendingAddIndexesMerges.clear();
@@ -3350,7 +3350,7 @@ public void merge(MergePolicy.OneMerge merge) throws IOException {
handleMergeException(t, merge);
} finally {
synchronized (IndexWriter.this) {
merge.close(success, false, mr -> {});
merge.close(success, false, _ -> {});
onMergeFinished(merge);
}
}
@@ -3731,7 +3731,7 @@ private long prepareCommitInternal() throws IOException {
// necessary files to disk and checkpointed them.
pointInTimeMerges =
preparePointInTimeMerge(
toCommit, stopAddingMergedSegments::get, MergeTrigger.COMMIT, sci -> {});
toCommit, stopAddingMergedSegments::get, MergeTrigger.COMMIT, _ -> {});
}
}
success = true;
@@ -147,7 +147,7 @@ public synchronized void addDVUpdate(DocValuesFieldUpdates update) throws IOExce
throw new IllegalArgumentException("call finish first");
}
List<DocValuesFieldUpdates> fieldUpdates =
pendingDVUpdates.computeIfAbsent(update.field, key -> new ArrayList<>());
pendingDVUpdates.computeIfAbsent(update.field, _ -> new ArrayList<>());
assert assertNoDupGen(fieldUpdates, update);

ramBytesUsed.addAndGet(update.ramBytesUsed());
@@ -810,7 +810,7 @@ private synchronized <T> T getOrCreate(String field, boolean norms, IOSupplier<T
private boolean assertCreatedOnlyOnce(String field, boolean norms) {
assert Thread.holdsLock(this);
// this is mainly there to make sure we change anything in the way we merge we realize it early
int timesCached = cacheStats.compute(field + "N:" + norms, (s, i) -> i == null ? 1 : i + 1);
int timesCached = cacheStats.compute(field + "N:" + norms, (_, i) -> i == null ? 1 : i + 1);
if (timesCached > 1) {
assert norms == false : "[" + field + "] norms must not be cached twice";
boolean isSortField = false;
@@ -64,7 +64,7 @@ abstract class AbstractVectorSimilarityQuery extends Query {
}

protected KnnCollectorManager getKnnCollectorManager() {
return (visitLimit, searchStrategy, context) ->
return (visitLimit, _, _) ->
new VectorSimilarityCollector(traversalSimilarity, resultSimilarity, visitLimit);
}

@@ -39,7 +39,7 @@ protected DocIdStream() {}
/** Count the number of entries in this stream. This is a terminal operation. */
public int count() throws IOException {
int[] count = new int[1];
forEach(doc -> count[0]++);
forEach(_ -> count[0]++);
return count[0];
}
}
@@ -44,7 +44,7 @@
public class RegexpQuery extends AutomatonQuery {

/** A provider that provides no named automata */
public static final AutomatonProvider DEFAULT_PROVIDER = name -> null;
public static final AutomatonProvider DEFAULT_PROVIDER = _ -> null;

/**
* Constructs a query for terms matching <code>term</code>.
@@ -589,7 +589,7 @@ private LinkedHashMap<Term, Integer> repeatingTerms() {
HashMap<Term, Integer> tcnt = new HashMap<>();
for (PhrasePositions pp : phrasePositions) {
for (Term t : pp.terms) {
Integer cnt = tcnt.compute(t, (key, old) -> old == null ? 1 : 1 + old);
Integer cnt = tcnt.compute(t, (_, old) -> old == null ? 1 : 1 + old);
if (cnt == 2) {
tord.put(t, tord.size());
}
2 changes: 1 addition & 1 deletion lucene/core/src/java/org/apache/lucene/search/TopDocs.java
@@ -405,7 +405,7 @@ public static TopDocs rrf(int topN, int k, TopDocs[] hits) {
double rrfScoreContribution = 1d / Math.addExact(k, rank);
rrfScore.compute(
new ShardIndexAndDoc(scoreDoc.shardIndex, scoreDoc.doc),
(key, score) -> (score == null ? 0 : score) + rrfScoreContribution);
(_, score) -> (score == null ? 0 : score) + rrfScoreContribution);
}
}

@@ -44,7 +44,7 @@ public final class ByteBuffersDataOutput extends DataOutput implements Accountab

/** A singleton instance of "no-reuse" buffer strategy. */
public static final Consumer<ByteBuffer> NO_REUSE =
(bb) -> {
(_) -> {
throw new RuntimeException("reset() is not allowed on this buffer.");
};

