Commit ee829612 authored by Hilko Bengen's avatar Hilko Bengen

Imported Upstream version 4.10.3+dfsg

parent e795e8af
......@@ -3,6 +3,53 @@ Lucene Change Log
For more information on past and future Lucene versions, please see:
http://s.apache.org/luceneversions
======================= Lucene 4.10.3 ======================
Bug fixes
* LUCENE-6046: Add maxDeterminizedStates safety to determinize (which has
an exponential worst case) so that if it would create too many states, it
now throws an exception instead of exhausting CPU/RAM. (Nik
Everett via Mike McCandless)
* LUCENE-6054: Allow repeating the empty automaton (Nik Everett via
Mike McCandless)
* LUCENE-6049: Don't throw cryptic exception writing a segment when
the only docs in it had fields that hit non-aborting exceptions
during indexing but also had doc values. (Mike McCandless)
* LUCENE-6060: Deprecate IndexWriter.unlock (Simon Willnauer, Mike
McCandless)
* LUCENE-3229: Overlapping ordered SpanNearQuery spans should not match.
(Ludovic Boutros, Paul Elschot, Greg Dearing, ehatcher)
* LUCENE-6004: Don't highlight the LookupResult.key returned from
AnalyzingInfixSuggester (Christian Reuschling, jane chang via Mike McCandless)
* LUCENE-6075: Don't overflow int in SimpleRateLimiter (Boaz Leskes
via Mike McCandless)
* LUCENE-5980: Don't let document length overflow. (Robert Muir)
* LUCENE-6042: CustomScoreQuery explain was incorrect in some cases,
such as when nested inside a boolean query. (Denis Lantsman via Robert Muir)
* LUCENE-5948: RateLimiter now fully inits itself on init. (Varun
Thacker via Mike McCandless)
* LUCENE-6055: PayloadAttribute.clone() now does a deep clone of the underlying
bytes. (Shai Erera)
* LUCENE-6094: Allow IW.rollback to stop ConcurrentMergeScheduler even
when it's stalling because there are too many merges. (Mike McCandless)
Documentation
* LUCENE-6057: Improve Sort(SortField) docs (Martin Braun via Mike McCandless)
======================= Lucene 4.10.2 ======================
Bug fixes
......
......@@ -639,3 +639,15 @@ together by Lucene.
PayloadAttribute's name is unchanged, it just uses the BytesRef
class to refer to the payload bytes/start offset/end offset
(or null if there is no payload).
Bugs fixed in several ValueSource functions may result in different behavior in
situations where some documents do not have values for fields wrapped in other
ValueSources. Users who want to preserve the previous behavior may need to wrap
their ValueSources in a "DefFunction" along with a ConstValueSource of "0.0".
## PayloadAttributeImpl.clone() (LUCENE-6055)
PayloadAttributeImpl.clone() did a shallow clone which was incorrect, and was
fixed to do a deep clone. If you require shallow cloning of the underlying bytes,
you should override PayloadAttributeImpl.clone() to do a shallow clone instead.
......@@ -29,7 +29,7 @@
<!-- !!! RELEASE MANAGER: Change version numbers only here: !!! -->
<!-- The base version of the next release (including bugfix number, e.g., x.y.z+): -->
<property name="version.base" value="4.10.2"/>
<property name="version.base" value="4.10.3"/>
<!-- !!! RELEASE MANAGER: Don't change anything after this line! !!! -->
......
......@@ -88,7 +88,6 @@ public class CharTermAttributeImpl extends AttributeImpl implements CharTermAttr
@Override
public void fillBytesRef() {
bytes.copyChars(termBuffer, 0, termLength);
bytes.get();
}
@Override
......
......@@ -55,7 +55,7 @@ public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttrib
public PayloadAttributeImpl clone() {
PayloadAttributeImpl clone = (PayloadAttributeImpl) super.clone();
if (payload != null) {
clone.payload = payload.clone();
clone.payload = BytesRef.deepCopyOf(payload);
}
return clone;
}
......@@ -86,7 +86,7 @@ public class PayloadAttributeImpl extends AttributeImpl implements PayloadAttrib
@Override
public void copyTo(AttributeImpl target) {
PayloadAttribute t = (PayloadAttribute) target;
t.setPayload((payload == null) ? null : payload.clone());
t.setPayload((payload == null) ? null : BytesRef.deepCopyOf(payload));
}
......
......@@ -300,8 +300,11 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
protected synchronized int mergeThreadCount() {
int count = 0;
for (MergeThread mt : mergeThreads) {
if (mt.isAlive() && mt.getCurrentMerge() != null) {
count++;
if (mt.isAlive()) {
MergePolicy.OneMerge merge = mt.getCurrentMerge();
if (merge != null && merge.isAborted() == false) {
count++;
}
}
}
return count;
......@@ -350,7 +353,8 @@ public class ConcurrentMergeScheduler extends MergeScheduler {
message(" too many merges; stalling...");
}
try {
wait();
// Only wait 0.25 seconds, so if all merges are aborted (by IW.rollback) we notice:
wait(250);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
......
......@@ -647,6 +647,10 @@ final class DefaultIndexingChain extends DocConsumer {
invertState.lastStartOffset = startOffset;
}
invertState.length++;
if (invertState.length < 0) {
throw new IllegalArgumentException("too many tokens in field '" + field.name() + "'");
}
//System.out.println(" term=" + invertState.termAttribute);
// If we hit an exception in here, we abort
......@@ -658,8 +662,6 @@ final class DefaultIndexingChain extends DocConsumer {
aborting = true;
termsHashPerField.add();
aborting = false;
invertState.length++;
}
// trigger streams to perform end-of-stream operations
......
......@@ -302,12 +302,6 @@ final class DocumentsWriter implements Closeable, Accountable {
}
boolean anyChanges() {
if (infoStream.isEnabled("DW")) {
infoStream.message("DW", "anyChanges? numDocsInRam=" + numDocsInRAM.get()
+ " deletes=" + anyDeletions() + " hasTickets:"
+ ticketQueue.hasTickets() + " pendingChangesInFullFlush: "
+ pendingChangesInCurrentFullFlush);
}
/*
* changes are either in a DWPT or in the deleteQueue.
* yet if we currently flush deletes and / or dwpt there
......@@ -315,7 +309,16 @@ final class DocumentsWriter implements Closeable, Accountable {
* before they are published to the IW. ie we need to check if the
* ticket queue has any tickets.
*/
return numDocsInRAM.get() != 0 || anyDeletions() || ticketQueue.hasTickets() || pendingChangesInCurrentFullFlush;
boolean anyChanges = numDocsInRAM.get() != 0 || anyDeletions() || ticketQueue.hasTickets() || pendingChangesInCurrentFullFlush;
if (infoStream.isEnabled("DW")) {
if (anyChanges) {
infoStream.message("DW", "anyChanges? numDocsInRam=" + numDocsInRAM.get()
+ " deletes=" + anyDeletions() + " hasTickets:"
+ ticketQueue.hasTickets() + " pendingChangesInFullFlush: "
+ pendingChangesInCurrentFullFlush);
}
}
return anyChanges;
}
public int getBufferedDeleteTermsSize() {
......
......@@ -204,7 +204,7 @@ public final class FieldInfo {
}
void setDocValuesType(DocValuesType type) {
if (docValueType != null && docValueType != type) {
if (docValueType != null && type != null && docValueType != type) {
throw new IllegalArgumentException("cannot change DocValues type from " + docValueType + " to " + type + " for field \"" + name + "\"");
}
docValueType = type;
......
......@@ -294,7 +294,7 @@ public class FieldInfos implements Iterable<FieldInfo> {
// be updated by maybe FreqProxTermsWriterPerField:
return addOrUpdateInternal(name, -1, fieldType.indexed(), false,
fieldType.omitNorms(), false,
fieldType.indexOptions(), fieldType.docValueType(), null);
fieldType.indexOptions(), null, null);
}
private FieldInfo addOrUpdateInternal(String name, int preferredFieldNumber, boolean isIndexed,
......
......@@ -2268,7 +2268,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
/** Aborts running merges. Be careful when using this
* method: when you abort a long-running merge, you lose
* a lot of work that must later be redone. */
* a lot of work that must later be redone.
*
* @deprecated This will be removed in 5.0 */
@Deprecated
public synchronized void abortMerges() {
stopMerges = true;
......@@ -2316,7 +2319,11 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
*
* <p>It is guaranteed that any merges started prior to calling this method
* will have completed once this method completes.</p>
*
* @deprecated This will be removed in Lucene 5.x. Interact with {@link ConcurrentMergeScheduler} if you really must know the specific
* timing of merges.
*/
@Deprecated
public void waitForMerges() throws IOException {
// Give merge scheduler last chance to run, in case
......@@ -4465,7 +4472,9 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
synchronized(this) {
assert lastCommitChangeCount <= changeCount: "lastCommitChangeCount=" + lastCommitChangeCount + " changeCount=" + changeCount;
if (lastCommitChangeCount > changeCount) {
throw new IllegalStateException("lastCommitChangeCount=" + lastCommitChangeCount + ",changeCount=" + changeCount);
}
if (pendingCommitChangeCount == lastCommitChangeCount) {
if (infoStream.isEnabled("IW")) {
......@@ -4573,7 +4582,10 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
* Caution: this should only be used by failure recovery code,
* when it is known that no other process nor thread is in fact
* currently accessing this index.
*
* @deprecated This method is very dangerous and will be removed in Lucene 5.0
*/
@Deprecated
public static void unlock(Directory directory) throws IOException {
directory.makeLock(IndexWriter.WRITE_LOCK_NAME).close();
}
......@@ -4651,10 +4663,13 @@ public class IndexWriter implements Closeable, TwoPhaseCommit, Accountable {
synchronized boolean nrtIsCurrent(SegmentInfos infos) {
//System.out.println("IW.nrtIsCurrent " + (infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedDeletesStream.any()));
ensureOpen();
boolean isCurrent = infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedUpdatesStream.any();
if (infoStream.isEnabled("IW")) {
infoStream.message("IW", "nrtIsCurrent: infoVersion matches: " + (infos.version == segmentInfos.version) + "; DW changes: " + docWriter.anyChanges() + "; BD changes: "+ bufferedUpdatesStream.any());
if (isCurrent == false) {
infoStream.message("IW", "nrtIsCurrent: infoVersion matches: " + (infos.version == segmentInfos.version) + "; DW changes: " + docWriter.anyChanges() + "; BD changes: "+ bufferedUpdatesStream.any());
}
}
return infos.version == segmentInfos.version && !docWriter.anyChanges() && !bufferedUpdatesStream.any();
return isCurrent;
}
synchronized boolean isClosed() {
......
......@@ -204,8 +204,8 @@ public abstract class MergePolicy {
while (paused) {
try {
// In theory we could wait() indefinitely, but we
// do 1000 msec, defensively
wait(1000);
// do 250 msec, defensively
wait(250);
} catch (InterruptedException ie) {
throw new RuntimeException(ie);
}
......
......@@ -20,8 +20,8 @@ package org.apache.lucene.index;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Map;
import java.util.NoSuchElementException;
import java.util.Set;
import java.util.concurrent.atomic.AtomicInteger;
......@@ -33,6 +33,7 @@ import org.apache.lucene.codecs.FieldInfosFormat;
import org.apache.lucene.codecs.LiveDocsFormat;
import org.apache.lucene.document.BinaryDocValuesField;
import org.apache.lucene.document.NumericDocValuesField;
import org.apache.lucene.index.FieldInfo.DocValuesType;
import org.apache.lucene.store.Directory;
import org.apache.lucene.store.FlushInfo;
import org.apache.lucene.store.IOContext;
......@@ -551,11 +552,13 @@ class ReadersAndUpdates {
}
// create new fields or update existing ones to have NumericDV type
for (String f : dvUpdates.numericDVUpdates.keySet()) {
builder.addOrUpdate(f, NumericDocValuesField.TYPE);
FieldInfo fieldInfo = builder.addOrUpdate(f, NumericDocValuesField.TYPE);
fieldInfo.setDocValuesType(DocValuesType.NUMERIC);
}
// create new fields or update existing ones to have BinaryDV type
for (String f : dvUpdates.binaryDVUpdates.keySet()) {
builder.addOrUpdate(f, BinaryDocValuesField.TYPE);
FieldInfo fieldInfo = builder.addOrUpdate(f, BinaryDocValuesField.TYPE);
fieldInfo.setDocValuesType(DocValuesType.BINARY);
}
fieldInfos = builder.finish();
......
......@@ -26,6 +26,7 @@ import org.apache.lucene.util.AttributeSource;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.CompiledAutomaton;
import org.apache.lucene.util.automaton.Operations;
/**
* A {@link Query} that will match terms against a finite-state machine.
......@@ -61,10 +62,26 @@ public class AutomatonQuery extends MultiTermQuery {
* match.
*/
public AutomatonQuery(final Term term, Automaton automaton) {
this(term, automaton, Operations.DEFAULT_MAX_DETERMINIZED_STATES);
}
/**
* Create a new AutomatonQuery from an {@link Automaton}.
*
* @param term Term containing field and possibly some pattern structure. The
* term text is ignored.
* @param automaton Automaton to run, terms that are accepted are considered a
* match.
* @param maxDeterminizedStates maximum number of states in the resulting
*   automata. If the automata would need more than this many states
*   TooComplexToDeterminizeException is thrown. Higher numbers require more
*   space but can process more complex automata.
*/
public AutomatonQuery(final Term term, Automaton automaton, int maxDeterminizedStates) {
super(term.field());
this.term = term;
this.automaton = automaton;
this.compiled = new CompiledAutomaton(automaton);
this.compiled = new CompiledAutomaton(automaton, null, true, maxDeterminizedStates);
}
@Override
......
package org.apache.lucene.search;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.AutomatonProvider;
import org.apache.lucene.util.automaton.RegExp;
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
......@@ -23,6 +17,13 @@ import org.apache.lucene.util.automaton.RegExp;
* limitations under the License.
*/
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.AutomatonProvider;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.RegExp;
/**
* A fast regular expression query based on the
* {@link org.apache.lucene.util.automaton} package.
......@@ -75,18 +76,38 @@ public class RegexpQuery extends AutomatonQuery {
* @param flags optional RegExp features from {@link RegExp}
*/
public RegexpQuery(Term term, int flags) {
this(term, flags, defaultProvider);
this(term, flags, defaultProvider,
Operations.DEFAULT_MAX_DETERMINIZED_STATES);
}
/**
* Constructs a query for terms matching <code>term</code>.
*
* @param term regular expression.
* @param flags optional RegExp features from {@link RegExp}
* @param maxDeterminizedStates maximum number of states that compiling the
* automaton for the regexp can result in. Set higher to allow more complex
* queries and lower to prevent memory exhaustion.
*/
public RegexpQuery(Term term, int flags, int maxDeterminizedStates) {
this(term, flags, defaultProvider, maxDeterminizedStates);
}
/**
* Constructs a query for terms matching <code>term</code>.
*
* @param term regular expression.
* @param flags optional RegExp features from {@link RegExp}
* @param provider custom AutomatonProvider for named automata
* @param maxDeterminizedStates maximum number of states that compiling the
* automaton for the regexp can result in. Set higher to allow more complex
* queries and lower to prevent memory exhaustion.
*/
public RegexpQuery(Term term, int flags, AutomatonProvider provider) {
super(term, new RegExp(term.text(), flags).toAutomaton(provider));
public RegexpQuery(Term term, int flags, AutomatonProvider provider,
int maxDeterminizedStates) {
super(term,
new RegExp(term.text(), flags).toAutomaton(
provider, maxDeterminizedStates), maxDeterminizedStates);
}
/** Prints a user-readable version of this query. */
......
......@@ -127,7 +127,11 @@ public class Sort {
setSort(field);
}
/** Sorts in succession by the criteria in each SortField. */
/** Sets the sort to the given criteria in succession: the
* first SortField is checked first, but if it produces a
* tie, then the second SortField is used to break the tie,
* etc. Finally, if there is still a tie after all SortFields
* are checked, the internal Lucene docid is used to break it. */
public Sort(SortField... fields) {
setSort(fields);
}
......@@ -137,7 +141,11 @@ public class Sort {
this.fields = new SortField[] { field };
}
/** Sets the sort to the given criteria in succession. */
/** Sets the sort to the given criteria in succession: the
* first SortField is checked first, but if it produces a
* tie, then the second SortField is used to break the tie,
* etc. Finally, if there is still a tie after all SortFields
* are checked, the internal Lucene docid is used to break it. */
public void setSort(SortField... fields) {
this.fields = fields;
}
......
......@@ -23,8 +23,8 @@ import java.util.List;
import org.apache.lucene.index.Term;
import org.apache.lucene.util.ToStringUtils;
import org.apache.lucene.util.automaton.Automata;
import org.apache.lucene.util.automaton.Operations;
import org.apache.lucene.util.automaton.Automaton;
import org.apache.lucene.util.automaton.Operations;
/** Implements the wildcard search query. Supported wildcards are <code>*</code>, which
* matches any character sequence (including the empty one), and <code>?</code>,
......@@ -57,6 +57,17 @@ public class WildcardQuery extends AutomatonQuery {
super(term, toAutomaton(term));
}
/**
* Constructs a query for terms matching <code>term</code>.
* @param maxDeterminizedStates maximum number of states in the resulting
*   automata. If the automata would need more than this many states
*   TooComplexToDeterminizeException is thrown. Higher numbers require more
*   space but can process more complex automata.
*/
public WildcardQuery(Term term, int maxDeterminizedStates) {
super(term, toAutomaton(term), maxDeterminizedStates);
}
/**
* Convert Lucene wildcard syntax into an automaton.
* @lucene.internal
......
......@@ -767,7 +767,7 @@ public abstract class TFIDFSimilarity extends Similarity {
private Explanation explainScore(int doc, Explanation freq, IDFStats stats, NumericDocValues norms) {
Explanation result = new Explanation();
result.setDescription("score(doc="+doc+",freq="+freq+"), product of:");
result.setDescription("score(doc="+doc+",freq="+freq.getValue()+"), product of:");
// explain query weight
Explanation queryExpl = new Explanation();
......
......@@ -235,24 +235,23 @@ public class NearSpansOrdered extends Spans {
return true;
}
/** Check whether two Spans in the same document are ordered.
* @return true iff spans1 starts before spans2
* or the spans start at the same position,
* and spans1 ends before spans2.
/** Check whether two Spans in the same document are ordered and not overlapping.
* @return false iff spans2's start position is smaller than spans1's end position
*/
static final boolean docSpansOrdered(Spans spans1, Spans spans2) {
static final boolean docSpansOrderedNonOverlap(Spans spans1, Spans spans2) {
assert spans1.doc() == spans2.doc() : "doc1 " + spans1.doc() + " != doc2 " + spans2.doc();
int start1 = spans1.start();
int start2 = spans2.start();
/* Do not call docSpansOrdered(int,int,int,int) to avoid invoking .end() : */
return (start1 == start2) ? (spans1.end() < spans2.end()) : (start1 < start2);
assert spans1.start() < spans1.end();
assert spans2.start() < spans2.end();
return spans1.end() <= spans2.start();
}
/** Like {@link #docSpansOrdered(Spans,Spans)}, but use the spans
/** Like {@link #docSpansOrderedNonOverlap(Spans,Spans)}, but use the spans
* starts and ends as parameters.
*/
private static final boolean docSpansOrdered(int start1, int end1, int start2, int end2) {
return (start1 == start2) ? (end1 < end2) : (start1 < start2);
private static final boolean docSpansOrderedNonOverlap(int start1, int end1, int start2, int end2) {
assert start1 < end1;
assert start2 < end2;
return end1 <= start2;
}
/** Order the subSpans within the same document by advancing all later spans
......@@ -261,7 +260,7 @@ public class NearSpansOrdered extends Spans {
private boolean stretchToOrder() throws IOException {
matchDoc = subSpans[0].doc();
for (int i = 1; inSameDoc && (i < subSpans.length); i++) {
while (! docSpansOrdered(subSpans[i-1], subSpans[i])) {
while (! docSpansOrderedNonOverlap(subSpans[i-1], subSpans[i])) {
if (! subSpans[i].next()) {
inSameDoc = false;
more = false;
......@@ -313,7 +312,7 @@ public class NearSpansOrdered extends Spans {
} else {
int ppStart = prevSpans.start();
int ppEnd = prevSpans.end(); // Cannot avoid invoking .end()
if (! docSpansOrdered(ppStart, ppEnd, lastStart, lastEnd)) {
if (! docSpansOrderedNonOverlap(ppStart, ppEnd, lastStart, lastEnd)) {
break; // Check remaining subSpans.
} else { // prevSpans still before (lastStart, lastEnd)
prevStart = ppStart;
......
......@@ -63,7 +63,7 @@ public class NearSpansUnordered extends Spans {
@Override
protected final boolean lessThan(SpansCell spans1, SpansCell spans2) {
if (spans1.doc() == spans2.doc()) {
return NearSpansOrdered.docSpansOrdered(spans1, spans2);
return docSpansOrdered(spans1, spans2);
} else {
return spans1.doc() < spans2.doc();
}
......@@ -233,6 +233,18 @@ public class NearSpansUnordered extends Spans {
return more && (atMatch() || next());
}
/** Check whether two Spans in the same document are ordered with possible overlap.
* @return true iff spans1 starts before spans2
* or the spans start at the same position,
* and spans1 ends before spans2.
*/
static final boolean docSpansOrdered(Spans spans1, Spans spans2) {
assert spans1.doc() == spans2.doc() : "doc1 " + spans1.doc() + " != doc2 " + spans2.doc();
int start1 = spans1.start();
int start2 = spans2.start();
return (start1 == start2) ? (spans1.end() < spans2.end()) : (start1 < start2);
}
private SpansCell min() { return queue.top(); }
@Override
......
......@@ -48,12 +48,15 @@ public class SpanNearQuery extends SpanQuery implements Cloneable {
/** Construct a SpanNearQuery. Matches spans matching a span from each
* clause, with up to <code>slop</code> total unmatched positions between
* them. * When <code>inOrder</code> is true, the spans from each clause
* must be * ordered as in <code>clauses</code>.
* them.
* <br>When <code>inOrder</code> is true, the spans from each clause
* must be in the same order as in <code>clauses</code> and must be non-overlapping.
* <br>When <code>inOrder</code> is false, the spans from each clause
* need not be ordered and may overlap.
* @param clauses the clauses to find near each other
* @param slop The slop value
* @param inOrder true if order is important
* */
*/
public SpanNearQuery(SpanQuery[] clauses, int slop, boolean inOrder) {
this(clauses, slop, inOrder, true);
}
......
......@@ -66,6 +66,7 @@ public abstract class RateLimiter {
/** mbPerSec is the MB/sec max IO rate */
public SimpleRateLimiter(double mbPerSec) {
setMbPerSec(mbPerSec);
lastNS = System.nanoTime();
}
/**
......@@ -137,7 +138,17 @@ public abstract class RateLimiter {
// NOTE: except maybe on real-time JVMs, minimum realistic sleep time
// is 1 msec; if you pass just 1 nsec the default impl rounds
// this up to 1 msec:
Thread.sleep((int) (pauseNS/1000000), (int) (pauseNS % 1000000));
int sleepNS;
int sleepMS;
if (pauseNS > 100000L * Integer.MAX_VALUE) {
// Not really practical (sleeping for 25 days) but we shouldn't overflow int:
sleepMS = Integer.MAX_VALUE;
sleepNS = 0;
} else {
sleepMS = (int) (pauseNS/1000000);
sleepNS = (int) (pauseNS % 1000000);
}
Thread.sleep(sleepMS, sleepNS);
} catch (InterruptedException ie) {
throw new ThreadInterruptedException(ie);
}
......
......@@ -116,10 +116,10 @@ public abstract class AttributeImpl implements Cloneable, Attribute {
* Attributes this implementation supports.
*/
public abstract void copyTo(AttributeImpl target);
/**
* Shallow clone. Subclasses must override this if they
* need to clone any members deeply,
* In most cases the clone is, and should be, deep in order to be able to
* properly capture the state of all attributes.
*/
@Override
public AttributeImpl clone() {
......
......@@ -106,9 +106,9 @@ public final class NamedSPILoader<S extends NamedSPILoader.NamedSPI> implements
public S lookup(String name) {
final S service = services.get(name);
if (service != null) return service;
throw new IllegalArgumentException("A SPI class of type "+clazz.getName()+" with name '"+name+"' does not exist. "+
"You need to add the corresponding JAR file supporting this SPI to your classpath."+
"The current classpath supports the following names: "+availableServices());
throw new IllegalArgumentException("An SPI class of type "+clazz.getName()+" with name '"+name+"' does not exist."+
" You need to add the corresponding JAR file supporting this SPI to your classpath."+
" The current classpath supports the following names: "+availableServices());
}
public Set<String> availableServices() {
......
......@@ -140,7 +140,7 @@ public final class SPIClassIterator<S> implements Iterator<Class<? extends S>> {
// don't initialize the class (pass false as 2nd parameter):
return Class.forName(c, false, loader).asSubclass(clazz);
} catch (ClassNotFoundException cnfe) {
throw new ServiceConfigurationError(String.format(Locale.ROOT, "A SPI class of type %s with classname %s does not exist, "+
throw new ServiceConfigurationError(String.format(Locale.ROOT, "An SPI class of type %s with classname %s does not exist, "+
"please fix the file '%s%1$s' in your classpath.", clazz.getName(), c, META_INF_SERVICES));
}
}
......
......@@ -237,9 +237,16 @@ public final class Version {
/**
* Match settings and bugs in Lucene's 4.10.2 release.
* @deprecated Use latest
*/
@Deprecated
public static final Version LUCENE_4_10_2 = new Version(4, 10, 2);
/**
* Match settings and bugs in Lucene's 4.10.3 release.
*/
public static final Version LUCENE_4_10_3 = new Version(4, 10, 3);
/* Add new constants for later versions **here** to respect order! */
// To add a new version:
......@@ -261,7 +268,7 @@ public final class Version {
* some defaults may have changed and may break functionality
* in your application.
*/
public static final Version LATEST = LUCENE_4_10_2;
public static final Version LATEST = LUCENE_4_10_3;
/**
* Constant for backwards compatibility.
......