1   /*
2    * Copyright (C) 2009 The Guava Authors
3    *
4    * Licensed under the Apache License, Version 2.0 (the "License");
5    * you may not use this file except in compliance with the License.
6    * You may obtain a copy of the License at
7    *
8    * http://www.apache.org/licenses/LICENSE-2.0
9    *
10   * Unless required by applicable law or agreed to in writing, software
11   * distributed under the License is distributed on an "AS IS" BASIS,
12   * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13   * See the License for the specific language governing permissions and
14   * limitations under the License.
15   */
16  
17  package com.google.common.cache;
18  
19  import static com.google.common.base.Preconditions.checkNotNull;
20  import static com.google.common.base.Preconditions.checkState;
21  import static com.google.common.cache.CacheBuilder.NULL_TICKER;
22  import static com.google.common.cache.CacheBuilder.UNSET_INT;
23  import static com.google.common.util.concurrent.Uninterruptibles.getUninterruptibly;
24  import static java.util.concurrent.TimeUnit.NANOSECONDS;
25  
26  import com.google.common.annotations.GwtCompatible;
27  import com.google.common.annotations.GwtIncompatible;
28  import com.google.common.annotations.VisibleForTesting;
29  import com.google.common.base.Equivalence;
30  import com.google.common.base.Function;
31  import com.google.common.base.Stopwatch;
32  import com.google.common.base.Ticker;
33  import com.google.common.cache.AbstractCache.SimpleStatsCounter;
34  import com.google.common.cache.AbstractCache.StatsCounter;
35  import com.google.common.cache.CacheBuilder.NullListener;
36  import com.google.common.cache.CacheBuilder.OneWeigher;
37  import com.google.common.cache.CacheLoader.InvalidCacheLoadException;
38  import com.google.common.cache.CacheLoader.UnsupportedLoadingOperationException;
39  import com.google.common.collect.AbstractSequentialIterator;
40  import com.google.common.collect.ImmutableMap;
41  import com.google.common.collect.Iterators;
42  import com.google.common.collect.Maps;
43  import com.google.common.collect.Sets;
44  import com.google.common.primitives.Ints;
45  import com.google.common.util.concurrent.ExecutionError;
46  import com.google.common.util.concurrent.Futures;
47  import com.google.common.util.concurrent.ListenableFuture;
48  import com.google.common.util.concurrent.ListeningExecutorService;
49  import com.google.common.util.concurrent.MoreExecutors;
50  import com.google.common.util.concurrent.SettableFuture;
51  import com.google.common.util.concurrent.UncheckedExecutionException;
52  import com.google.common.util.concurrent.Uninterruptibles;
53  
54  import java.io.IOException;
55  import java.io.ObjectInputStream;
56  import java.io.Serializable;
57  import java.lang.ref.Reference;
58  import java.lang.ref.ReferenceQueue;
59  import java.lang.ref.SoftReference;
60  import java.lang.ref.WeakReference;
61  import java.util.AbstractCollection;
62  import java.util.AbstractMap;
63  import java.util.AbstractQueue;
64  import java.util.AbstractSet;
65  import java.util.Collection;
66  import java.util.Iterator;
67  import java.util.Map;
68  import java.util.Map.Entry;
69  import java.util.NoSuchElementException;
70  import java.util.Queue;
71  import java.util.Set;
72  import java.util.concurrent.Callable;
73  import java.util.concurrent.ConcurrentLinkedQueue;
74  import java.util.concurrent.ConcurrentMap;
75  import java.util.concurrent.ExecutionException;
76  import java.util.concurrent.TimeUnit;
77  import java.util.concurrent.atomic.AtomicInteger;
78  import java.util.concurrent.atomic.AtomicReferenceArray;
79  import java.util.concurrent.locks.ReentrantLock;
80  import java.util.logging.Level;
81  import java.util.logging.Logger;
82  
83  import javax.annotation.Nullable;
84  import javax.annotation.concurrent.GuardedBy;
85  
86  /**
87   * The concurrent hash map implementation built by {@link CacheBuilder}.
88   *
89   * <p>This implementation is heavily derived from revision 1.96 of <a
90   * href="http://tinyurl.com/ConcurrentHashMap">ConcurrentHashMap.java</a>.
91   *
92   * @author Charles Fry
93   * @author Bob Lee ({@code com.google.common.collect.MapMaker})
94   * @author Doug Lea ({@code ConcurrentHashMap})
95   */
96  @GwtCompatible(emulated = true)
97  class LocalCache<K, V> extends AbstractMap<K, V> implements ConcurrentMap<K, V> {
98  
99    /*
100    * The basic strategy is to subdivide the table among Segments, each of which itself is a
101    * concurrently readable hash table. The map supports non-blocking reads and concurrent writes
102    * across different segments.
103    *
104    * If a maximum size is specified, a best-effort bounding is performed per segment, using a
105    * page-replacement algorithm to determine which entries to evict when the capacity has been
106    * exceeded.
107    *
108    * The page replacement algorithm's data structures are kept eventually consistent with the map. The
109    * ordering of writes to a segment is sequentially consistent. An update to the map and the recording
110    * of reads may not be immediately reflected in the algorithm's data structures. These structures
111    * are guarded by a lock and operations are applied in batches to avoid lock contention. The
112    * penalty of applying the batches is spread across threads so that the amortized cost is slightly
113    * higher than performing just the operation without enforcing the capacity constraint.
114    *
115    * This implementation uses a per-segment queue to record a memento of the additions, removals,
116    * and accesses that were performed on the map. The queue is drained on writes and when it exceeds
117    * its capacity threshold.
118    *
119    * The Least Recently Used page replacement algorithm was chosen due to its simplicity, high hit
120    * rate, and ability to be implemented with O(1) time complexity. The initial LRU implementation
121    * operates per-segment rather than globally for increased implementation simplicity. We expect
122    * the cache hit rate to be similar to that of a global LRU algorithm.
123    */
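
  /*
   * Illustrative sketch (an assumption; the lookup methods themselves are outside this excerpt):
   * under this design a read boils down to a hash, a segment selection, and a non-blocking lookup
   * within that segment, roughly:
   *
   *   int hash = hash(key);                    // rehash(keyEquivalence.hash(key)), defined below
   *   return segmentFor(hash).get(key, hash);  // per-segment read, no lock on the read path
   */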
124 
125   // Constants
126 
127   /**
128    * The maximum capacity, used if a higher value is implicitly specified by either of the
129    * constructors with arguments. MUST be a power of two <= 1<<30 to ensure that entries are
130    * indexable using ints.
131    */
132   static final int MAXIMUM_CAPACITY = 1 << 30;
133 
134   /** The maximum number of segments to allow; used to bound constructor arguments. */
135   static final int MAX_SEGMENTS = 1 << 16; // slightly conservative
136 
137   /** Number of (unsynchronized) retries in the containsValue method. */
138   static final int CONTAINS_VALUE_RETRIES = 3;
139 
140   /**
141    * Number of cache access operations that can be buffered per segment before the cache's recency
142    * ordering information is updated. This is used to avoid lock contention by recording a memento
143    * of reads and delaying a lock acquisition until the threshold is crossed or a mutation occurs.
144    *
145    * <p>This must be a (2^n)-1 as it is used as a mask.
146    */
147   static final int DRAIN_THRESHOLD = 0x3F;
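
  /*
   * Sketch of the assumed read-path use of this mask (the drain logic itself lives in Segment,
   * outside this excerpt): a read bumps a counter and only triggers cleanup when the masked count
   * wraps to zero, roughly:
   *
   *   if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
   *     cleanUp();  // drain the recency queue under the segment lock
   *   }
   */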
148 
149   /**
150    * Maximum number of entries to be drained in a single cleanup run. This applies independently to
151    * the cleanup queue and both reference queues.
152    */
153   // TODO(fry): empirically optimize this
154   static final int DRAIN_MAX = 16;
155 
156   // Fields
157 
158   static final Logger logger = Logger.getLogger(LocalCache.class.getName());
159 
160   static final ListeningExecutorService sameThreadExecutor = MoreExecutors.sameThreadExecutor();
161 
162   /**
163    * Mask value for indexing into segments. The upper bits of a key's hash code are used to choose
164    * the segment.
165    */
166   final int segmentMask;
167 
168   /**
169    * Shift value for indexing within segments. Helps prevent entries that end up in the same segment
170    * from also ending up in the same bucket.
171    */
172   final int segmentShift;
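
  /*
   * Minimal sketch of how the mask and shift combine (assuming the usual segmentFor
   * implementation, which is outside this excerpt): the upper bits of the rehashed key pick the
   * segment, leaving the lower bits to spread entries across buckets within it:
   *
   *   Segment<K, V> segmentFor(int hash) {
   *     return segments[(hash >>> segmentShift) & segmentMask];
   *   }
   */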
173 
174   /** The segments, each of which is a specialized hash table. */
175   final Segment<K, V>[] segments;
176 
177   /** The concurrency level. */
178   final int concurrencyLevel;
179 
180   /** Strategy for comparing keys. */
181   final Equivalence<Object> keyEquivalence;
182 
183   /** Strategy for comparing values. */
184   final Equivalence<Object> valueEquivalence;
185 
186   /** Strategy for referencing keys. */
187   final Strength keyStrength;
188 
189   /** Strategy for referencing values. */
190   final Strength valueStrength;
191 
192   /** The maximum weight of this map. UNSET_INT if there is no maximum. */
193   final long maxWeight;
194 
195   /** Weigher to weigh cache entries. */
196   final Weigher<K, V> weigher;
197 
198   /** How long after the last access to an entry the map will retain that entry. */
199   final long expireAfterAccessNanos;
200 
201   /** How long after the last write to an entry the map will retain that entry. */
202   final long expireAfterWriteNanos;
203 
204   /** How long after the last write an entry becomes a candidate for refresh. */
205   final long refreshNanos;
206 
207   /** Entries waiting to be consumed by the removal listener. */
208   // TODO(fry): define a new type which creates event objects and automates the clear logic
209   final Queue<RemovalNotification<K, V>> removalNotificationQueue;
210 
211   /**
212    * A listener that is invoked when an entry is removed due to expiration or garbage collection of
213    * soft/weak entries.
214    */
215   final RemovalListener<K, V> removalListener;
216 
217   /** Measures time in a testable way. */
218   final Ticker ticker;
219 
220   /** Factory used to create new entries. */
221   final EntryFactory entryFactory;
222 
223   /**
224    * Accumulates global cache statistics. Note that there are also per-segment stats counters
225    * which must be aggregated to obtain a global stats view.
226    */
227   final StatsCounter globalStatsCounter;
228 
229   /**
230    * The default cache loader to use on loading operations.
231    */
232   @Nullable
233   final CacheLoader<? super K, V> defaultLoader;
234 
235   /**
236    * Creates a new, empty map with the specified strategy, initial capacity and concurrency level.
237    */
238   LocalCache(
239       CacheBuilder<? super K, ? super V> builder, @Nullable CacheLoader<? super K, V> loader) {
240     concurrencyLevel = Math.min(builder.getConcurrencyLevel(), MAX_SEGMENTS);
241 
242     keyStrength = builder.getKeyStrength();
243     valueStrength = builder.getValueStrength();
244 
245     keyEquivalence = builder.getKeyEquivalence();
246     valueEquivalence = builder.getValueEquivalence();
247 
248     maxWeight = builder.getMaximumWeight();
249     weigher = builder.getWeigher();
250     expireAfterAccessNanos = builder.getExpireAfterAccessNanos();
251     expireAfterWriteNanos = builder.getExpireAfterWriteNanos();
252     refreshNanos = builder.getRefreshNanos();
253 
254     removalListener = builder.getRemovalListener();
255     removalNotificationQueue = (removalListener == NullListener.INSTANCE)
256         ? LocalCache.<RemovalNotification<K, V>>discardingQueue()
257         : new ConcurrentLinkedQueue<RemovalNotification<K, V>>();
258 
259     ticker = builder.getTicker(recordsTime());
260     entryFactory = EntryFactory.getFactory(keyStrength, usesAccessEntries(), usesWriteEntries());
261     globalStatsCounter = builder.getStatsCounterSupplier().get();
262     defaultLoader = loader;
263 
264     int initialCapacity = Math.min(builder.getInitialCapacity(), MAXIMUM_CAPACITY);
265     if (evictsBySize() && !customWeigher()) {
266       initialCapacity = Math.min(initialCapacity, (int) maxWeight);
267     }
268 
269     // Find the lowest power-of-two segmentCount that is at least concurrencyLevel, unless
270     // maximumSize/Weight is specified in which case ensure that each segment gets at least 10
271     // entries. The special casing for size-based eviction is only necessary because that eviction
272     // happens per segment instead of globally, so too many segments compared to the maximum size
273     // will result in random eviction behavior.
274     int segmentShift = 0;
275     int segmentCount = 1;
276     while (segmentCount < concurrencyLevel
277            && (!evictsBySize() || segmentCount * 20 <= maxWeight)) {
278       ++segmentShift;
279       segmentCount <<= 1;
280     }
281     this.segmentShift = 32 - segmentShift;
282     segmentMask = segmentCount - 1;
283 
284     this.segments = newSegmentArray(segmentCount);
285 
286     int segmentCapacity = initialCapacity / segmentCount;
287     if (segmentCapacity * segmentCount < initialCapacity) {
288       ++segmentCapacity;
289     }
290 
291     int segmentSize = 1;
292     while (segmentSize < segmentCapacity) {
293       segmentSize <<= 1;
294     }
295 
296     if (evictsBySize()) {
297       // Ensure sum of segment max weights = overall max weight
298       long maxSegmentWeight = maxWeight / segmentCount + 1;
299       long remainder = maxWeight % segmentCount;
300       for (int i = 0; i < this.segments.length; ++i) {
301         if (i == remainder) {
302           maxSegmentWeight--;
303         }
304         this.segments[i] =
305             createSegment(segmentSize, maxSegmentWeight, builder.getStatsCounterSupplier().get());
306       }
307     } else {
308       for (int i = 0; i < this.segments.length; ++i) {
309         this.segments[i] =
310             createSegment(segmentSize, UNSET_INT, builder.getStatsCounterSupplier().get());
311       }
312     }
313   }
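
  /*
   * Worked example of the weight split above: with maxWeight = 10 and segmentCount = 4, the loop
   * starts at maxSegmentWeight = 10 / 4 + 1 = 3 with remainder = 2, decrements to 2 once i reaches
   * the remainder, and so assigns per-segment weights of 3, 3, 2, 2, which sum back to 10.
   */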
314 
315   boolean evictsBySize() {
316     return maxWeight >= 0;
317   }
318 
319   boolean customWeigher() {
320     return weigher != OneWeigher.INSTANCE;
321   }
322 
323   boolean expires() {
324     return expiresAfterWrite() || expiresAfterAccess();
325   }
326 
327   boolean expiresAfterWrite() {
328     return expireAfterWriteNanos > 0;
329   }
330 
331   boolean expiresAfterAccess() {
332     return expireAfterAccessNanos > 0;
333   }
334 
335   boolean refreshes() {
336     return refreshNanos > 0;
337   }
338 
339   boolean usesAccessQueue() {
340     return expiresAfterAccess() || evictsBySize();
341   }
342 
343   boolean usesWriteQueue() {
344     return expiresAfterWrite();
345   }
346 
347   boolean recordsWrite() {
348     return expiresAfterWrite() || refreshes();
349   }
350 
351   boolean recordsAccess() {
352     return expiresAfterAccess();
353   }
354 
355   boolean recordsTime() {
356     return recordsWrite() || recordsAccess();
357   }
358 
359   boolean usesWriteEntries() {
360     return usesWriteQueue() || recordsWrite();
361   }
362 
363   boolean usesAccessEntries() {
364     return usesAccessQueue() || recordsAccess();
365   }
366 
367   boolean usesKeyReferences() {
368     return keyStrength != Strength.STRONG;
369   }
370 
371   boolean usesValueReferences() {
372     return valueStrength != Strength.STRONG;
373   }
374 
375   enum Strength {
376     /*
377      * TODO(kevinb): If we strongly reference the value and aren't loading, we needn't wrap the
378      * value. This could save ~8 bytes per entry.
379      */
380 
381     STRONG {
382       @Override
383       <K, V> ValueReference<K, V> referenceValue(
384           Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
385         return (weight == 1)
386             ? new StrongValueReference<K, V>(value)
387             : new WeightedStrongValueReference<K, V>(value, weight);
388       }
389 
390       @Override
391       Equivalence<Object> defaultEquivalence() {
392         return Equivalence.equals();
393       }
394     },
395 
396     SOFT {
397       @Override
398       <K, V> ValueReference<K, V> referenceValue(
399           Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
400         return (weight == 1)
401             ? new SoftValueReference<K, V>(segment.valueReferenceQueue, value, entry)
402             : new WeightedSoftValueReference<K, V>(
403                 segment.valueReferenceQueue, value, entry, weight);
404       }
405 
406       @Override
407       Equivalence<Object> defaultEquivalence() {
408         return Equivalence.identity();
409       }
410     },
411 
412     WEAK {
413       @Override
414       <K, V> ValueReference<K, V> referenceValue(
415           Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight) {
416         return (weight == 1)
417             ? new WeakValueReference<K, V>(segment.valueReferenceQueue, value, entry)
418             : new WeightedWeakValueReference<K, V>(
419                 segment.valueReferenceQueue, value, entry, weight);
420       }
421 
422       @Override
423       Equivalence<Object> defaultEquivalence() {
424         return Equivalence.identity();
425       }
426     };
427 
428     /**
429      * Creates a reference for the given value according to this value strength.
430      */
431     abstract <K, V> ValueReference<K, V> referenceValue(
432         Segment<K, V> segment, ReferenceEntry<K, V> entry, V value, int weight);
433 
434     /**
435      * Returns the default equivalence strategy used to compare and hash keys or values referenced
436      * at this strength. This strategy will be used unless the user explicitly specifies an
437      * alternate strategy.
438      */
439     abstract Equivalence<Object> defaultEquivalence();
440   }
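
  /*
   * Usage note (an assumption about CacheBuilder, which is outside this excerpt): the equivalence
   * returned by defaultEquivalence() is only a fallback, and the builder is expected to prefer a
   * user-supplied strategy, along the lines of:
   *
   *   keyEquivalence = firstNonNull(userKeyEquivalence, keyStrength.defaultEquivalence());
   */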
441 
442   /**
443    * Creates new entries.
444    */
445   enum EntryFactory {
446     STRONG {
447       @Override
448       <K, V> ReferenceEntry<K, V> newEntry(
449           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
450         return new StrongEntry<K, V>(key, hash, next);
451       }
452     },
453     STRONG_ACCESS {
454       @Override
455       <K, V> ReferenceEntry<K, V> newEntry(
456           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
457         return new StrongAccessEntry<K, V>(key, hash, next);
458       }
459 
460       @Override
461       <K, V> ReferenceEntry<K, V> copyEntry(
462           Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
463         ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
464         copyAccessEntry(original, newEntry);
465         return newEntry;
466       }
467     },
468     STRONG_WRITE {
469       @Override
470       <K, V> ReferenceEntry<K, V> newEntry(
471           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
472         return new StrongWriteEntry<K, V>(key, hash, next);
473       }
474 
475       @Override
476       <K, V> ReferenceEntry<K, V> copyEntry(
477           Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
478         ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
479         copyWriteEntry(original, newEntry);
480         return newEntry;
481       }
482     },
483     STRONG_ACCESS_WRITE {
484       @Override
485       <K, V> ReferenceEntry<K, V> newEntry(
486           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
487         return new StrongAccessWriteEntry<K, V>(key, hash, next);
488       }
489 
490       @Override
491       <K, V> ReferenceEntry<K, V> copyEntry(
492           Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
493         ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
494         copyAccessEntry(original, newEntry);
495         copyWriteEntry(original, newEntry);
496         return newEntry;
497       }
498     },
499 
500     WEAK {
501       @Override
502       <K, V> ReferenceEntry<K, V> newEntry(
503           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
504         return new WeakEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
505       }
506     },
507     WEAK_ACCESS {
508       @Override
509       <K, V> ReferenceEntry<K, V> newEntry(
510           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
511         return new WeakAccessEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
512       }
513 
514       @Override
515       <K, V> ReferenceEntry<K, V> copyEntry(
516           Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
517         ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
518         copyAccessEntry(original, newEntry);
519         return newEntry;
520       }
521     },
522     WEAK_WRITE {
523       @Override
524       <K, V> ReferenceEntry<K, V> newEntry(
525           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
526         return new WeakWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
527       }
528 
529       @Override
530       <K, V> ReferenceEntry<K, V> copyEntry(
531           Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
532         ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
533         copyWriteEntry(original, newEntry);
534         return newEntry;
535       }
536     },
537     WEAK_ACCESS_WRITE {
538       @Override
539       <K, V> ReferenceEntry<K, V> newEntry(
540           Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
541         return new WeakAccessWriteEntry<K, V>(segment.keyReferenceQueue, key, hash, next);
542       }
543 
544       @Override
545       <K, V> ReferenceEntry<K, V> copyEntry(
546           Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
547         ReferenceEntry<K, V> newEntry = super.copyEntry(segment, original, newNext);
548         copyAccessEntry(original, newEntry);
549         copyWriteEntry(original, newEntry);
550         return newEntry;
551       }
552     };
553 
554     /**
555      * Masks used to compute indices in the following table.
556      */
557     static final int ACCESS_MASK = 1;
558     static final int WRITE_MASK = 2;
559     static final int WEAK_MASK = 4;
560 
561     /**
562      * Look-up table for factories.
563      */
564     static final EntryFactory[] factories = {
565       STRONG, STRONG_ACCESS, STRONG_WRITE, STRONG_ACCESS_WRITE,
566       WEAK, WEAK_ACCESS, WEAK_WRITE, WEAK_ACCESS_WRITE,
567     };
568 
569     static EntryFactory getFactory(Strength keyStrength, boolean usesAccessQueue,
570         boolean usesWriteQueue) {
571       int flags = ((keyStrength == Strength.WEAK) ? WEAK_MASK : 0)
572           | (usesAccessQueue ? ACCESS_MASK : 0)
573           | (usesWriteQueue ? WRITE_MASK : 0);
574       return factories[flags];
575     }
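
    /*
     * Worked example of the lookup above: getFactory(Strength.WEAK, true, false) computes
     * flags = WEAK_MASK | ACCESS_MASK = 4 | 1 = 5, and factories[5] is WEAK_ACCESS, the entry type
     * carrying a weak key reference plus access-order links.
     */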
576 
577     /**
578      * Creates a new entry.
579      *
580      * @param segment to create the entry for
581      * @param key of the entry
582      * @param hash of the key
583      * @param next entry in the same bucket
584      */
585     abstract <K, V> ReferenceEntry<K, V> newEntry(
586         Segment<K, V> segment, K key, int hash, @Nullable ReferenceEntry<K, V> next);
587 
588     /**
589      * Copies an entry, assigning it a new {@code next} entry.
590      *
591      * @param original the entry to copy
592      * @param newNext entry in the same bucket
593      */
594     @GuardedBy("Segment.this")
595     <K, V> ReferenceEntry<K, V> copyEntry(
596         Segment<K, V> segment, ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
597       return newEntry(segment, original.getKey(), original.getHash(), newNext);
598     }
599 
600     @GuardedBy("Segment.this")
601     <K, V> void copyAccessEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
602       // TODO(fry): when we link values instead of entries this method can go
603       // away, as can connectAccessOrder, nullifyAccessOrder.
604       newEntry.setAccessTime(original.getAccessTime());
605 
606       connectAccessOrder(original.getPreviousInAccessQueue(), newEntry);
607       connectAccessOrder(newEntry, original.getNextInAccessQueue());
608 
609       nullifyAccessOrder(original);
610     }
611 
612     @GuardedBy("Segment.this")
613     <K, V> void copyWriteEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newEntry) {
614       // TODO(fry): when we link values instead of entries this method can go
615       // away, as can connectWriteOrder, nullifyWriteOrder.
616       newEntry.setWriteTime(original.getWriteTime());
617 
618       connectWriteOrder(original.getPreviousInWriteQueue(), newEntry);
619       connectWriteOrder(newEntry, original.getNextInWriteQueue());
620 
621       nullifyWriteOrder(original);
622     }
623   }
624 
625   /**
626    * A reference to a value.
627    */
628   interface ValueReference<K, V> {
629     /**
630      * Returns the value. Does not block or throw exceptions.
631      */
632     @Nullable
633     V get();
634 
635     /**
636      * Waits for a value that may still be loading. Unlike get(), this method can block (in the
637      * case of FutureValueReference).
638      *
639      * @throws ExecutionException if the loading thread throws an exception
640      * @throws ExecutionError if the loading thread throws an error
641      */
642     V waitForValue() throws ExecutionException;
643 
644     /**
645      * Returns the weight of this entry. This is assumed to be static between calls to setValue.
646      */
647     int getWeight();
648 
649     /**
650      * Returns the entry associated with this value reference, or {@code null} if this value
651      * reference is independent of any entry.
652      */
653     @Nullable
654     ReferenceEntry<K, V> getEntry();
655 
656     /**
657      * Creates a copy of this reference for the given entry.
658      *
659      * <p>{@code value} may be null only for a loading reference.
660      */
661     ValueReference<K, V> copyFor(
662         ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry);
663 
664     /**
665      * Notify pending loads that a new value was set. This is only relevant to loading
666      * value references.
667      */
668     void notifyNewValue(@Nullable V newValue);
669 
670     /**
671      * Returns true if a new value is currently loading, regardless of whether or not there is an
672      * existing value. It is assumed that the return value of this method is constant for any given
673      * ValueReference instance.
674      */
675     boolean isLoading();
676 
677     /**
678      * Returns true if this reference contains an active value, meaning one that is still considered
679      * present in the cache. Active values consist of live values, which are returned by cache
680      * lookups, and dead values, which have been evicted but are awaiting removal. Non-active values
681      * consist strictly of loading values, though during refresh a value may be both active and
682      * loading.
683      */
684     boolean isActive();
685   }
686 
687   /**
688    * Placeholder. Indicates that the value hasn't been set yet.
689    */
690   static final ValueReference<Object, Object> UNSET = new ValueReference<Object, Object>() {
691     @Override
692     public Object get() {
693       return null;
694     }
695 
696     @Override
697     public int getWeight() {
698       return 0;
699     }
700 
701     @Override
702     public ReferenceEntry<Object, Object> getEntry() {
703       return null;
704     }
705 
706     @Override
707     public ValueReference<Object, Object> copyFor(ReferenceQueue<Object> queue,
708         @Nullable Object value, ReferenceEntry<Object, Object> entry) {
709       return this;
710     }
711 
712     @Override
713     public boolean isLoading() {
714       return false;
715     }
716 
717     @Override
718     public boolean isActive() {
719       return false;
720     }
721 
722     @Override
723     public Object waitForValue() {
724       return null;
725     }
726 
727     @Override
728     public void notifyNewValue(Object newValue) {}
729   };
730 
731   /**
732    * Returns the singleton placeholder that indicates a value hasn't been set yet.
733    */
734   @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
735   static <K, V> ValueReference<K, V> unset() {
736     return (ValueReference<K, V>) UNSET;
737   }
738 
739   /**
740    * An entry in a reference map.
741    *
742    * Entries in the map can be in the following states:
743    *
744    * Valid:
745    * - Live: valid key/value are set
746    * - Loading: loading is pending
747    *
748    * Invalid:
749    * - Expired: time expired (key/value may still be set)
750    * - Collected: key/value was partially collected, but not yet cleaned up
751    * - Unset: marked as unset, awaiting cleanup or reuse
752    */
753   interface ReferenceEntry<K, V> {
754     /**
755      * Returns the value reference from this entry.
756      */
757     ValueReference<K, V> getValueReference();
758 
759     /**
760      * Sets the value reference for this entry.
761      */
762     void setValueReference(ValueReference<K, V> valueReference);
763 
764     /**
765      * Returns the next entry in the chain.
766      */
767     @Nullable
768     ReferenceEntry<K, V> getNext();
769 
770     /**
771      * Returns the entry's hash.
772      */
773     int getHash();
774 
775     /**
776      * Returns the key for this entry.
777      */
778     @Nullable
779     K getKey();
780 
781     /*
782      * Used by entries that use access order. Access entries are maintained in a doubly-linked list.
783      * New entries are added at the tail of the list at write time; stale entries are expired from
784      * the head of the list.
785      */
786 
787     /**
788      * Returns the time that this entry was last accessed, in ns.
789      */
790     long getAccessTime();
791 
792     /**
793      * Sets the entry access time in ns.
794      */
795     void setAccessTime(long time);
796 
797     /**
798      * Returns the next entry in the access queue.
799      */
800     ReferenceEntry<K, V> getNextInAccessQueue();
801 
802     /**
803      * Sets the next entry in the access queue.
804      */
805     void setNextInAccessQueue(ReferenceEntry<K, V> next);
806 
807     /**
808      * Returns the previous entry in the access queue.
809      */
810     ReferenceEntry<K, V> getPreviousInAccessQueue();
811 
812     /**
813      * Sets the previous entry in the access queue.
814      */
815     void setPreviousInAccessQueue(ReferenceEntry<K, V> previous);
816 
817     /*
818      * Implemented by entries that use write order. Write entries are maintained in a
819      * doubly-linked list. New entries are added at the tail of the list at write time and stale
820      * entries are expired from the head of the list.
821      */
822 
823     /**
824      * Returns the time that this entry was last written, in ns.
825      */
826     long getWriteTime();
827 
828     /**
829      * Sets the entry write time in ns.
830      */
831     void setWriteTime(long time);
832 
833     /**
834      * Returns the next entry in the write queue.
835      */
836     ReferenceEntry<K, V> getNextInWriteQueue();
837 
838     /**
839      * Sets the next entry in the write queue.
840      */
841     void setNextInWriteQueue(ReferenceEntry<K, V> next);
842 
843     /**
844      * Returns the previous entry in the write queue.
845      */
846     ReferenceEntry<K, V> getPreviousInWriteQueue();
847 
848     /**
849      * Sets the previous entry in the write queue.
850      */
851     void setPreviousInWriteQueue(ReferenceEntry<K, V> previous);
852   }
853 
854   private enum NullEntry implements ReferenceEntry<Object, Object> {
855     INSTANCE;
856 
857     @Override
858     public ValueReference<Object, Object> getValueReference() {
859       return null;
860     }
861 
862     @Override
863     public void setValueReference(ValueReference<Object, Object> valueReference) {}
864 
865     @Override
866     public ReferenceEntry<Object, Object> getNext() {
867       return null;
868     }
869 
870     @Override
871     public int getHash() {
872       return 0;
873     }
874 
875     @Override
876     public Object getKey() {
877       return null;
878     }
879 
880     @Override
881     public long getAccessTime() {
882       return 0;
883     }
884 
885     @Override
886     public void setAccessTime(long time) {}
887 
888     @Override
889     public ReferenceEntry<Object, Object> getNextInAccessQueue() {
890       return this;
891     }
892 
893     @Override
894     public void setNextInAccessQueue(ReferenceEntry<Object, Object> next) {}
895 
896     @Override
897     public ReferenceEntry<Object, Object> getPreviousInAccessQueue() {
898       return this;
899     }
900 
901     @Override
902     public void setPreviousInAccessQueue(ReferenceEntry<Object, Object> previous) {}
903 
904     @Override
905     public long getWriteTime() {
906       return 0;
907     }
908 
909     @Override
910     public void setWriteTime(long time) {}
911 
912     @Override
913     public ReferenceEntry<Object, Object> getNextInWriteQueue() {
914       return this;
915     }
916 
917     @Override
918     public void setNextInWriteQueue(ReferenceEntry<Object, Object> next) {}
919 
920     @Override
921     public ReferenceEntry<Object, Object> getPreviousInWriteQueue() {
922       return this;
923     }
924 
925     @Override
926     public void setPreviousInWriteQueue(ReferenceEntry<Object, Object> previous) {}
927   }
928 
929   abstract static class AbstractReferenceEntry<K, V> implements ReferenceEntry<K, V> {
930     @Override
931     public ValueReference<K, V> getValueReference() {
932       throw new UnsupportedOperationException();
933     }
934 
935     @Override
936     public void setValueReference(ValueReference<K, V> valueReference) {
937       throw new UnsupportedOperationException();
938     }
939 
940     @Override
941     public ReferenceEntry<K, V> getNext() {
942       throw new UnsupportedOperationException();
943     }
944 
945     @Override
946     public int getHash() {
947       throw new UnsupportedOperationException();
948     }
949 
950     @Override
951     public K getKey() {
952       throw new UnsupportedOperationException();
953     }
954 
955     @Override
956     public long getAccessTime() {
957       throw new UnsupportedOperationException();
958     }
959 
960     @Override
961     public void setAccessTime(long time) {
962       throw new UnsupportedOperationException();
963     }
964 
965     @Override
966     public ReferenceEntry<K, V> getNextInAccessQueue() {
967       throw new UnsupportedOperationException();
968     }
969 
970     @Override
971     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
972       throw new UnsupportedOperationException();
973     }
974 
975     @Override
976     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
977       throw new UnsupportedOperationException();
978     }
979 
980     @Override
981     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
982       throw new UnsupportedOperationException();
983     }
984 
985     @Override
986     public long getWriteTime() {
987       throw new UnsupportedOperationException();
988     }
989 
990     @Override
991     public void setWriteTime(long time) {
992       throw new UnsupportedOperationException();
993     }
994 
995     @Override
996     public ReferenceEntry<K, V> getNextInWriteQueue() {
997       throw new UnsupportedOperationException();
998     }
999 
1000     @Override
1001     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1002       throw new UnsupportedOperationException();
1003     }
1004 
1005     @Override
1006     public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1007       throw new UnsupportedOperationException();
1008     }
1009 
1010     @Override
1011     public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1012       throw new UnsupportedOperationException();
1013     }
1014   }
1015 
1016   @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
1017   static <K, V> ReferenceEntry<K, V> nullEntry() {
1018     return (ReferenceEntry<K, V>) NullEntry.INSTANCE;
1019   }
1020 
1021   static final Queue<? extends Object> DISCARDING_QUEUE = new AbstractQueue<Object>() {
1022     @Override
1023     public boolean offer(Object o) {
1024       return true;
1025     }
1026 
1027     @Override
1028     public Object peek() {
1029       return null;
1030     }
1031 
1032     @Override
1033     public Object poll() {
1034       return null;
1035     }
1036 
1037     @Override
1038     public int size() {
1039       return 0;
1040     }
1041 
1042     @Override
1043     public Iterator<Object> iterator() {
1044       return Iterators.emptyIterator();
1045     }
1046   };
1047 
1048   /**
1049    * Queue that discards all elements.
1050    */
1051   @SuppressWarnings("unchecked") // impl never uses a parameter or returns any non-null value
1052   static <E> Queue<E> discardingQueue() {
1053     return (Queue) DISCARDING_QUEUE;
1054   }
1055 
1056   /*
1057    * Note: All of this duplicate code sucks, but it saves a lot of memory. If only Java had mixins!
1058    * To maintain this code, make a change for the strong reference type. Then, cut and paste, and
1059    * replace "Strong" with "Soft" or "Weak" within the pasted text. The primary difference is that
1060    * strong entries store the key reference directly while soft and weak entries delegate to their
1061    * respective superclasses.
1062    */
1063 
1064   /**
1065    * Used for strongly-referenced keys.
1066    */
1067   static class StrongEntry<K, V> extends AbstractReferenceEntry<K, V> {
1068     final K key;
1069 
1070     StrongEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1071       this.key = key;
1072       this.hash = hash;
1073       this.next = next;
1074     }
1075 
1076     @Override
1077     public K getKey() {
1078       return this.key;
1079     }
1080 
1081     // The code below is exactly the same for each entry type.
1082 
1083     final int hash;
1084     final ReferenceEntry<K, V> next;
1085     volatile ValueReference<K, V> valueReference = unset();
1086 
1087     @Override
1088     public ValueReference<K, V> getValueReference() {
1089       return valueReference;
1090     }
1091 
1092     @Override
1093     public void setValueReference(ValueReference<K, V> valueReference) {
1094       this.valueReference = valueReference;
1095     }
1096 
1097     @Override
1098     public int getHash() {
1099       return hash;
1100     }
1101 
1102     @Override
1103     public ReferenceEntry<K, V> getNext() {
1104       return next;
1105     }
1106   }
1107 
1108   static final class StrongAccessEntry<K, V> extends StrongEntry<K, V> {
1109     StrongAccessEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1110       super(key, hash, next);
1111     }
1112 
1113     // The code below is exactly the same for each access entry type.
1114 
1115     volatile long accessTime = Long.MAX_VALUE;
1116 
1117     @Override
1118     public long getAccessTime() {
1119       return accessTime;
1120     }
1121 
1122     @Override
1123     public void setAccessTime(long time) {
1124       this.accessTime = time;
1125     }
1126 
1127     @GuardedBy("Segment.this")
1128     ReferenceEntry<K, V> nextAccess = nullEntry();
1129 
1130     @Override
1131     public ReferenceEntry<K, V> getNextInAccessQueue() {
1132       return nextAccess;
1133     }
1134 
1135     @Override
1136     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1137       this.nextAccess = next;
1138     }
1139 
1140     @GuardedBy("Segment.this")
1141     ReferenceEntry<K, V> previousAccess = nullEntry();
1142 
1143     @Override
1144     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1145       return previousAccess;
1146     }
1147 
1148     @Override
1149     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1150       this.previousAccess = previous;
1151     }
1152   }
1153 
1154   static final class StrongWriteEntry<K, V> extends StrongEntry<K, V> {
1155     StrongWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1156       super(key, hash, next);
1157     }
1158 
1159     // The code below is exactly the same for each write entry type.
1160 
1161     volatile long writeTime = Long.MAX_VALUE;
1162 
1163     @Override
1164     public long getWriteTime() {
1165       return writeTime;
1166     }
1167 
1168     @Override
1169     public void setWriteTime(long time) {
1170       this.writeTime = time;
1171     }
1172 
1173     @GuardedBy("Segment.this")
1174     ReferenceEntry<K, V> nextWrite = nullEntry();
1175 
1176     @Override
1177     public ReferenceEntry<K, V> getNextInWriteQueue() {
1178       return nextWrite;
1179     }
1180 
1181     @Override
1182     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1183       this.nextWrite = next;
1184     }
1185 
1186     @GuardedBy("Segment.this")
1187     ReferenceEntry<K, V> previousWrite = nullEntry();
1188 
1189     @Override
1190     public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1191       return previousWrite;
1192     }
1193 
1194     @Override
1195     public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1196       this.previousWrite = previous;
1197     }
1198   }
1199 
1200   static final class StrongAccessWriteEntry<K, V> extends StrongEntry<K, V> {
1201     StrongAccessWriteEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1202       super(key, hash, next);
1203     }
1204 
1205     // The code below is exactly the same for each access entry type.
1206 
1207     volatile long accessTime = Long.MAX_VALUE;
1208 
1209     @Override
1210     public long getAccessTime() {
1211       return accessTime;
1212     }
1213 
1214     @Override
1215     public void setAccessTime(long time) {
1216       this.accessTime = time;
1217     }
1218 
1219     @GuardedBy("Segment.this")
1220     ReferenceEntry<K, V> nextAccess = nullEntry();
1221 
1222     @Override
1223     public ReferenceEntry<K, V> getNextInAccessQueue() {
1224       return nextAccess;
1225     }
1226 
1227     @Override
1228     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1229       this.nextAccess = next;
1230     }
1231 
1232     @GuardedBy("Segment.this")
1233     ReferenceEntry<K, V> previousAccess = nullEntry();
1234 
1235     @Override
1236     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1237       return previousAccess;
1238     }
1239 
1240     @Override
1241     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1242       this.previousAccess = previous;
1243     }
1244 
1245     // The code below is exactly the same for each write entry type.
1246 
1247     volatile long writeTime = Long.MAX_VALUE;
1248 
1249     @Override
1250     public long getWriteTime() {
1251       return writeTime;
1252     }
1253 
1254     @Override
1255     public void setWriteTime(long time) {
1256       this.writeTime = time;
1257     }
1258 
1259     @GuardedBy("Segment.this")
1260     ReferenceEntry<K, V> nextWrite = nullEntry();
1261 
1262     @Override
1263     public ReferenceEntry<K, V> getNextInWriteQueue() {
1264       return nextWrite;
1265     }
1266 
1267     @Override
1268     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1269       this.nextWrite = next;
1270     }
1271 
1272     @GuardedBy("Segment.this")
1273     ReferenceEntry<K, V> previousWrite = nullEntry();
1274 
1275     @Override
1276     public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1277       return previousWrite;
1278     }
1279 
1280     @Override
1281     public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1282       this.previousWrite = previous;
1283     }
1284   }
1285 
1286   /**
1287    * Used for weakly-referenced keys.
1288    */
1289   static class WeakEntry<K, V> extends WeakReference<K> implements ReferenceEntry<K, V> {
1290     WeakEntry(ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1291       super(key, queue);
1292       this.hash = hash;
1293       this.next = next;
1294     }
1295 
1296     @Override
1297     public K getKey() {
1298       return get();
1299     }
1300 
1301     /*
1302      * It'd be nice to get these for free from AbstractReferenceEntry, but we're already extending
1303      * WeakReference<K>.
1304      */
1305 
1306     // null access
1307 
1308     @Override
1309     public long getAccessTime() {
1310       throw new UnsupportedOperationException();
1311     }
1312 
1313     @Override
1314     public void setAccessTime(long time) {
1315       throw new UnsupportedOperationException();
1316     }
1317 
1318     @Override
1319     public ReferenceEntry<K, V> getNextInAccessQueue() {
1320       throw new UnsupportedOperationException();
1321     }
1322 
1323     @Override
1324     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1325       throw new UnsupportedOperationException();
1326     }
1327 
1328     @Override
1329     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1330       throw new UnsupportedOperationException();
1331     }
1332 
1333     @Override
1334     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1335       throw new UnsupportedOperationException();
1336     }
1337 
1338     // null write
1339 
1340     @Override
1341     public long getWriteTime() {
1342       throw new UnsupportedOperationException();
1343     }
1344 
1345     @Override
1346     public void setWriteTime(long time) {
1347       throw new UnsupportedOperationException();
1348     }
1349 
1350     @Override
1351     public ReferenceEntry<K, V> getNextInWriteQueue() {
1352       throw new UnsupportedOperationException();
1353     }
1354 
1355     @Override
1356     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1357       throw new UnsupportedOperationException();
1358     }
1359 
1360     @Override
1361     public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1362       throw new UnsupportedOperationException();
1363     }
1364 
1365     @Override
1366     public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1367       throw new UnsupportedOperationException();
1368     }
1369 
1370     // The code below is exactly the same for each entry type.
1371 
1372     final int hash;
1373     final ReferenceEntry<K, V> next;
1374     volatile ValueReference<K, V> valueReference = unset();
1375 
1376     @Override
1377     public ValueReference<K, V> getValueReference() {
1378       return valueReference;
1379     }
1380 
1381     @Override
1382     public void setValueReference(ValueReference<K, V> valueReference) {
1383       this.valueReference = valueReference;
1384     }
1385 
1386     @Override
1387     public int getHash() {
1388       return hash;
1389     }
1390 
1391     @Override
1392     public ReferenceEntry<K, V> getNext() {
1393       return next;
1394     }
1395   }
1396 
1397   static final class WeakAccessEntry<K, V> extends WeakEntry<K, V> {
1398     WeakAccessEntry(
1399         ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1400       super(queue, key, hash, next);
1401     }
1402 
1403     // The code below is exactly the same for each access entry type.
1404 
1405     volatile long accessTime = Long.MAX_VALUE;
1406 
1407     @Override
1408     public long getAccessTime() {
1409       return accessTime;
1410     }
1411 
1412     @Override
1413     public void setAccessTime(long time) {
1414       this.accessTime = time;
1415     }
1416 
1417     @GuardedBy("Segment.this")
1418     ReferenceEntry<K, V> nextAccess = nullEntry();
1419 
1420     @Override
1421     public ReferenceEntry<K, V> getNextInAccessQueue() {
1422       return nextAccess;
1423     }
1424 
1425     @Override
1426     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1427       this.nextAccess = next;
1428     }
1429 
1430     @GuardedBy("Segment.this")
1431     ReferenceEntry<K, V> previousAccess = nullEntry();
1432 
1433     @Override
1434     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1435       return previousAccess;
1436     }
1437 
1438     @Override
1439     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1440       this.previousAccess = previous;
1441     }
1442   }
1443 
1444   static final class WeakWriteEntry<K, V> extends WeakEntry<K, V> {
1445     WeakWriteEntry(
1446         ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1447       super(queue, key, hash, next);
1448     }
1449 
1450     // The code below is exactly the same for each write entry type.
1451 
1452     volatile long writeTime = Long.MAX_VALUE;
1453 
1454     @Override
1455     public long getWriteTime() {
1456       return writeTime;
1457     }
1458 
1459     @Override
1460     public void setWriteTime(long time) {
1461       this.writeTime = time;
1462     }
1463 
1464     @GuardedBy("Segment.this")
1465     ReferenceEntry<K, V> nextWrite = nullEntry();
1466 
1467     @Override
1468     public ReferenceEntry<K, V> getNextInWriteQueue() {
1469       return nextWrite;
1470     }
1471 
1472     @Override
1473     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1474       this.nextWrite = next;
1475     }
1476 
1477     @GuardedBy("Segment.this")
1478     ReferenceEntry<K, V> previousWrite = nullEntry();
1479 
1480     @Override
1481     public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1482       return previousWrite;
1483     }
1484 
1485     @Override
1486     public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1487       this.previousWrite = previous;
1488     }
1489   }
1490 
1491   static final class WeakAccessWriteEntry<K, V> extends WeakEntry<K, V> {
1492     WeakAccessWriteEntry(
1493         ReferenceQueue<K> queue, K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1494       super(queue, key, hash, next);
1495     }
1496 
1497     // The code below is exactly the same for each access entry type.
1498 
1499     volatile long accessTime = Long.MAX_VALUE;
1500 
1501     @Override
1502     public long getAccessTime() {
1503       return accessTime;
1504     }
1505 
1506     @Override
1507     public void setAccessTime(long time) {
1508       this.accessTime = time;
1509     }
1510 
1511     @GuardedBy("Segment.this")
1512     ReferenceEntry<K, V> nextAccess = nullEntry();
1513 
1514     @Override
1515     public ReferenceEntry<K, V> getNextInAccessQueue() {
1516       return nextAccess;
1517     }
1518 
1519     @Override
1520     public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
1521       this.nextAccess = next;
1522     }
1523 
1524     @GuardedBy("Segment.this")
1525     ReferenceEntry<K, V> previousAccess = nullEntry();
1526 
1527     @Override
1528     public ReferenceEntry<K, V> getPreviousInAccessQueue() {
1529       return previousAccess;
1530     }
1531 
1532     @Override
1533     public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
1534       this.previousAccess = previous;
1535     }
1536 
1537     // The code below is exactly the same for each write entry type.
1538 
1539     volatile long writeTime = Long.MAX_VALUE;
1540 
1541     @Override
1542     public long getWriteTime() {
1543       return writeTime;
1544     }
1545 
1546     @Override
1547     public void setWriteTime(long time) {
1548       this.writeTime = time;
1549     }
1550 
1551     @GuardedBy("Segment.this")
1552     ReferenceEntry<K, V> nextWrite = nullEntry();
1553 
1554     @Override
1555     public ReferenceEntry<K, V> getNextInWriteQueue() {
1556       return nextWrite;
1557     }
1558 
1559     @Override
1560     public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
1561       this.nextWrite = next;
1562     }
1563 
1564     @GuardedBy("Segment.this")
1565     ReferenceEntry<K, V> previousWrite = nullEntry();
1566 
1567     @Override
1568     public ReferenceEntry<K, V> getPreviousInWriteQueue() {
1569       return previousWrite;
1570     }
1571 
1572     @Override
1573     public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
1574       this.previousWrite = previous;
1575     }
1576   }
1577 
1578   /**
1579    * References a weak value.
1580    */
1581   static class WeakValueReference<K, V>
1582       extends WeakReference<V> implements ValueReference<K, V> {
1583     final ReferenceEntry<K, V> entry;
1584 
1585     WeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
1586       super(referent, queue);
1587       this.entry = entry;
1588     }
1589 
1590     @Override
1591     public int getWeight() {
1592       return 1;
1593     }
1594 
1595     @Override
1596     public ReferenceEntry<K, V> getEntry() {
1597       return entry;
1598     }
1599 
1600     @Override
1601     public void notifyNewValue(V newValue) {}
1602 
1603     @Override
1604     public ValueReference<K, V> copyFor(
1605         ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1606       return new WeakValueReference<K, V>(queue, value, entry);
1607     }
1608 
1609     @Override
1610     public boolean isLoading() {
1611       return false;
1612     }
1613 
1614     @Override
1615     public boolean isActive() {
1616       return true;
1617     }
1618 
1619     @Override
1620     public V waitForValue() {
1621       return get();
1622     }
1623   }
1624 
1625   /**
1626    * References a soft value.
1627    */
1628   static class SoftValueReference<K, V>
1629       extends SoftReference<V> implements ValueReference<K, V> {
1630     final ReferenceEntry<K, V> entry;
1631 
1632     SoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry) {
1633       super(referent, queue);
1634       this.entry = entry;
1635     }
1636 
1637     @Override
1638     public int getWeight() {
1639       return 1;
1640     }
1641 
1642     @Override
1643     public ReferenceEntry<K, V> getEntry() {
1644       return entry;
1645     }
1646 
1647     @Override
1648     public void notifyNewValue(V newValue) {}
1649 
1650     @Override
1651     public ValueReference<K, V> copyFor(
1652         ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1653       return new SoftValueReference<K, V>(queue, value, entry);
1654     }
1655 
1656     @Override
1657     public boolean isLoading() {
1658       return false;
1659     }
1660 
1661     @Override
1662     public boolean isActive() {
1663       return true;
1664     }
1665 
1666     @Override
1667     public V waitForValue() {
1668       return get();
1669     }
1670   }
1671 
1672   /**
1673    * References a strong value.
1674    */
1675   static class StrongValueReference<K, V> implements ValueReference<K, V> {
1676     final V referent;
1677 
1678     StrongValueReference(V referent) {
1679       this.referent = referent;
1680     }
1681 
1682     @Override
1683     public V get() {
1684       return referent;
1685     }
1686 
1687     @Override
1688     public int getWeight() {
1689       return 1;
1690     }
1691 
1692     @Override
1693     public ReferenceEntry<K, V> getEntry() {
1694       return null;
1695     }
1696 
1697     @Override
1698     public ValueReference<K, V> copyFor(
1699         ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1700       return this;
1701     }
1702 
1703     @Override
1704     public boolean isLoading() {
1705       return false;
1706     }
1707 
1708     @Override
1709     public boolean isActive() {
1710       return true;
1711     }
1712 
1713     @Override
1714     public V waitForValue() {
1715       return get();
1716     }
1717 
1718     @Override
1719     public void notifyNewValue(V newValue) {}
1720   }
1721 
1722   /**
1723    * References a weak value.
1724    */
1725   static final class WeightedWeakValueReference<K, V> extends WeakValueReference<K, V> {
1726     final int weight;
1727 
1728     WeightedWeakValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry,
1729         int weight) {
1730       super(queue, referent, entry);
1731       this.weight = weight;
1732     }
1733 
1734     @Override
1735     public int getWeight() {
1736       return weight;
1737     }
1738 
1739     @Override
1740     public ValueReference<K, V> copyFor(
1741         ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1742       return new WeightedWeakValueReference<K, V>(queue, value, entry, weight);
1743     }
1744   }
1745 
1746   /**
1747    * References a soft value, and additionally carries the entry's weight.
1748    */
1749   static final class WeightedSoftValueReference<K, V> extends SoftValueReference<K, V> {
1750     final int weight;
1751 
1752     WeightedSoftValueReference(ReferenceQueue<V> queue, V referent, ReferenceEntry<K, V> entry,
1753         int weight) {
1754       super(queue, referent, entry);
1755       this.weight = weight;
1756     }
1757 
1758     @Override
1759     public int getWeight() {
1760       return weight;
1761     }
1762 
1763     @Override
1764     public ValueReference<K, V> copyFor(
1765         ReferenceQueue<V> queue, V value, ReferenceEntry<K, V> entry) {
1766       return new WeightedSoftValueReference<K, V>(queue, value, entry, weight);
1767     }
1768   }
1769 
1770   /**
1771    * References a strong value, and additionally carries the entry's weight.
1772    */
1773   static final class WeightedStrongValueReference<K, V> extends StrongValueReference<K, V> {
1774     final int weight;
1775 
1776     WeightedStrongValueReference(V referent, int weight) {
1777       super(referent);
1778       this.weight = weight;
1779     }
1780 
1781     @Override
1782     public int getWeight() {
1783       return weight;
1784     }
1785   }
1786 
1787   /**
1788    * Applies a supplemental hash function to a given hash code, which defends against poor-quality
1789    * hash functions. This is critical because the concurrent hash map uses power-of-two length hash
1790    * tables that would otherwise encounter collisions for hash codes that do not differ in the
1791    * lower or upper bits.
1792    *
1793    * @param h hash code
1794    */
1795   static int rehash(int h) {
1796     // Spread bits to regularize both segment and index locations,
1797     // using variant of single-word Wang/Jenkins hash.
1798     // TODO(kevinb): use Hashing/move this to Hashing?
1799     h += (h << 15) ^ 0xffffcd7d;
1800     h ^= (h >>> 10);
1801     h += (h << 3);
1802     h ^= (h >>> 6);
1803     h += (h << 2) + (h << 14);
1804     return h ^ (h >>> 16);
1805   }
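       /*
        * For example (hypothetical hash codes): the bucket index within a segment is computed as
        * (hash & (table.length() - 1)), so only the low-order bits matter. Codes such as
        * 0x10000000 and 0x20000000 differ only in their upper bits and would both fall into
        * bucket 0 of a 16-wide table; rehash mixes the upper bits into the lower ones, so such
        * codes are likely to end up in different buckets.
        */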
1806 
1807   /**
1808    * This method is a convenience for testing. Code should call {@link Segment#newEntry} directly.
1809    */
1810   @GuardedBy("Segment.this")
1811   @VisibleForTesting
1812   ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
1813     return segmentFor(hash).newEntry(key, hash, next);
1814   }
1815 
1816   /**
1817    * This method is a convenience for testing. Code should call {@link Segment#copyEntry} directly.
1818    */
1819   @GuardedBy("Segment.this")
1820   @VisibleForTesting
1821   ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
1822     int hash = original.getHash();
1823     return segmentFor(hash).copyEntry(original, newNext);
1824   }
1825 
1826   /**
1827    * This method is a convenience for testing. Code should call {@link Segment#setValue} instead.
1828    */
1829   @GuardedBy("Segment.this")
1830   @VisibleForTesting
1831   ValueReference<K, V> newValueReference(ReferenceEntry<K, V> entry, V value, int weight) {
1832     int hash = entry.getHash();
1833     return valueStrength.referenceValue(segmentFor(hash), entry, checkNotNull(value), weight);
1834   }
1835 
1836   int hash(@Nullable Object key) {
1837     int h = keyEquivalence.hash(key);
1838     return rehash(h);
1839   }
1840 
1841   void reclaimValue(ValueReference<K, V> valueReference) {
1842     ReferenceEntry<K, V> entry = valueReference.getEntry();
1843     int hash = entry.getHash();
1844     segmentFor(hash).reclaimValue(entry.getKey(), hash, valueReference);
1845   }
1846 
1847   void reclaimKey(ReferenceEntry<K, V> entry) {
1848     int hash = entry.getHash();
1849     segmentFor(hash).reclaimKey(entry, hash);
1850   }
1851 
1852   /**
1853    * This method is a convenience for testing. Code should call {@link Segment#getLiveValue}
1854    * instead.
1855    */
1856   @VisibleForTesting
1857   boolean isLive(ReferenceEntry<K, V> entry, long now) {
1858     return segmentFor(entry.getHash()).getLiveValue(entry, now) != null;
1859   }
1860 
1861   /**
1862    * Returns the segment that should be used for a key with the given hash.
1863    *
1864    * @param hash the hash code for the key
1865    * @return the segment
1866    */
1867   Segment<K, V> segmentFor(int hash) {
1868     // TODO(fry): Lazily create segments?
1869     return segments[(hash >>> segmentShift) & segmentMask];
1870   }
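       /*
        * For illustration (hypothetical sizing): a concurrencyLevel of 4 typically yields four
        * segments, so segmentMask is 3 and segmentShift is 30, and the two highest-order bits of
        * the rehashed hash pick the segment. The low-order bits later pick the bucket inside that
        * segment's table, so the two choices draw on disjoint parts of the hash.
        */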
1871 
1872   Segment<K, V> createSegment(
1873       int initialCapacity, long maxSegmentWeight, StatsCounter statsCounter) {
1874     return new Segment<K, V>(this, initialCapacity, maxSegmentWeight, statsCounter);
1875   }
1876 
1877   /**
1878    * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
1879    * loading, or expired. Unlike {@link Segment#getLiveValue} this method does not attempt to
1880    * clean up stale entries. As such it should only be called outside of a segment context, such as
1881    * during iteration.
1882    */
1883   @Nullable
1884   V getLiveValue(ReferenceEntry<K, V> entry, long now) {
1885     if (entry.getKey() == null) {
1886       return null;
1887     }
1888     V value = entry.getValueReference().get();
1889     if (value == null) {
1890       return null;
1891     }
1892 
1893     if (isExpired(entry, now)) {
1894       return null;
1895     }
1896     return value;
1897   }
1898 
1899   // expiration
1900 
1901   /**
1902    * Returns true if the entry has expired.
1903    */
1904   boolean isExpired(ReferenceEntry<K, V> entry, long now) {
1905     checkNotNull(entry);
1906     if (expiresAfterAccess()
1907         && (now - entry.getAccessTime() >= expireAfterAccessNanos)) {
1908       return true;
1909     }
1910     if (expiresAfterWrite()
1911         && (now - entry.getWriteTime() >= expireAfterWriteNanos)) {
1912       return true;
1913     }
1914     return false;
1915   }
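       /*
        * The comparisons are written as (now - timestamp >= duration) rather than
        * (now >= timestamp + duration); because ticker readings are nanosecond values that may
        * wrap around Long.MAX_VALUE, the subtraction form remains correct across overflow, in the
        * same way System.nanoTime() differences are meant to be compared.
        */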
1916 
1917   // queues
1918 
1919   @GuardedBy("Segment.this")
1920   static <K, V> void connectAccessOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
1921     previous.setNextInAccessQueue(next);
1922     next.setPreviousInAccessQueue(previous);
1923   }
1924 
1925   @GuardedBy("Segment.this")
1926   static <K, V> void nullifyAccessOrder(ReferenceEntry<K, V> nulled) {
1927     ReferenceEntry<K, V> nullEntry = nullEntry();
1928     nulled.setNextInAccessQueue(nullEntry);
1929     nulled.setPreviousInAccessQueue(nullEntry);
1930   }
1931 
1932   @GuardedBy("Segment.this")
1933   static <K, V> void connectWriteOrder(ReferenceEntry<K, V> previous, ReferenceEntry<K, V> next) {
1934     previous.setNextInWriteQueue(next);
1935     next.setPreviousInWriteQueue(previous);
1936   }
1937 
1938   @GuardedBy("Segment.this")
1939   static <K, V> void nullifyWriteOrder(ReferenceEntry<K, V> nulled) {
1940     ReferenceEntry<K, V> nullEntry = nullEntry();
1941     nulled.setNextInWriteQueue(nullEntry);
1942     nulled.setPreviousInWriteQueue(nullEntry);
1943   }
1944 
1945   /**
1946    * Notifies listeners that an entry has been automatically removed due to expiration, eviction,
1947    * or eligibility for garbage collection. This should be called every time expireEntries or
1948    * evictEntry is called (once the lock is released).
1949    */
1950   void processPendingNotifications() {
1951     RemovalNotification<K, V> notification;
1952     while ((notification = removalNotificationQueue.poll()) != null) {
1953       try {
1954         removalListener.onRemoval(notification);
1955       } catch (Throwable e) {
1956         logger.log(Level.WARNING, "Exception thrown by removal listener", e);
1957       }
1958     }
1959   }
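       /*
        * A removal listener registered through CacheBuilder.removalListener is invoked here, on
        * whichever thread happens to perform this cleanup (typically a thread doing a cache
        * write), so listeners should be fast; any exception they throw is logged and swallowed
        * rather than propagated to the caller.
        */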
1960 
1961   @SuppressWarnings("unchecked")
1962   final Segment<K, V>[] newSegmentArray(int ssize) {
1963     return new Segment[ssize];
1964   }
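       /*
        * Java does not allow creating an array of a parameterized type, so the raw Segment[] is
        * created and returned as Segment<K, V>[]; the unchecked warning is suppressed because the
        * array is only used internally.
        */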
1965 
1966   // Inner Classes
1967 
1968   /**
1969    * Segments are specialized versions of hash tables. This subclass inherits from ReentrantLock
1970    * opportunistically, just to simplify some locking and avoid separate construction.
1971    */
1972   @SuppressWarnings("serial") // This class is never serialized.
1973   static class Segment<K, V> extends ReentrantLock {
1974 
1975     /*
1976      * TODO(fry): Consider copying variables (like evictsBySize) from outer class into this class.
1977      * It will require more memory but will reduce indirection.
1978      */
1979 
1980     /*
1981      * Segments maintain a table of entry lists that are ALWAYS kept in a consistent state, so they
1982      * can be read without locking. Next fields of nodes are immutable (final). All list additions
1983      * are performed at the front of each bin. This makes it easy to check changes, and also fast to
1984      * traverse. When nodes would otherwise be changed, new nodes are created to replace them. This
1985      * works well for hash tables since the bin lists tend to be short. (The average length is less
1986      * than two.)
1987      *
1988      * Read operations can thus proceed without locking, but rely on selected uses of volatiles to
1989      * ensure that completed write operations performed by other threads are noticed. For most
1990      * purposes, the "count" field, tracking the number of elements, serves as that volatile
1991      * variable ensuring visibility. This is convenient because this field needs to be read in many
1992      * read operations anyway:
1993      *
1994      * - All (unsynchronized) read operations must first read the "count" field, and should not
1995      * look at table entries if it is 0.
1996      *
1997      * - All (synchronized) write operations should write to the "count" field after structurally
1998      * changing any bin. The operations must not take any action that could even momentarily
1999      * cause a concurrent read operation to see inconsistent data. This is made easier by the
2000      * nature of the read operations in Map. For example, no operation can reveal that the table
2001      * has grown but the threshold has not yet been updated, so there are no atomicity requirements
2002      * for this with respect to reads.
2003      *
2004      * As a guide, all critical volatile reads and writes to the count field are marked in code
2005      * comments.
2006      */
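         /*
          * In Java Memory Model terms: a writer's volatile write to "count" happens-before a
          * later volatile read of "count" by a reader, so every entry the writer linked into the
          * table before bumping "count" is visible to that reader without any further
          * synchronization.
          */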
2007 
2008     final LocalCache<K, V> map;
2009 
2010     /**
2011      * The number of live elements in this segment's region.
2012      */
2013     volatile int count;
2014 
2015     /**
2016      * The weight of the live elements in this segment's region.
2017      */
2018     @GuardedBy("Segment.this")
2019     int totalWeight;
2020 
2021     /**
2022      * Number of updates that alter the size of the table. This is used during bulk-read methods to
2023      * make sure they see a consistent snapshot: If modCounts change during a traversal of segments
2024      * computing size or checking containsValue, then we might have an inconsistent view of state,
2025      * so we (usually) must retry.
2026      */
2027     int modCount;
2028 
2029     /**
2030      * The table is expanded when its size exceeds this threshold. (The value of this field is
2031      * always {@code (int) (capacity * 0.75)}.)
2032      */
2033     int threshold;
2034 
2035     /**
2036      * The per-segment table.
2037      */
2038     volatile AtomicReferenceArray<ReferenceEntry<K, V>> table;
2039 
2040     /**
2041      * The maximum weight of this segment. UNSET_INT if there is no maximum.
2042      */
2043     final long maxSegmentWeight;
2044 
2045     /**
2046      * The key reference queue contains entries whose keys have been garbage collected, and which
2047      * need to be cleaned up internally.
2048      */
2049     final ReferenceQueue<K> keyReferenceQueue;
2050 
2051     /**
2052      * The value reference queue contains value references whose values have been garbage collected,
2053      * and which need to be cleaned up internally.
2054      */
2055     final ReferenceQueue<V> valueReferenceQueue;
2056 
2057     /**
2058      * The recency queue is used to record which entries were accessed for updating the access
2059      * list's ordering. It is drained as a batch operation when either the DRAIN_THRESHOLD is
2060      * crossed or a write occurs on the segment.
2061      */
2062     final Queue<ReferenceEntry<K, V>> recencyQueue;
2063 
2064     /**
2065      * A counter of the number of reads since the last write, used to drain queues on a small
2066      * fraction of read operations.
2067      */
2068     final AtomicInteger readCount = new AtomicInteger();
2069 
2070     /**
2071      * A queue of elements currently in the map, ordered by write time. Elements are added to the
2072      * tail of the queue on write.
2073      */
2074     @GuardedBy("Segment.this")
2075     final Queue<ReferenceEntry<K, V>> writeQueue;
2076 
2077     /**
2078      * A queue of elements currently in the map, ordered by access time. Elements are added to the
2079      * tail of the queue on access (note that writes count as accesses).
2080      */
2081     @GuardedBy("Segment.this")
2082     final Queue<ReferenceEntry<K, V>> accessQueue;
2083 
2084     /** Accumulates cache statistics. */
2085     final StatsCounter statsCounter;
2086 
2087     Segment(LocalCache<K, V> map, int initialCapacity, long maxSegmentWeight,
2088         StatsCounter statsCounter) {
2089       this.map = map;
2090       this.maxSegmentWeight = maxSegmentWeight;
2091       this.statsCounter = checkNotNull(statsCounter);
2092       initTable(newEntryArray(initialCapacity));
2093 
2094       keyReferenceQueue = map.usesKeyReferences()
2095            ? new ReferenceQueue<K>() : null;
2096 
2097       valueReferenceQueue = map.usesValueReferences()
2098            ? new ReferenceQueue<V>() : null;
2099 
2100       recencyQueue = map.usesAccessQueue()
2101           ? new ConcurrentLinkedQueue<ReferenceEntry<K, V>>()
2102           : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2103 
2104       writeQueue = map.usesWriteQueue()
2105           ? new WriteQueue<K, V>()
2106           : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2107 
2108       accessQueue = map.usesAccessQueue()
2109           ? new AccessQueue<K, V>()
2110           : LocalCache.<ReferenceEntry<K, V>>discardingQueue();
2111     }
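         /*
          * For example (hypothetical configurations): a cache built with maximumSize and
          * expireAfterAccess but strong keys and values gets a real recencyQueue and accessQueue,
          * a discarding writeQueue, and null reference queues; a cache built with weakValues()
          * and no size or time bounds gets a real valueReferenceQueue and discarding
          * recency/write/access queues.
          */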
2112 
2113     AtomicReferenceArray<ReferenceEntry<K, V>> newEntryArray(int size) {
2114       return new AtomicReferenceArray<ReferenceEntry<K, V>>(size);
2115     }
2116 
2117     void initTable(AtomicReferenceArray<ReferenceEntry<K, V>> newTable) {
2118       this.threshold = newTable.length() * 3 / 4; // 0.75
2119       if (!map.customWeigher() && this.threshold == maxSegmentWeight) {
2120         // prevent spurious expansion before eviction
2121         this.threshold++;
2122       }
2123       this.table = newTable;
2124     }
2125 
2126     @GuardedBy("Segment.this")
2127     ReferenceEntry<K, V> newEntry(K key, int hash, @Nullable ReferenceEntry<K, V> next) {
2128       return map.entryFactory.newEntry(this, checkNotNull(key), hash, next);
2129     }
2130 
2131     /**
2132      * Copies {@code original} into a new entry chained to {@code newNext}. Returns the new entry,
2133      * or {@code null} if {@code original} was already garbage collected.
2134      */
2135     @GuardedBy("Segment.this")
2136     ReferenceEntry<K, V> copyEntry(ReferenceEntry<K, V> original, ReferenceEntry<K, V> newNext) {
2137       if (original.getKey() == null) {
2138         // key collected
2139         return null;
2140       }
2141 
2142       ValueReference<K, V> valueReference = original.getValueReference();
2143       V value = valueReference.get();
2144       if ((value == null) && valueReference.isActive()) {
2145         // value collected
2146         return null;
2147       }
2148 
2149       ReferenceEntry<K, V> newEntry = map.entryFactory.copyEntry(this, original, newNext);
2150       newEntry.setValueReference(valueReference.copyFor(this.valueReferenceQueue, value, newEntry));
2151       return newEntry;
2152     }
2153 
2154     /**
2155      * Sets a new value for an entry. Adds newly created entries to the end of the access queue.
2156      */
2157     @GuardedBy("Segment.this")
2158     void setValue(ReferenceEntry<K, V> entry, K key, V value, long now) {
2159       ValueReference<K, V> previous = entry.getValueReference();
2160       int weight = map.weigher.weigh(key, value);
2161       checkState(weight >= 0, "Weights must be non-negative");
2162 
2163       ValueReference<K, V> valueReference =
2164           map.valueStrength.referenceValue(this, entry, value, weight);
2165       entry.setValueReference(valueReference);
2166       recordWrite(entry, weight, now);
2167       previous.notifyNewValue(value);
2168     }
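         /*
          * With the default weigher every entry weighs 1, so maxSegmentWeight is effectively a
          * per-segment maximum size; a custom Weigher supplied through CacheBuilder.weigher can
          * weigh entries unevenly, for instance by the length of a cached string, and the weight
          * computed here is what eviction later subtracts when the entry is removed.
          */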
2169 
2170     // loading
2171 
2172     V get(K key, int hash, CacheLoader<? super K, V> loader) throws ExecutionException {
2173       checkNotNull(key);
2174       checkNotNull(loader);
2175       try {
2176         if (count != 0) { // read-volatile
2177           // don't call getLiveEntry, which would ignore loading values
2178           ReferenceEntry<K, V> e = getEntry(key, hash);
2179           if (e != null) {
2180             long now = map.ticker.read();
2181             V value = getLiveValue(e, now);
2182             if (value != null) {
2183               recordRead(e, now);
2184               statsCounter.recordHits(1);
2185               return scheduleRefresh(e, key, hash, value, now, loader);
2186             }
2187             ValueReference<K, V> valueReference = e.getValueReference();
2188             if (valueReference.isLoading()) {
2189               return waitForLoadingValue(e, key, valueReference);
2190             }
2191           }
2192         }
2193 
2194         // at this point e is either null or expired;
2195         return lockedGetOrLoad(key, hash, loader);
2196       } catch (ExecutionException ee) {
2197         Throwable cause = ee.getCause();
2198         if (cause instanceof Error) {
2199           throw new ExecutionError((Error) cause);
2200         } else if (cause instanceof RuntimeException) {
2201           throw new UncheckedExecutionException(cause);
2202         }
2203         throw ee;
2204       } finally {
2205         postReadCleanup();
2206       }
2207     }
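         /*
          * This is the code path behind LoadingCache.get(K) and getUnchecked(K): both hash the
          * key, pick a segment, and arrive here with the cache's default loader, while
          * Cache.get(K, Callable) arrives here with a one-off loader wrapping the callable.
          */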
2208 
2209     V lockedGetOrLoad(K key, int hash, CacheLoader<? super K, V> loader)
2210         throws ExecutionException {
2211       ReferenceEntry<K, V> e;
2212       ValueReference<K, V> valueReference = null;
2213       LoadingValueReference<K, V> loadingValueReference = null;
2214       boolean createNewEntry = true;
2215 
2216       lock();
2217       try {
2218         // re-read ticker once inside the lock
2219         long now = map.ticker.read();
2220         preWriteCleanup(now);
2221 
2222         int newCount = this.count - 1;
2223         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2224         int index = hash & (table.length() - 1);
2225         ReferenceEntry<K, V> first = table.get(index);
2226 
2227         for (e = first; e != null; e = e.getNext()) {
2228           K entryKey = e.getKey();
2229           if (e.getHash() == hash && entryKey != null
2230               && map.keyEquivalence.equivalent(key, entryKey)) {
2231             valueReference = e.getValueReference();
2232             if (valueReference.isLoading()) {
2233               createNewEntry = false;
2234             } else {
2235               V value = valueReference.get();
2236               if (value == null) {
2237                 enqueueNotification(entryKey, hash, valueReference, RemovalCause.COLLECTED);
2238               } else if (map.isExpired(e, now)) {
2239                 // This is a duplicate check, as preWriteCleanup already purged expired
2240                 // entries, but let's accommodate an incorrect expiration queue.
2241                 enqueueNotification(entryKey, hash, valueReference, RemovalCause.EXPIRED);
2242               } else {
2243                 recordLockedRead(e, now);
2244                 statsCounter.recordHits(1);
2245                 // we were concurrent with loading; don't consider refresh
2246                 return value;
2247               }
2248 
2249               // immediately reuse invalid entries
2250               writeQueue.remove(e);
2251               accessQueue.remove(e);
2252               this.count = newCount; // write-volatile
2253             }
2254             break;
2255           }
2256         }
2257 
2258         if (createNewEntry) {
2259           loadingValueReference = new LoadingValueReference<K, V>();
2260 
2261           if (e == null) {
2262             e = newEntry(key, hash, first);
2263             e.setValueReference(loadingValueReference);
2264             table.set(index, e);
2265           } else {
2266             e.setValueReference(loadingValueReference);
2267           }
2268         }
2269       } finally {
2270         unlock();
2271         postWriteCleanup();
2272       }
2273 
2274       if (createNewEntry) {
2275         try {
2276           // Synchronizes on the entry to allow failing fast when a recursive load is
2277           // detected. This may be circumvented when an entry is copied, but will fail fast most
2278           // of the time.
2279           synchronized (e) {
2280             return loadSync(key, hash, loadingValueReference, loader);
2281           }
2282         } finally {
2283           statsCounter.recordMisses(1);
2284         }
2285       } else {
2286         // The entry already exists. Wait for loading.
2287         return waitForLoadingValue(e, key, valueReference);
2288       }
2289     }
2290 
2291     V waitForLoadingValue(ReferenceEntry<K, V> e, K key, ValueReference<K, V> valueReference)
2292         throws ExecutionException {
2293       if (!valueReference.isLoading()) {
2294         throw new AssertionError();
2295       }
2296 
2297       checkState(!Thread.holdsLock(e), "Recursive load of: %s", key);
2298       // don't consider expiration as we're concurrent with loading
2299       try {
2300         V value = valueReference.waitForValue();
2301         if (value == null) {
2302           throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
2303         }
2304         // re-read ticker now that loading has completed
2305         long now = map.ticker.read();
2306         recordRead(e, now);
2307         return value;
2308       } finally {
2309         statsCounter.recordMisses(1);
2310       }
2311     }
2312 
2313     // at most one of loadSync/loadAsync may be called for any given LoadingValueReference
2314 
2315     V loadSync(K key, int hash, LoadingValueReference<K, V> loadingValueReference,
2316         CacheLoader<? super K, V> loader) throws ExecutionException {
2317       ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2318       return getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2319     }
2320 
2321     ListenableFuture<V> loadAsync(final K key, final int hash,
2322         final LoadingValueReference<K, V> loadingValueReference, CacheLoader<? super K, V> loader) {
2323       final ListenableFuture<V> loadingFuture = loadingValueReference.loadFuture(key, loader);
2324       loadingFuture.addListener(
2325           new Runnable() {
2326             @Override
2327             public void run() {
2328               try {
2329                 V newValue = getAndRecordStats(key, hash, loadingValueReference, loadingFuture);
2330               } catch (Throwable t) {
2331                 logger.log(Level.WARNING, "Exception thrown during refresh", t);
2332                 loadingValueReference.setException(t);
2333               }
2334             }
2335           }, sameThreadExecutor);
2336       return loadingFuture;
2337     }
2338 
2339     /**
2340      * Waits uninterruptibly for {@code newValue} to be loaded, and then records loading stats.
2341      */
2342     V getAndRecordStats(K key, int hash, LoadingValueReference<K, V> loadingValueReference,
2343         ListenableFuture<V> newValue) throws ExecutionException {
2344       V value = null;
2345       try {
2346         value = getUninterruptibly(newValue);
2347         if (value == null) {
2348           throw new InvalidCacheLoadException("CacheLoader returned null for key " + key + ".");
2349         }
2350         statsCounter.recordLoadSuccess(loadingValueReference.elapsedNanos());
2351         storeLoadedValue(key, hash, loadingValueReference, value);
2352         return value;
2353       } finally {
2354         if (value == null) {
2355           statsCounter.recordLoadException(loadingValueReference.elapsedNanos());
2356           removeLoadingValue(key, hash, loadingValueReference);
2357         }
2358       }
2359     }
2360 
2361     V scheduleRefresh(ReferenceEntry<K, V> entry, K key, int hash, V oldValue, long now,
2362         CacheLoader<? super K, V> loader) {
2363       if (map.refreshes() && (now - entry.getWriteTime() > map.refreshNanos)
2364           && !entry.getValueReference().isLoading()) {
2365         V newValue = refresh(key, hash, loader, true);
2366         if (newValue != null) {
2367           return newValue;
2368         }
2369       }
2370       return oldValue;
2371     }
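         /*
          * A minimal configuration sketch for this refresh path (names are illustrative):
          *
          *   LoadingCache<String, Config> cache = CacheBuilder.newBuilder()
          *       .refreshAfterWrite(1, TimeUnit.MINUTES)
          *       .build(loader);
          *
          * Once the interval has elapsed, the next read through the loader schedules a reload via
          * refresh/loadAsync. Unless CacheLoader.reload is overridden to complete on another
          * executor, that reload runs on the reading thread; concurrent readers keep receiving
          * the old value, since insertLoadingValueReference returns null to them while the first
          * refresh is still pending.
          */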
2372 
2373     /**
2374      * Refreshes the value associated with {@code key}, unless another thread is already doing so.
2375      * Returns the newly refreshed value associated with {@code key} if it was refreshed inline, or
2376      * {@code null} if another thread is performing the refresh or if an error occurs during
2377      * refresh.
2378      */
2379     @Nullable
2380     V refresh(K key, int hash, CacheLoader<? super K, V> loader, boolean checkTime) {
2381       final LoadingValueReference<K, V> loadingValueReference =
2382           insertLoadingValueReference(key, hash, checkTime);
2383       if (loadingValueReference == null) {
2384         return null;
2385       }
2386 
2387       ListenableFuture<V> result = loadAsync(key, hash, loadingValueReference, loader);
2388       if (result.isDone()) {
2389         try {
2390           return Uninterruptibles.getUninterruptibly(result);
2391         } catch (Throwable t) {
2392           // don't let refresh exceptions propagate; error was already logged
2393         }
2394       }
2395       return null;
2396     }
2397 
2398     /**
2399      * Returns a newly inserted {@code LoadingValueReference}, or null if the live value reference
2400      * is already loading.
2401      */
2402     @Nullable
2403     LoadingValueReference<K, V> insertLoadingValueReference(final K key, final int hash,
2404         boolean checkTime) {
2405       ReferenceEntry<K, V> e = null;
2406       lock();
2407       try {
2408         long now = map.ticker.read();
2409         preWriteCleanup(now);
2410 
2411         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2412         int index = hash & (table.length() - 1);
2413         ReferenceEntry<K, V> first = table.get(index);
2414 
2415         // Look for an existing entry.
2416         for (e = first; e != null; e = e.getNext()) {
2417           K entryKey = e.getKey();
2418           if (e.getHash() == hash && entryKey != null
2419               && map.keyEquivalence.equivalent(key, entryKey)) {
2420             // We found an existing entry.
2421 
2422             ValueReference<K, V> valueReference = e.getValueReference();
2423             if (valueReference.isLoading()
2424                 || (checkTime && (now - e.getWriteTime() < map.refreshNanos))) {
2425               // refresh is a no-op if loading is pending
2426               // if checkTime, we want to check *after* acquiring the lock if refresh still needs
2427               // to be scheduled
2428               return null;
2429             }
2430 
2431             // continue returning old value while loading
2432             ++modCount;
2433             LoadingValueReference<K, V> loadingValueReference =
2434                 new LoadingValueReference<K, V>(valueReference);
2435             e.setValueReference(loadingValueReference);
2436             return loadingValueReference;
2437           }
2438         }
2439 
2440         ++modCount;
2441         LoadingValueReference<K, V> loadingValueReference = new LoadingValueReference<K, V>();
2442         e = newEntry(key, hash, first);
2443         e.setValueReference(loadingValueReference);
2444         table.set(index, e);
2445         return loadingValueReference;
2446       } finally {
2447         unlock();
2448         postWriteCleanup();
2449       }
2450     }
2451 
2452     // reference queues, for garbage collection cleanup
2453 
2454     /**
2455      * Clean up collected entries when the lock is available.
2456      */
2457     void tryDrainReferenceQueues() {
2458       if (tryLock()) {
2459         try {
2460           drainReferenceQueues();
2461         } finally {
2462           unlock();
2463         }
2464       }
2465     }
2466 
2467     /**
2468      * Drain the key and value reference queues, cleaning up internal entries containing garbage
2469      * collected keys or values.
2470      */
2471     @GuardedBy("Segment.this")
2472     void drainReferenceQueues() {
2473       if (map.usesKeyReferences()) {
2474         drainKeyReferenceQueue();
2475       }
2476       if (map.usesValueReferences()) {
2477         drainValueReferenceQueue();
2478       }
2479     }
2480 
2481     @GuardedBy("Segment.this")
2482     void drainKeyReferenceQueue() {
2483       Reference<? extends K> ref;
2484       int i = 0;
2485       while ((ref = keyReferenceQueue.poll()) != null) {
2486         @SuppressWarnings("unchecked")
2487         ReferenceEntry<K, V> entry = (ReferenceEntry<K, V>) ref;
2488         map.reclaimKey(entry);
2489         if (++i == DRAIN_MAX) {
2490           break;
2491         }
2492       }
2493     }
2494 
2495     @GuardedBy("Segment.this")
2496     void drainValueReferenceQueue() {
2497       Reference<? extends V> ref;
2498       int i = 0;
2499       while ((ref = valueReferenceQueue.poll()) != null) {
2500         @SuppressWarnings("unchecked")
2501         ValueReference<K, V> valueReference = (ValueReference<K, V>) ref;
2502         map.reclaimValue(valueReference);
2503         if (++i == DRAIN_MAX) {
2504           break;
2505         }
2506       }
2507     }
2508 
2509     /**
2510      * Clears all entries from the key and value reference queues.
2511      */
2512     void clearReferenceQueues() {
2513       if (map.usesKeyReferences()) {
2514         clearKeyReferenceQueue();
2515       }
2516       if (map.usesValueReferences()) {
2517         clearValueReferenceQueue();
2518       }
2519     }
2520 
2521     void clearKeyReferenceQueue() {
2522       while (keyReferenceQueue.poll() != null) {}
2523     }
2524 
2525     void clearValueReferenceQueue() {
2526       while (valueReferenceQueue.poll() != null) {}
2527     }
2528 
2529     // recency queue, shared by expiration and eviction
2530 
2531     /**
2532      * Records the relative order in which this read was performed by adding {@code entry} to the
2533      * recency queue. At write-time, or when the queue is full past the threshold, the queue will
2534      * be drained and the entries therein processed.
2535      *
2536      * <p>Note: locked reads should use {@link #recordLockedRead}.
2537      */
2538     void recordRead(ReferenceEntry<K, V> entry, long now) {
2539       if (map.recordsAccess()) {
2540         entry.setAccessTime(now);
2541       }
2542       recencyQueue.add(entry);
2543     }
2544 
2545     /**
2546      * Updates the eviction metadata that {@code entry} was just read. This currently amounts to
2547      * adding {@code entry} to relevant eviction lists.
2548      *
2549      * <p>Note: this method should only be called under lock, as it directly manipulates the
2550      * eviction queues. Unlocked reads should use {@link #recordRead}.
2551      */
2552     @GuardedBy("Segment.this")
2553     void recordLockedRead(ReferenceEntry<K, V> entry, long now) {
2554       if (map.recordsAccess()) {
2555         entry.setAccessTime(now);
2556       }
2557       accessQueue.add(entry);
2558     }
2559 
2560     /**
2561      * Updates eviction metadata that {@code entry} was just written. This currently amounts to
2562      * adding {@code entry} to relevant eviction lists.
2563      */
2564     @GuardedBy("Segment.this")
2565     void recordWrite(ReferenceEntry<K, V> entry, int weight, long now) {
2566       // we are already under lock, so drain the recency queue immediately
2567       drainRecencyQueue();
2568       totalWeight += weight;
2569 
2570       if (map.recordsAccess()) {
2571         entry.setAccessTime(now);
2572       }
2573       if (map.recordsWrite()) {
2574         entry.setWriteTime(now);
2575       }
2576       accessQueue.add(entry);
2577       writeQueue.add(entry);
2578     }
2579 
2580     /**
2581      * Drains the recency queue, updating eviction metadata that the entries therein were read in
2582      * the specified relative order. This currently amounts to adding them to relevant eviction
2583      * lists (accounting for the fact that they could have been removed from the map since being
2584      * added to the recency queue).
2585      */
2586     @GuardedBy("Segment.this")
2587     void drainRecencyQueue() {
2588       ReferenceEntry<K, V> e;
2589       while ((e = recencyQueue.poll()) != null) {
2590         // An entry may be in the recency queue despite it being removed from
2591         // the map. This can occur when the entry was concurrently read while a
2592         // writer is removing it from the segment or after a clear has removed
2593         // all of the segment's entries.
2594         if (accessQueue.contains(e)) {
2595           accessQueue.add(e);
2596         }
2597       }
2598     }
2599 
2600     // expiration
2601 
2602     /**
2603      * Clean up expired entries when the lock is available.
2604      */
2605     void tryExpireEntries(long now) {
2606       if (tryLock()) {
2607         try {
2608           expireEntries(now);
2609         } finally {
2610           unlock();
2611           // don't call postWriteCleanup as we're in a read
2612         }
2613       }
2614     }
2615 
2616     @GuardedBy("Segment.this")
2617     void expireEntries(long now) {
2618       drainRecencyQueue();
2619 
2620       ReferenceEntry<K, V> e;
2621       while ((e = writeQueue.peek()) != null && map.isExpired(e, now)) {
2622         if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
2623           throw new AssertionError();
2624         }
2625       }
2626       while ((e = accessQueue.peek()) != null && map.isExpired(e, now)) {
2627         if (!removeEntry(e, e.getHash(), RemovalCause.EXPIRED)) {
2628           throw new AssertionError();
2629         }
2630       }
2631     }
2632 
2633     // eviction
2634 
2635     @GuardedBy("Segment.this")
2636     void enqueueNotification(ReferenceEntry<K, V> entry, RemovalCause cause) {
2637       enqueueNotification(entry.getKey(), entry.getHash(), entry.getValueReference(), cause);
2638     }
2639 
2640     @GuardedBy("Segment.this")
2641     void enqueueNotification(@Nullable K key, int hash, ValueReference<K, V> valueReference,
2642         RemovalCause cause) {
2643       totalWeight -= valueReference.getWeight();
2644       if (cause.wasEvicted()) {
2645         statsCounter.recordEviction();
2646       }
2647       if (map.removalNotificationQueue != DISCARDING_QUEUE) {
2648         V value = valueReference.get();
2649         RemovalNotification<K, V> notification = new RemovalNotification<K, V>(key, value, cause);
2650         map.removalNotificationQueue.offer(notification);
2651       }
2652     }
2653 
2654     /**
2655      * Performs eviction if the segment is full. This should only be called prior to adding a new
2656      * entry and increasing {@code count}.
2657      */
2658     @GuardedBy("Segment.this")
2659     void evictEntries() {
2660       if (!map.evictsBySize()) {
2661         return;
2662       }
2663 
2664       drainRecencyQueue();
2665       while (totalWeight > maxSegmentWeight) {
2666         ReferenceEntry<K, V> e = getNextEvictable();
2667         if (!removeEntry(e, e.getHash(), RemovalCause.SIZE)) {
2668           throw new AssertionError();
2669         }
2670       }
2671     }
2672 
2673     // TODO(fry): instead implement this with an eviction head
2674     ReferenceEntry<K, V> getNextEvictable() {
2675       for (ReferenceEntry<K, V> e : accessQueue) {
2676         int weight = e.getValueReference().getWeight();
2677         if (weight > 0) {
2678           return e;
2679         }
2680       }
2681       throw new AssertionError();
2682     }
2683 
2684     /**
2685      * Returns first entry of bin for given hash.
2686      * Returns the first entry of the bin for the given hash.
2687     ReferenceEntry<K, V> getFirst(int hash) {
2688       // read this volatile field only once
2689       AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2690       return table.get(hash & (table.length() - 1));
2691     }
2692 
2693     // Specialized implementations of map methods
2694 
2695     @Nullable
2696     ReferenceEntry<K, V> getEntry(Object key, int hash) {
2697       for (ReferenceEntry<K, V> e = getFirst(hash); e != null; e = e.getNext()) {
2698         if (e.getHash() != hash) {
2699           continue;
2700         }
2701 
2702         K entryKey = e.getKey();
2703         if (entryKey == null) {
2704           tryDrainReferenceQueues();
2705           continue;
2706         }
2707 
2708         if (map.keyEquivalence.equivalent(key, entryKey)) {
2709           return e;
2710         }
2711       }
2712 
2713       return null;
2714     }
2715 
2716     @Nullable
2717     ReferenceEntry<K, V> getLiveEntry(Object key, int hash, long now) {
2718       ReferenceEntry<K, V> e = getEntry(key, hash);
2719       if (e == null) {
2720         return null;
2721       } else if (map.isExpired(e, now)) {
2722         tryExpireEntries(now);
2723         return null;
2724       }
2725       return e;
2726     }
2727 
2728     /**
2729      * Gets the value from an entry. Returns null if the entry is invalid, partially-collected,
2730      * loading, or expired.
2731      */
2732     V getLiveValue(ReferenceEntry<K, V> entry, long now) {
2733       if (entry.getKey() == null) {
2734         tryDrainReferenceQueues();
2735         return null;
2736       }
2737       V value = entry.getValueReference().get();
2738       if (value == null) {
2739         tryDrainReferenceQueues();
2740         return null;
2741       }
2742 
2743       if (map.isExpired(entry, now)) {
2744         tryExpireEntries(now);
2745         return null;
2746       }
2747       return value;
2748     }
2749 
2750     @Nullable
2751     V get(Object key, int hash) {
2752       try {
2753         if (count != 0) { // read-volatile
2754           long now = map.ticker.read();
2755           ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
2756           if (e == null) {
2757             return null;
2758           }
2759 
2760           V value = e.getValueReference().get();
2761           if (value != null) {
2762             recordRead(e, now);
2763             return scheduleRefresh(e, e.getKey(), hash, value, now, map.defaultLoader);
2764           }
2765           tryDrainReferenceQueues();
2766         }
2767         return null;
2768       } finally {
2769         postReadCleanup();
2770       }
2771     }
2772 
2773     boolean containsKey(Object key, int hash) {
2774       try {
2775         if (count != 0) { // read-volatile
2776           long now = map.ticker.read();
2777           ReferenceEntry<K, V> e = getLiveEntry(key, hash, now);
2778           if (e == null) {
2779             return false;
2780           }
2781           return e.getValueReference().get() != null;
2782         }
2783 
2784         return false;
2785       } finally {
2786         postReadCleanup();
2787       }
2788     }
2789 
2790     /**
2791      * This method is a convenience for testing. Code should call {@link
2792      * LocalCache#containsValue} directly.
2793      */
2794     @VisibleForTesting
2795     boolean containsValue(Object value) {
2796       try {
2797         if (count != 0) { // read-volatile
2798           long now = map.ticker.read();
2799           AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2800           int length = table.length();
2801           for (int i = 0; i < length; ++i) {
2802             for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
2803               V entryValue = getLiveValue(e, now);
2804               if (entryValue == null) {
2805                 continue;
2806               }
2807               if (map.valueEquivalence.equivalent(value, entryValue)) {
2808                 return true;
2809               }
2810             }
2811           }
2812         }
2813 
2814         return false;
2815       } finally {
2816         postReadCleanup();
2817       }
2818     }
2819 
2820     @Nullable
2821     V put(K key, int hash, V value, boolean onlyIfAbsent) {
2822       lock();
2823       try {
2824         long now = map.ticker.read();
2825         preWriteCleanup(now);
2826 
2827         int newCount = this.count + 1;
2828         if (newCount > this.threshold) { // ensure capacity
2829           expand();
2830           newCount = this.count + 1;
2831         }
2832 
2833         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2834         int index = hash & (table.length() - 1);
2835         ReferenceEntry<K, V> first = table.get(index);
2836 
2837         // Look for an existing entry.
2838         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
2839           K entryKey = e.getKey();
2840           if (e.getHash() == hash && entryKey != null
2841               && map.keyEquivalence.equivalent(key, entryKey)) {
2842             // We found an existing entry.
2843 
2844             ValueReference<K, V> valueReference = e.getValueReference();
2845             V entryValue = valueReference.get();
2846 
2847             if (entryValue == null) {
2848               ++modCount;
2849               if (valueReference.isActive()) {
2850                 enqueueNotification(key, hash, valueReference, RemovalCause.COLLECTED);
2851                 setValue(e, key, value, now);
2852                 newCount = this.count; // count remains unchanged
2853               } else {
2854                 setValue(e, key, value, now);
2855                 newCount = this.count + 1;
2856               }
2857               this.count = newCount; // write-volatile
2858               evictEntries();
2859               return null;
2860             } else if (onlyIfAbsent) {
2861               // Mimic
2862               // "if (!map.containsKey(key)) ...
2863               // else return map.get(key);"
2864               recordLockedRead(e, now);
2865               return entryValue;
2866             } else {
2867               // clobber existing entry, count remains unchanged
2868               ++modCount;
2869               enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
2870               setValue(e, key, value, now);
2871               evictEntries();
2872               return entryValue;
2873             }
2874           }
2875         }
2876 
2877         // Create a new entry.
2878         ++modCount;
2879         ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
2880         setValue(newEntry, key, value, now);
2881         table.set(index, newEntry);
2882         newCount = this.count + 1;
2883         this.count = newCount; // write-volatile
2884         evictEntries();
2885         return null;
2886       } finally {
2887         unlock();
2888         postWriteCleanup();
2889       }
2890     }
2891 
2892     /**
2893      * Expands the table if possible.
2894      */
2895     @GuardedBy("Segment.this")
2896     void expand() {
2897       AtomicReferenceArray<ReferenceEntry<K, V>> oldTable = table;
2898       int oldCapacity = oldTable.length();
2899       if (oldCapacity >= MAXIMUM_CAPACITY) {
2900         return;
2901       }
2902 
2903       /*
2904        * Reclassify nodes in each list to new Map. Because we are using power-of-two expansion, the
2905        * elements from each bin must either stay at same index, or move with a power of two offset.
2906        * We eliminate unnecessary node creation by catching cases where old nodes can be reused
2907        * because their next fields won't change. Statistically, at the default threshold, only
2908        * about one-sixth of them need cloning when a table doubles. The nodes they replace will be
2909        * garbage collectable as soon as they are no longer referenced by any reader thread that may
2910        * be in the midst of traversing table right now.
2911        */
2912 
2913       int newCount = count;
2914       AtomicReferenceArray<ReferenceEntry<K, V>> newTable = newEntryArray(oldCapacity << 1);
2915       threshold = newTable.length() * 3 / 4;
2916       int newMask = newTable.length() - 1;
2917       for (int oldIndex = 0; oldIndex < oldCapacity; ++oldIndex) {
2918         // We need to guarantee that any existing reads of old Map can
2919         // proceed. So we cannot yet null out each bin.
2920         ReferenceEntry<K, V> head = oldTable.get(oldIndex);
2921 
2922         if (head != null) {
2923           ReferenceEntry<K, V> next = head.getNext();
2924           int headIndex = head.getHash() & newMask;
2925 
2926           // Single node on list
2927           if (next == null) {
2928             newTable.set(headIndex, head);
2929           } else {
2930             // Reuse the consecutive sequence of nodes with the same target
2931             // index from the end of the list. tail points to the first
2932             // entry in the reusable list.
2933             ReferenceEntry<K, V> tail = head;
2934             int tailIndex = headIndex;
2935             for (ReferenceEntry<K, V> e = next; e != null; e = e.getNext()) {
2936               int newIndex = e.getHash() & newMask;
2937               if (newIndex != tailIndex) {
2938                 // The index changed. We'll need to copy the previous entry.
2939                 tailIndex = newIndex;
2940                 tail = e;
2941               }
2942             }
2943             newTable.set(tailIndex, tail);
2944 
2945             // Clone nodes leading up to the tail.
2946             for (ReferenceEntry<K, V> e = head; e != tail; e = e.getNext()) {
2947               int newIndex = e.getHash() & newMask;
2948               ReferenceEntry<K, V> newNext = newTable.get(newIndex);
2949               ReferenceEntry<K, V> newFirst = copyEntry(e, newNext);
2950               if (newFirst != null) {
2951                 newTable.set(newIndex, newFirst);
2952               } else {
2953                 removeCollectedEntry(e);
2954                 newCount--;
2955               }
2956             }
2957           }
2958         }
2959       }
2960       table = newTable;
2961       this.count = newCount;
2962     }
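         /*
          * Worked example (hypothetical hashes): when a table of length 4 doubles to length 8,
          * an entry whose hash has bit 2 clear keeps its old index i, while one with bit 2 set
          * moves to i + 4. Since each bucket list is scanned from the head, the longest trailing
          * run of nodes that all map to the same new index is reused as-is, and only the nodes in
          * front of that run are copied.
          */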
2963 
2964     boolean replace(K key, int hash, V oldValue, V newValue) {
2965       lock();
2966       try {
2967         long now = map.ticker.read();
2968         preWriteCleanup(now);
2969 
2970         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
2971         int index = hash & (table.length() - 1);
2972         ReferenceEntry<K, V> first = table.get(index);
2973 
2974         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
2975           K entryKey = e.getKey();
2976           if (e.getHash() == hash && entryKey != null
2977               && map.keyEquivalence.equivalent(key, entryKey)) {
2978             ValueReference<K, V> valueReference = e.getValueReference();
2979             V entryValue = valueReference.get();
2980             if (entryValue == null) {
2981               if (valueReference.isActive()) {
2982                 // If the value disappeared, this entry is partially collected.
2983                 int newCount = this.count - 1;
2984                 ++modCount;
2985                 ReferenceEntry<K, V> newFirst = removeValueFromChain(
2986                     first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
2987                 newCount = this.count - 1;
2988                 table.set(index, newFirst);
2989                 this.count = newCount; // write-volatile
2990               }
2991               return false;
2992             }
2993 
2994             if (map.valueEquivalence.equivalent(oldValue, entryValue)) {
2995               ++modCount;
2996               enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
2997               setValue(e, key, newValue, now);
2998               evictEntries();
2999               return true;
3000             } else {
3001               // Mimic
3002               // "if (map.containsKey(key) && map.get(key).equals(oldValue))..."
3003               recordLockedRead(e, now);
3004               return false;
3005             }
3006           }
3007         }
3008 
3009         return false;
3010       } finally {
3011         unlock();
3012         postWriteCleanup();
3013       }
3014     }
3015 
3016     @Nullable
3017     V replace(K key, int hash, V newValue) {
3018       lock();
3019       try {
3020         long now = map.ticker.read();
3021         preWriteCleanup(now);
3022 
3023         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3024         int index = hash & (table.length() - 1);
3025         ReferenceEntry<K, V> first = table.get(index);
3026 
3027         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3028           K entryKey = e.getKey();
3029           if (e.getHash() == hash && entryKey != null
3030               && map.keyEquivalence.equivalent(key, entryKey)) {
3031             ValueReference<K, V> valueReference = e.getValueReference();
3032             V entryValue = valueReference.get();
3033             if (entryValue == null) {
3034               if (valueReference.isActive()) {
3035                 // If the value disappeared, this entry is partially collected.
3036                 int newCount = this.count - 1;
3037                 ++modCount;
3038                 ReferenceEntry<K, V> newFirst = removeValueFromChain(
3039                     first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3040                 newCount = this.count - 1;
3041                 table.set(index, newFirst);
3042                 this.count = newCount; // write-volatile
3043               }
3044               return null;
3045             }
3046 
3047             ++modCount;
3048             enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3049             setValue(e, key, newValue, now);
3050             evictEntries();
3051             return entryValue;
3052           }
3053         }
3054 
3055         return null;
3056       } finally {
3057         unlock();
3058         postWriteCleanup();
3059       }
3060     }
3061 
3062     @Nullable
3063     V remove(Object key, int hash) {
3064       lock();
3065       try {
3066         long now = map.ticker.read();
3067         preWriteCleanup(now);
3068 
3069         int newCount = this.count - 1;
3070         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3071         int index = hash & (table.length() - 1);
3072         ReferenceEntry<K, V> first = table.get(index);
3073 
3074         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3075           K entryKey = e.getKey();
3076           if (e.getHash() == hash && entryKey != null
3077               && map.keyEquivalence.equivalent(key, entryKey)) {
3078             ValueReference<K, V> valueReference = e.getValueReference();
3079             V entryValue = valueReference.get();
3080 
3081             RemovalCause cause;
3082             if (entryValue != null) {
3083               cause = RemovalCause.EXPLICIT;
3084             } else if (valueReference.isActive()) {
3085               cause = RemovalCause.COLLECTED;
3086             } else {
3087               // currently loading
3088               return null;
3089             }
3090 
3091             ++modCount;
3092             ReferenceEntry<K, V> newFirst = removeValueFromChain(
3093                 first, e, entryKey, hash, valueReference, cause);
3094             newCount = this.count - 1;
3095             table.set(index, newFirst);
3096             this.count = newCount; // write-volatile
3097             return entryValue;
3098           }
3099         }
3100 
3101         return null;
3102       } finally {
3103         unlock();
3104         postWriteCleanup();
3105       }
3106     }
3107 
3108     boolean storeLoadedValue(K key, int hash, LoadingValueReference<K, V> oldValueReference,
3109         V newValue) {
3110       lock();
3111       try {
3112         long now = map.ticker.read();
3113         preWriteCleanup(now);
3114 
3115         int newCount = this.count + 1;
3116         if (newCount > this.threshold) { // ensure capacity
3117           expand();
3118           newCount = this.count + 1;
3119         }
3120 
3121         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3122         int index = hash & (table.length() - 1);
3123         ReferenceEntry<K, V> first = table.get(index);
3124 
3125         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3126           K entryKey = e.getKey();
3127           if (e.getHash() == hash && entryKey != null
3128               && map.keyEquivalence.equivalent(key, entryKey)) {
3129             ValueReference<K, V> valueReference = e.getValueReference();
3130             V entryValue = valueReference.get();
3131             // replace the old LoadingValueReference if it's live, otherwise
3132             // perform a putIfAbsent
3133             if (oldValueReference == valueReference
3134                 || (entryValue == null && valueReference != UNSET)) {
3135               ++modCount;
3136               if (oldValueReference.isActive()) {
3137                 RemovalCause cause =
3138                     (entryValue == null) ? RemovalCause.COLLECTED : RemovalCause.REPLACED;
3139                 enqueueNotification(key, hash, oldValueReference, cause);
3140                 newCount--;
3141               }
3142               setValue(e, key, newValue, now);
3143               this.count = newCount; // write-volatile
3144               evictEntries();
3145               return true;
3146             }
3147 
3148             // the loaded value was already clobbered
3149             valueReference = new WeightedStrongValueReference<K, V>(newValue, 0);
3150             enqueueNotification(key, hash, valueReference, RemovalCause.REPLACED);
3151             return false;
3152           }
3153         }
3154 
3155         ++modCount;
3156         ReferenceEntry<K, V> newEntry = newEntry(key, hash, first);
3157         setValue(newEntry, key, newValue, now);
3158         table.set(index, newEntry);
3159         this.count = newCount; // write-volatile
3160         evictEntries();
3161         return true;
3162       } finally {
3163         unlock();
3164         postWriteCleanup();
3165       }
3166     }
3167 
3168     boolean remove(Object key, int hash, Object value) {
3169       lock();
3170       try {
3171         long now = map.ticker.read();
3172         preWriteCleanup(now);
3173 
3174         int newCount = this.count - 1;
3175         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3176         int index = hash & (table.length() - 1);
3177         ReferenceEntry<K, V> first = table.get(index);
3178 
3179         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3180           K entryKey = e.getKey();
3181           if (e.getHash() == hash && entryKey != null
3182               && map.keyEquivalence.equivalent(key, entryKey)) {
3183             ValueReference<K, V> valueReference = e.getValueReference();
3184             V entryValue = valueReference.get();
3185 
3186             RemovalCause cause;
3187             if (map.valueEquivalence.equivalent(value, entryValue)) {
3188               cause = RemovalCause.EXPLICIT;
3189             } else if (entryValue == null && valueReference.isActive()) {
3190               cause = RemovalCause.COLLECTED;
3191             } else {
3192               // currently loading
3193               return false;
3194             }
3195 
3196             ++modCount;
3197             ReferenceEntry<K, V> newFirst = removeValueFromChain(
3198                 first, e, entryKey, hash, valueReference, cause);
3199             newCount = this.count - 1;
3200             table.set(index, newFirst);
3201             this.count = newCount; // write-volatile
3202             return (cause == RemovalCause.EXPLICIT);
3203           }
3204         }
3205 
3206         return false;
3207       } finally {
3208         unlock();
3209         postWriteCleanup();
3210       }
3211     }
3212 
3213     void clear() {
3214       if (count != 0) { // read-volatile
3215         lock();
3216         try {
3217           AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3218           for (int i = 0; i < table.length(); ++i) {
3219             for (ReferenceEntry<K, V> e = table.get(i); e != null; e = e.getNext()) {
3220               // Loading references aren't actually in the map yet.
3221               if (e.getValueReference().isActive()) {
3222                 enqueueNotification(e, RemovalCause.EXPLICIT);
3223               }
3224             }
3225           }
3226           for (int i = 0; i < table.length(); ++i) {
3227             table.set(i, null);
3228           }
3229           clearReferenceQueues();
3230           writeQueue.clear();
3231           accessQueue.clear();
3232           readCount.set(0);
3233 
3234           ++modCount;
3235           count = 0; // write-volatile
3236         } finally {
3237           unlock();
3238           postWriteCleanup();
3239         }
3240       }
3241     }
3242 
3243     @GuardedBy("Segment.this")
3244     @Nullable
3245     ReferenceEntry<K, V> removeValueFromChain(ReferenceEntry<K, V> first,
3246         ReferenceEntry<K, V> entry, @Nullable K key, int hash, ValueReference<K, V> valueReference,
3247         RemovalCause cause) {
3248       enqueueNotification(key, hash, valueReference, cause);
3249       writeQueue.remove(entry);
3250       accessQueue.remove(entry);
3251 
3252       if (valueReference.isLoading()) {
3253         valueReference.notifyNewValue(null);
3254         return first;
3255       } else {
3256         return removeEntryFromChain(first, entry);
3257       }
3258     }
3259 
3260     @GuardedBy("Segment.this")
3261     @Nullable
3262     ReferenceEntry<K, V> removeEntryFromChain(ReferenceEntry<K, V> first,
3263         ReferenceEntry<K, V> entry) {
3264       int newCount = count;
3265       ReferenceEntry<K, V> newFirst = entry.getNext();
3266       for (ReferenceEntry<K, V> e = first; e != entry; e = e.getNext()) {
3267         ReferenceEntry<K, V> next = copyEntry(e, newFirst);
3268         if (next != null) {
3269           newFirst = next;
3270         } else {
3271           removeCollectedEntry(e);
3272           newCount--;
3273         }
3274       }
3275       this.count = newCount;
3276       return newFirst;
3277     }
3278 
3279     @GuardedBy("Segment.this")
3280     void removeCollectedEntry(ReferenceEntry<K, V> entry) {
3281       enqueueNotification(entry, RemovalCause.COLLECTED);
3282       writeQueue.remove(entry);
3283       accessQueue.remove(entry);
3284     }
3285 
3286     /**
3287      * Removes an entry whose key has been garbage collected.
3288      */
3289     boolean reclaimKey(ReferenceEntry<K, V> entry, int hash) {
3290       lock();
3291       try {
3292         int newCount = count - 1;
3293         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3294         int index = hash & (table.length() - 1);
3295         ReferenceEntry<K, V> first = table.get(index);
3296 
3297         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3298           if (e == entry) {
3299             ++modCount;
3300             ReferenceEntry<K, V> newFirst = removeValueFromChain(
3301                 first, e, e.getKey(), hash, e.getValueReference(), RemovalCause.COLLECTED);
3302             newCount = this.count - 1;
3303             table.set(index, newFirst);
3304             this.count = newCount; // write-volatile
3305             return true;
3306           }
3307         }
3308 
3309         return false;
3310       } finally {
3311         unlock();
3312         postWriteCleanup();
3313       }
3314     }
3315 
3316     /**
3317      * Removes an entry whose value has been garbage collected.
3318      */
3319     boolean reclaimValue(K key, int hash, ValueReference<K, V> valueReference) {
3320       lock();
3321       try {
3322         int newCount = this.count - 1;
3323         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3324         int index = hash & (table.length() - 1);
3325         ReferenceEntry<K, V> first = table.get(index);
3326 
3327         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3328           K entryKey = e.getKey();
3329           if (e.getHash() == hash && entryKey != null
3330               && map.keyEquivalence.equivalent(key, entryKey)) {
3331             ValueReference<K, V> v = e.getValueReference();
3332             if (v == valueReference) {
3333               ++modCount;
3334               ReferenceEntry<K, V> newFirst = removeValueFromChain(
3335                   first, e, entryKey, hash, valueReference, RemovalCause.COLLECTED);
3336               newCount = this.count - 1;
3337               table.set(index, newFirst);
3338               this.count = newCount; // write-volatile
3339               return true;
3340             }
3341             return false;
3342           }
3343         }
3344 
3345         return false;
3346       } finally {
3347         unlock();
3348         if (!isHeldByCurrentThread()) { // don't cleanup inside of put
3349           postWriteCleanup();
3350         }
3351       }
3352     }
3353 
3354     boolean removeLoadingValue(K key, int hash, LoadingValueReference<K, V> valueReference) {
3355       lock();
3356       try {
3357         AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3358         int index = hash & (table.length() - 1);
3359         ReferenceEntry<K, V> first = table.get(index);
3360 
3361         for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3362           K entryKey = e.getKey();
3363           if (e.getHash() == hash && entryKey != null
3364               && map.keyEquivalence.equivalent(key, entryKey)) {
3365             ValueReference<K, V> v = e.getValueReference();
3366             if (v == valueReference) {
3367               if (valueReference.isActive()) {
3368                 e.setValueReference(valueReference.getOldValue());
3369               } else {
3370                 ReferenceEntry<K, V> newFirst = removeEntryFromChain(first, e);
3371                 table.set(index, newFirst);
3372               }
3373               return true;
3374             }
3375             return false;
3376           }
3377         }
3378 
3379         return false;
3380       } finally {
3381         unlock();
3382         postWriteCleanup();
3383       }
3384     }
3385 
3386     @GuardedBy("Segment.this")
3387     boolean removeEntry(ReferenceEntry<K, V> entry, int hash, RemovalCause cause) {
3388       int newCount = this.count - 1;
3389       AtomicReferenceArray<ReferenceEntry<K, V>> table = this.table;
3390       int index = hash & (table.length() - 1);
3391       ReferenceEntry<K, V> first = table.get(index);
3392 
3393       for (ReferenceEntry<K, V> e = first; e != null; e = e.getNext()) {
3394         if (e == entry) {
3395           ++modCount;
3396           ReferenceEntry<K, V> newFirst = removeValueFromChain(
3397               first, e, e.getKey(), hash, e.getValueReference(), cause);
3398           newCount = this.count - 1;
3399           table.set(index, newFirst);
3400           this.count = newCount; // write-volatile
3401           return true;
3402         }
3403       }
3404 
3405       return false;
3406     }
3407 
3408     /**
3409      * Performs routine cleanup following a read. Normally cleanup happens during writes. If cleanup
3410      * is not observed after a sufficient number of reads, try cleaning up from the read thread.
3411      */
3412     void postReadCleanup() {
3413       if ((readCount.incrementAndGet() & DRAIN_THRESHOLD) == 0) {
3414         cleanUp();
3415       }
3416     }
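
    /*
     * The check above fires only when the low bits of readCount wrap to zero, so cleanup cost is
     * amortized over many reads; this relies on DRAIN_THRESHOLD being a power-of-two-minus-one
     * bitmask. A minimal sketch of the same pattern, assuming a mask of 0x3F (roughly one cleanup
     * pass per 64 reads) and a hypothetical maintenance() hook:
     *
     *   final AtomicInteger reads = new AtomicInteger();
     *
     *   void onRead() {
     *     if ((reads.incrementAndGet() & 0x3F) == 0) {
     *       maintenance(); // hypothetical cleanup hook
     *     }
     *   }
     */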
3417 
3418     /**
3419      * Performs routine cleanup prior to executing a write. This should be called every time a
3420      * write thread acquires the segment lock, immediately after acquiring the lock.
3421      *
3422      * <p>Post-condition: expireEntries has been run.
3423      */
3424     @GuardedBy("Segment.this")
3425     void preWriteCleanup(long now) {
3426       runLockedCleanup(now);
3427     }
3428 
3429     /**
3430      * Performs routine cleanup following a write.
3431      */
3432     void postWriteCleanup() {
3433       runUnlockedCleanup();
3434     }
3435 
3436     void cleanUp() {
3437       long now = map.ticker.read();
3438       runLockedCleanup(now);
3439       runUnlockedCleanup();
3440     }
3441 
3442     void runLockedCleanup(long now) {
3443       if (tryLock()) {
3444         try {
3445           drainReferenceQueues();
3446           expireEntries(now); // calls drainRecencyQueue
3447           readCount.set(0);
3448         } finally {
3449           unlock();
3450         }
3451       }
3452     }
3453 
3454     void runUnlockedCleanup() {
3455       // locked cleanup may generate notifications we can send unlocked
3456       if (!isHeldByCurrentThread()) {
3457         map.processPendingNotifications();
3458       }
3459     }
3460 
3461   }
3462 
3463   static class LoadingValueReference<K, V> implements ValueReference<K, V> {
3464     volatile ValueReference<K, V> oldValue;
3465 
3466     // TODO(fry): rename get, then extend AbstractFuture instead of containing SettableFuture
3467     final SettableFuture<V> futureValue = SettableFuture.create();
3468     final Stopwatch stopwatch = Stopwatch.createUnstarted();
3469 
3470     public LoadingValueReference() {
3471       this(LocalCache.<K, V>unset());
3472     }
3473 
3474     public LoadingValueReference(ValueReference<K, V> oldValue) {
3475       this.oldValue = oldValue;
3476     }
3477 
3478     @Override
3479     public boolean isLoading() {
3480       return true;
3481     }
3482 
3483     @Override
3484     public boolean isActive() {
3485       return oldValue.isActive();
3486     }
3487 
3488     @Override
3489     public int getWeight() {
3490       return oldValue.getWeight();
3491     }
3492 
3493     public boolean set(@Nullable V newValue) {
3494       return futureValue.set(newValue);
3495     }
3496 
3497     public boolean setException(Throwable t) {
3498       return futureValue.setException(t);
3499     }
3500 
3501     private ListenableFuture<V> fullyFailedFuture(Throwable t) {
3502       return Futures.immediateFailedFuture(t);
3503     }
3504 
3505     @Override
3506     public void notifyNewValue(@Nullable V newValue) {
3507       if (newValue != null) {
3508         // The pending load was clobbered by a manual write.
3509         // Unblock all pending gets, and have them return the new value.
3510         set(newValue);
3511       } else {
3512         // The pending load was removed. Delay notifications until loading completes.
3513         oldValue = unset();
3514       }
3515 
3516       // TODO(fry): could also cancel loading if we had a handle on its future
3517     }
3518 
3519     public ListenableFuture<V> loadFuture(K key, CacheLoader<? super K, V> loader) {
3520       stopwatch.start();
3521       V previousValue = oldValue.get();
3522       try {
3523         if (previousValue == null) {
3524           V newValue = loader.load(key);
3525           return set(newValue) ? futureValue : Futures.immediateFuture(newValue);
3526         }
3527         ListenableFuture<V> newValue = loader.reload(key, previousValue);
3528         if (newValue == null) {
3529           return Futures.immediateFuture(null);
3530         }
3531         // To avoid a race, make sure the refreshed value is set into loadingValueReference
3532         // *before* returning newValue from the cache query.
3533         return Futures.transform(newValue, new Function<V, V>() {
3534           @Override
3535           public V apply(V newValue) {
3536             LoadingValueReference.this.set(newValue);
3537             return newValue;
3538           }
3539         });
3540       } catch (Throwable t) {
3541         if (t instanceof InterruptedException) {
3542           Thread.currentThread().interrupt();
3543         }
3544         return setException(t) ? futureValue : fullyFailedFuture(t);
3545       }
3546     }
3547 
3548     public long elapsedNanos() {
3549       return stopwatch.elapsed(NANOSECONDS);
3550     }
3551 
3552     @Override
3553     public V waitForValue() throws ExecutionException {
3554       return getUninterruptibly(futureValue);
3555     }
3556 
3557     @Override
3558     public V get() {
3559       return oldValue.get();
3560     }
3561 
3562     public ValueReference<K, V> getOldValue() {
3563       return oldValue;
3564     }
3565 
3566     @Override
3567     public ReferenceEntry<K, V> getEntry() {
3568       return null;
3569     }
3570 
3571     @Override
3572     public ValueReference<K, V> copyFor(
3573         ReferenceQueue<V> queue, @Nullable V value, ReferenceEntry<K, V> entry) {
3574       return this;
3575     }
3576   }
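
  /*
   * loadFuture above distinguishes an initial load (no previous value, so CacheLoader.load runs)
   * from a refresh (CacheLoader.reload runs with the previous value and may complete
   * asynchronously). A minimal sketch of a loader whose reload happens on a caller-supplied
   * executor; Key, Value, executor and fetchFromDatabase are hypothetical stand-ins:
   *
   *   CacheLoader<Key, Value> loader = new CacheLoader<Key, Value>() {
   *     @Override public Value load(Key key) throws Exception {
   *       return fetchFromDatabase(key);        // synchronous initial load
   *     }
   *     @Override public ListenableFuture<Value> reload(final Key key, Value oldValue) {
   *       ListenableFutureTask<Value> task = ListenableFutureTask.create(new Callable<Value>() {
   *         @Override public Value call() throws Exception {
   *           return fetchFromDatabase(key);    // refresh without blocking readers
   *         }
   *       });
   *       executor.execute(task);
   *       return task;
   *     }
   *   };
   */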
3577 
3578   // Queues
3579 
3580   /**
3581    * A custom queue for managing eviction order. Note that this is tightly integrated with {@code
3582    * ReferenceEntry}, upon which it relies to perform its linking.
3583    *
3584    * <p>Note that this entire implementation makes the assumption that all elements which are in
3585    * the map are also in this queue, and that all elements not in the queue are not in the map.
3586    *
3587    * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
3588    * of the queue as part of copyWriteEntry, and (2) the contains method is highly optimized
3589    * for the current model.
3590    */
3591   static final class WriteQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
3592     final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() {
3593 
3594       @Override
3595       public long getWriteTime() {
3596         return Long.MAX_VALUE;
3597       }
3598 
3599       @Override
3600       public void setWriteTime(long time) {}
3601 
3602       ReferenceEntry<K, V> nextWrite = this;
3603 
3604       @Override
3605       public ReferenceEntry<K, V> getNextInWriteQueue() {
3606         return nextWrite;
3607       }
3608 
3609       @Override
3610       public void setNextInWriteQueue(ReferenceEntry<K, V> next) {
3611         this.nextWrite = next;
3612       }
3613 
3614       ReferenceEntry<K, V> previousWrite = this;
3615 
3616       @Override
3617       public ReferenceEntry<K, V> getPreviousInWriteQueue() {
3618         return previousWrite;
3619       }
3620 
3621       @Override
3622       public void setPreviousInWriteQueue(ReferenceEntry<K, V> previous) {
3623         this.previousWrite = previous;
3624       }
3625     };
3626 
3627     // implements Queue
3628 
3629     @Override
3630     public boolean offer(ReferenceEntry<K, V> entry) {
3631       // unlink
3632       connectWriteOrder(entry.getPreviousInWriteQueue(), entry.getNextInWriteQueue());
3633 
3634       // add to tail
3635       connectWriteOrder(head.getPreviousInWriteQueue(), entry);
3636       connectWriteOrder(entry, head);
3637 
3638       return true;
3639     }
3640 
3641     @Override
3642     public ReferenceEntry<K, V> peek() {
3643       ReferenceEntry<K, V> next = head.getNextInWriteQueue();
3644       return (next == head) ? null : next;
3645     }
3646 
3647     @Override
3648     public ReferenceEntry<K, V> poll() {
3649       ReferenceEntry<K, V> next = head.getNextInWriteQueue();
3650       if (next == head) {
3651         return null;
3652       }
3653 
3654       remove(next);
3655       return next;
3656     }
3657 
3658     @Override
3659     @SuppressWarnings("unchecked")
3660     public boolean remove(Object o) {
3661       ReferenceEntry<K, V> e = (ReferenceEntry) o;
3662       ReferenceEntry<K, V> previous = e.getPreviousInWriteQueue();
3663       ReferenceEntry<K, V> next = e.getNextInWriteQueue();
3664       connectWriteOrder(previous, next);
3665       nullifyWriteOrder(e);
3666 
3667       return next != NullEntry.INSTANCE;
3668     }
3669 
3670     @Override
3671     @SuppressWarnings("unchecked")
3672     public boolean contains(Object o) {
3673       ReferenceEntry<K, V> e = (ReferenceEntry) o;
3674       return e.getNextInWriteQueue() != NullEntry.INSTANCE;
3675     }
3676 
3677     @Override
3678     public boolean isEmpty() {
3679       return head.getNextInWriteQueue() == head;
3680     }
3681 
3682     @Override
3683     public int size() {
3684       int size = 0;
3685       for (ReferenceEntry<K, V> e = head.getNextInWriteQueue(); e != head;
3686           e = e.getNextInWriteQueue()) {
3687         size++;
3688       }
3689       return size;
3690     }
3691 
3692     @Override
3693     public void clear() {
3694       ReferenceEntry<K, V> e = head.getNextInWriteQueue();
3695       while (e != head) {
3696         ReferenceEntry<K, V> next = e.getNextInWriteQueue();
3697         nullifyWriteOrder(e);
3698         e = next;
3699       }
3700 
3701       head.setNextInWriteQueue(head);
3702       head.setPreviousInWriteQueue(head);
3703     }
3704 
3705     @Override
3706     public Iterator<ReferenceEntry<K, V>> iterator() {
3707       return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) {
3708         @Override
3709         protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
3710           ReferenceEntry<K, V> next = previous.getNextInWriteQueue();
3711           return (next == head) ? null : next;
3712         }
3713       };
3714     }
3715   }
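
  /*
   * WriteQueue (and AccessQueue below) is intrusive: each ReferenceEntry carries its own
   * previous/next links, so offer, remove and contains are O(1) pointer operations rather than
   * scans, and no wrapper nodes are allocated. A minimal sketch of the same idea with a sentinel
   * head, using a standalone Node type in place of ReferenceEntry:
   *
   *   final class Node {
   *     Node prev = this, next = this;          // self-linked means "not in any queue"
   *   }
   *
   *   final Node head = new Node();             // sentinel; head.next is the oldest element
   *
   *   void addLast(Node n) {
   *     n.prev.next = n.next;                   // unlink, in case n is already queued
   *     n.next.prev = n.prev;
   *     n.prev = head.prev;                     // splice in just before the sentinel
   *     n.next = head;
   *     n.prev.next = n;
   *     head.prev = n;
   *   }
   *
   *   boolean contains(Node n) {
   *     return n.next != n;
   *   }
   */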
3716 
3717   /**
3718    * A custom queue for managing access order. Note that this is tightly integrated with
3719    * {@code ReferenceEntry}, upon which it relies to perform its linking.
3720    *
3721    * <p>Note that this entire implementation makes the assumption that all elements which are in
3722    * the map are also in this queue, and that all elements not in the queue are not in the map.
3723    *
3724    * <p>The benefits of creating our own queue are that (1) we can replace elements in the middle
3725    * of the queue as part of copyWriteEntry, and (2) the contains method is highly optimized
3726    * for the current model.
3727    */
3728   static final class AccessQueue<K, V> extends AbstractQueue<ReferenceEntry<K, V>> {
3729     final ReferenceEntry<K, V> head = new AbstractReferenceEntry<K, V>() {
3730 
3731       @Override
3732       public long getAccessTime() {
3733         return Long.MAX_VALUE;
3734       }
3735 
3736       @Override
3737       public void setAccessTime(long time) {}
3738 
3739       ReferenceEntry<K, V> nextAccess = this;
3740 
3741       @Override
3742       public ReferenceEntry<K, V> getNextInAccessQueue() {
3743         return nextAccess;
3744       }
3745 
3746       @Override
3747       public void setNextInAccessQueue(ReferenceEntry<K, V> next) {
3748         this.nextAccess = next;
3749       }
3750 
3751       ReferenceEntry<K, V> previousAccess = this;
3752 
3753       @Override
3754       public ReferenceEntry<K, V> getPreviousInAccessQueue() {
3755         return previousAccess;
3756       }
3757 
3758       @Override
3759       public void setPreviousInAccessQueue(ReferenceEntry<K, V> previous) {
3760         this.previousAccess = previous;
3761       }
3762     };
3763 
3764     // implements Queue
3765 
3766     @Override
3767     public boolean offer(ReferenceEntry<K, V> entry) {
3768       // unlink
3769       connectAccessOrder(entry.getPreviousInAccessQueue(), entry.getNextInAccessQueue());
3770 
3771       // add to tail
3772       connectAccessOrder(head.getPreviousInAccessQueue(), entry);
3773       connectAccessOrder(entry, head);
3774 
3775       return true;
3776     }
3777 
3778     @Override
3779     public ReferenceEntry<K, V> peek() {
3780       ReferenceEntry<K, V> next = head.getNextInAccessQueue();
3781       return (next == head) ? null : next;
3782     }
3783 
3784     @Override
3785     public ReferenceEntry<K, V> poll() {
3786       ReferenceEntry<K, V> next = head.getNextInAccessQueue();
3787       if (next == head) {
3788         return null;
3789       }
3790 
3791       remove(next);
3792       return next;
3793     }
3794 
3795     @Override
3796     @SuppressWarnings("unchecked")
3797     public boolean remove(Object o) {
3798       ReferenceEntry<K, V> e = (ReferenceEntry) o;
3799       ReferenceEntry<K, V> previous = e.getPreviousInAccessQueue();
3800       ReferenceEntry<K, V> next = e.getNextInAccessQueue();
3801       connectAccessOrder(previous, next);
3802       nullifyAccessOrder(e);
3803 
3804       return next != NullEntry.INSTANCE;
3805     }
3806 
3807     @Override
3808     @SuppressWarnings("unchecked")
3809     public boolean contains(Object o) {
3810       ReferenceEntry<K, V> e = (ReferenceEntry) o;
3811       return e.getNextInAccessQueue() != NullEntry.INSTANCE;
3812     }
3813 
3814     @Override
3815     public boolean isEmpty() {
3816       return head.getNextInAccessQueue() == head;
3817     }
3818 
3819     @Override
3820     public int size() {
3821       int size = 0;
3822       for (ReferenceEntry<K, V> e = head.getNextInAccessQueue(); e != head;
3823           e = e.getNextInAccessQueue()) {
3824         size++;
3825       }
3826       return size;
3827     }
3828 
3829     @Override
3830     public void clear() {
3831       ReferenceEntry<K, V> e = head.getNextInAccessQueue();
3832       while (e != head) {
3833         ReferenceEntry<K, V> next = e.getNextInAccessQueue();
3834         nullifyAccessOrder(e);
3835         e = next;
3836       }
3837 
3838       head.setNextInAccessQueue(head);
3839       head.setPreviousInAccessQueue(head);
3840     }
3841 
3842     @Override
3843     public Iterator<ReferenceEntry<K, V>> iterator() {
3844       return new AbstractSequentialIterator<ReferenceEntry<K, V>>(peek()) {
3845         @Override
3846         protected ReferenceEntry<K, V> computeNext(ReferenceEntry<K, V> previous) {
3847           ReferenceEntry<K, V> next = previous.getNextInAccessQueue();
3848           return (next == head) ? null : next;
3849         }
3850       };
3851     }
3852   }
3853 
3854   // Cache support
3855 
3856   public void cleanUp() {
3857     for (Segment<?, ?> segment : segments) {
3858       segment.cleanUp();
3859     }
3860   }
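
  /*
   * Cleanup is piggybacked on normal cache operations (postReadCleanup / postWriteCleanup in
   * Segment); there is no background thread. Callers that write rarely but want expired entries
   * reclaimed promptly can call cleanUp themselves, for example from a scheduler they own
   * (sketch; cache is assumed to be a Cache built elsewhere):
   *
   *   ScheduledExecutorService maintenance = Executors.newSingleThreadScheduledExecutor();
   *   maintenance.scheduleWithFixedDelay(new Runnable() {
   *     @Override public void run() {
   *       cache.cleanUp();
   *     }
   *   }, 1, 1, TimeUnit.MINUTES);
   */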
3861 
3862   // ConcurrentMap methods
3863 
3864   @Override
3865   public boolean isEmpty() {
3866     /*
3867      * Sum per-segment modCounts to avoid mis-reporting when elements are concurrently added and
3868      * removed in one segment while checking another, in which case the table was never actually
3869      * empty at any point. (The sum ensures accuracy up through at least 1<<31 per-segment
3870      * modifications before recheck.)  Method containsValue() uses similar constructions for
3871      * stability checks.
3872      */
3873     long sum = 0L;
3874     Segment<K, V>[] segments = this.segments;
3875     for (int i = 0; i < segments.length; ++i) {
3876       if (segments[i].count != 0) {
3877         return false;
3878       }
3879       sum += segments[i].modCount;
3880     }
3881 
3882     if (sum != 0L) { // recheck unless no modifications
3883       for (int i = 0; i < segments.length; ++i) {
3884         if (segments[i].count != 0) {
3885           return false;
3886         }
3887         sum -= segments[i].modCount;
3888       }
3889       if (sum != 0L) {
3890         return false;
3891       }
3892     }
3893     return true;
3894   }
3895 
3896   long longSize() {
3897     Segment<K, V>[] segments = this.segments;
3898     long sum = 0;
3899     for (int i = 0; i < segments.length; ++i) {
3900       sum += segments[i].count;
3901     }
3902     return sum;
3903   }
3904 
3905   @Override
3906   public int size() {
3907     return Ints.saturatedCast(longSize());
3908   }
3909 
3910   @Override
3911   @Nullable
3912   public V get(@Nullable Object key) {
3913     if (key == null) {
3914       return null;
3915     }
3916     int hash = hash(key);
3917     return segmentFor(hash).get(key, hash);
3918   }
3919 
3920   @Nullable
3921   public V getIfPresent(Object key) {
3922     int hash = hash(checkNotNull(key));
3923     V value = segmentFor(hash).get(key, hash);
3924     if (value == null) {
3925       globalStatsCounter.recordMisses(1);
3926     } else {
3927       globalStatsCounter.recordHits(1);
3928     }
3929     return value;
3930   }
3931 
3932   V get(K key, CacheLoader<? super K, V> loader) throws ExecutionException {
3933     int hash = hash(checkNotNull(key));
3934     return segmentFor(hash).get(key, hash, loader);
3935   }
3936 
3937   V getOrLoad(K key) throws ExecutionException {
3938     return get(key, defaultLoader);
3939   }
3940 
3941   ImmutableMap<K, V> getAllPresent(Iterable<?> keys) {
3942     int hits = 0;
3943     int misses = 0;
3944 
3945     Map<K, V> result = Maps.newLinkedHashMap();
3946     for (Object key : keys) {
3947       V value = get(key);
3948       if (value == null) {
3949         misses++;
3950       } else {
3951         // TODO(fry): store entry key instead of query key
3952         @SuppressWarnings("unchecked")
3953         K castKey = (K) key;
3954         result.put(castKey, value);
3955         hits++;
3956       }
3957     }
3958     globalStatsCounter.recordHits(hits);
3959     globalStatsCounter.recordMisses(misses);
3960     return ImmutableMap.copyOf(result);
3961   }
3962 
3963   ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
3964     int hits = 0;
3965     int misses = 0;
3966 
3967     Map<K, V> result = Maps.newLinkedHashMap();
3968     Set<K> keysToLoad = Sets.newLinkedHashSet();
3969     for (K key : keys) {
3970       V value = get(key);
3971       if (!result.containsKey(key)) {
3972         result.put(key, value);
3973         if (value == null) {
3974           misses++;
3975           keysToLoad.add(key);
3976         } else {
3977           hits++;
3978         }
3979       }
3980     }
3981 
3982     try {
3983       if (!keysToLoad.isEmpty()) {
3984         try {
3985           Map<K, V> newEntries = loadAll(keysToLoad, defaultLoader);
3986           for (K key : keysToLoad) {
3987             V value = newEntries.get(key);
3988             if (value == null) {
3989               throw new InvalidCacheLoadException("loadAll failed to return a value for " + key);
3990             }
3991             result.put(key, value);
3992           }
3993         } catch (UnsupportedLoadingOperationException e) {
3994           // loadAll not implemented, fallback to load
3995           for (K key : keysToLoad) {
3996             misses--; // get will count this miss
3997             result.put(key, get(key, defaultLoader));
3998           }
3999         }
4000       }
4001       return ImmutableMap.copyOf(result);
4002     } finally {
4003       globalStatsCounter.recordHits(hits);
4004       globalStatsCounter.recordMisses(misses);
4005     }
4006   }
4007 
4008   /**
4009    * Returns the result of calling {@link CacheLoader#loadAll}, or null if {@code loader} doesn't
4010    * implement {@code loadAll}.
4011    */
4012   @Nullable
4013   Map<K, V> loadAll(Set<? extends K> keys, CacheLoader<? super K, V> loader)
4014       throws ExecutionException {
4015     checkNotNull(loader);
4016     checkNotNull(keys);
4017     Stopwatch stopwatch = Stopwatch.createStarted();
4018     Map<K, V> result;
4019     boolean success = false;
4020     try {
4021       @SuppressWarnings("unchecked") // safe since all keys extend K
4022       Map<K, V> map = (Map<K, V>) loader.loadAll(keys);
4023       result = map;
4024       success = true;
4025     } catch (UnsupportedLoadingOperationException e) {
4026       success = true;
4027       throw e;
4028     } catch (InterruptedException e) {
4029       Thread.currentThread().interrupt();
4030       throw new ExecutionException(e);
4031     } catch (RuntimeException e) {
4032       throw new UncheckedExecutionException(e);
4033     } catch (Exception e) {
4034       throw new ExecutionException(e);
4035     } catch (Error e) {
4036       throw new ExecutionError(e);
4037     } finally {
4038       if (!success) {
4039         globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
4040       }
4041     }
4042 
4043     if (result == null) {
4044       globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
4045       throw new InvalidCacheLoadException(loader + " returned null map from loadAll");
4046     }
4047 
4048     stopwatch.stop();
4049     // TODO(fry): batch by segment
4050     boolean nullsPresent = false;
4051     for (Map.Entry<K, V> entry : result.entrySet()) {
4052       K key = entry.getKey();
4053       V value = entry.getValue();
4054       if (key == null || value == null) {
4055         // delay failure until non-null entries are stored
4056         nullsPresent = true;
4057       } else {
4058         put(key, value);
4059       }
4060     }
4061 
4062     if (nullsPresent) {
4063       globalStatsCounter.recordLoadException(stopwatch.elapsed(NANOSECONDS));
4064       throw new InvalidCacheLoadException(loader + " returned null keys or values from loadAll");
4065     }
4066 
4067     // TODO(fry): record count of loaded entries
4068     globalStatsCounter.recordLoadSuccess(stopwatch.elapsed(NANOSECONDS));
4069     return result;
4070   }
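
  /*
   * getAll above calls loadAll only for the keys that are absent, and falls back to one load call
   * per missing key when the loader does not override loadAll. A usage sketch of a bulk loader;
   * Row and fetchAllFromDatabase are hypothetical stand-ins:
   *
   *   LoadingCache<String, Row> cache = CacheBuilder.newBuilder()
   *       .maximumSize(10000)
   *       .build(new CacheLoader<String, Row>() {
   *         @Override public Row load(String key) throws Exception {
   *           return fetchAllFromDatabase(ImmutableList.of(key)).get(key);
   *         }
   *         @Override public Map<String, Row> loadAll(Iterable<? extends String> keys)
   *             throws Exception {
   *           return fetchAllFromDatabase(keys); // one round trip for all missing keys
   *         }
   *       });
   *
   *   ImmutableMap<String, Row> rows = cache.getAll(ImmutableList.of("a", "b", "c"));
   */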
4071 
4072   /**
4073    * Returns the internal entry for the specified key. The entry may be loading, expired, or
4074    * partially collected.
4075    */
4076   ReferenceEntry<K, V> getEntry(@Nullable Object key) {
4077     // does not impact recency ordering
4078     if (key == null) {
4079       return null;
4080     }
4081     int hash = hash(key);
4082     return segmentFor(hash).getEntry(key, hash);
4083   }
4084 
4085   void refresh(K key) {
4086     int hash = hash(checkNotNull(key));
4087     segmentFor(hash).refresh(key, hash, defaultLoader, false);
4088   }
4089 
4090   @Override
4091   public boolean containsKey(@Nullable Object key) {
4092     // does not impact recency ordering
4093     if (key == null) {
4094       return false;
4095     }
4096     int hash = hash(key);
4097     return segmentFor(hash).containsKey(key, hash);
4098   }
4099 
4100   @Override
4101   public boolean containsValue(@Nullable Object value) {
4102     // does not impact recency ordering
4103     if (value == null) {
4104       return false;
4105     }
4106 
4107     // This implementation is patterned after ConcurrentHashMap, but without the locking. The only
4108     // way for it to return a false negative would be for the target value to jump around in the map
4109     // such that none of the subsequent iterations observed it, despite the fact that at every point
4110    // in time it was present somewhere in the map. This becomes increasingly unlikely as
4111     // CONTAINS_VALUE_RETRIES increases, though without locking it is theoretically possible.
4112     long now = ticker.read();
4113     final Segment<K,V>[] segments = this.segments;
4114     long last = -1L;
4115     for (int i = 0; i < CONTAINS_VALUE_RETRIES; i++) {
4116       long sum = 0L;
4117       for (Segment<K, V> segment : segments) {
4118         // ensure visibility of most recent completed write
4119         @SuppressWarnings({"UnusedDeclaration", "unused"})
4120         int c = segment.count; // read-volatile
4121 
4122         AtomicReferenceArray<ReferenceEntry<K, V>> table = segment.table;
4123         for (int j = 0; j < table.length(); j++) {
4124           for (ReferenceEntry<K, V> e = table.get(j); e != null; e = e.getNext()) {
4125             V v = segment.getLiveValue(e, now);
4126             if (v != null && valueEquivalence.equivalent(value, v)) {
4127               return true;
4128             }
4129           }
4130         }
4131         sum += segment.modCount;
4132       }
4133       if (sum == last) {
4134         break;
4135       }
4136       last = sum;
4137     }
4138     return false;
4139   }
4140 
4141   @Override
4142   public V put(K key, V value) {
4143     checkNotNull(key);
4144     checkNotNull(value);
4145     int hash = hash(key);
4146     return segmentFor(hash).put(key, hash, value, false);
4147   }
4148 
4149   @Override
4150   public V putIfAbsent(K key, V value) {
4151     checkNotNull(key);
4152     checkNotNull(value);
4153     int hash = hash(key);
4154     return segmentFor(hash).put(key, hash, value, true);
4155   }
4156 
4157   @Override
4158   public void putAll(Map<? extends K, ? extends V> m) {
4159     for (Entry<? extends K, ? extends V> e : m.entrySet()) {
4160       put(e.getKey(), e.getValue());
4161     }
4162   }
4163 
4164   @Override
4165   public V remove(@Nullable Object key) {
4166     if (key == null) {
4167       return null;
4168     }
4169     int hash = hash(key);
4170     return segmentFor(hash).remove(key, hash);
4171   }
4172 
4173   @Override
4174   public boolean remove(@Nullable Object key, @Nullable Object value) {
4175     if (key == null || value == null) {
4176       return false;
4177     }
4178     int hash = hash(key);
4179     return segmentFor(hash).remove(key, hash, value);
4180   }
4181 
4182   @Override
4183   public boolean replace(K key, @Nullable V oldValue, V newValue) {
4184     checkNotNull(key);
4185     checkNotNull(newValue);
4186     if (oldValue == null) {
4187       return false;
4188     }
4189     int hash = hash(key);
4190     return segmentFor(hash).replace(key, hash, oldValue, newValue);
4191   }
4192 
4193   @Override
4194   public V replace(K key, V value) {
4195     checkNotNull(key);
4196     checkNotNull(value);
4197     int hash = hash(key);
4198     return segmentFor(hash).replace(key, hash, value);
4199   }
4200 
4201   @Override
4202   public void clear() {
4203     for (Segment<K, V> segment : segments) {
4204       segment.clear();
4205     }
4206   }
4207 
4208   void invalidateAll(Iterable<?> keys) {
4209     // TODO(fry): batch by segment
4210     for (Object key : keys) {
4211       remove(key);
4212     }
4213   }
4214 
4215   Set<K> keySet;
4216 
4217   @Override
4218   public Set<K> keySet() {
4219     // does not impact recency ordering
4220     Set<K> ks = keySet;
4221     return (ks != null) ? ks : (keySet = new KeySet(this));
4222   }
4223 
4224   Collection<V> values;
4225 
4226   @Override
4227   public Collection<V> values() {
4228     // does not impact recency ordering
4229     Collection<V> vs = values;
4230     return (vs != null) ? vs : (values = new Values(this));
4231   }
4232 
4233   Set<Entry<K, V>> entrySet;
4234 
4235   @Override
4236   @GwtIncompatible("Not supported.")
4237   public Set<Entry<K, V>> entrySet() {
4238     // does not impact recency ordering
4239     Set<Entry<K, V>> es = entrySet;
4240     return (es != null) ? es : (entrySet = new EntrySet(this));
4241   }
4242 
4243   // Iterator Support
4244 
4245   abstract class HashIterator<T> implements Iterator<T> {
4246 
4247     int nextSegmentIndex;
4248     int nextTableIndex;
4249     Segment<K, V> currentSegment;
4250     AtomicReferenceArray<ReferenceEntry<K, V>> currentTable;
4251     ReferenceEntry<K, V> nextEntry;
4252     WriteThroughEntry nextExternal;
4253     WriteThroughEntry lastReturned;
4254 
4255     HashIterator() {
4256       nextSegmentIndex = segments.length - 1;
4257       nextTableIndex = -1;
4258       advance();
4259     }
4260 
4261     @Override
4262     public abstract T next();
4263 
4264     final void advance() {
4265       nextExternal = null;
4266 
4267       if (nextInChain()) {
4268         return;
4269       }
4270 
4271       if (nextInTable()) {
4272         return;
4273       }
4274 
4275       while (nextSegmentIndex >= 0) {
4276         currentSegment = segments[nextSegmentIndex--];
4277         if (currentSegment.count != 0) {
4278           currentTable = currentSegment.table;
4279           nextTableIndex = currentTable.length() - 1;
4280           if (nextInTable()) {
4281             return;
4282           }
4283         }
4284       }
4285     }
4286 
4287     /**
4288      * Finds the next entry in the current chain. Returns true if an entry was found.
4289      */
4290     boolean nextInChain() {
4291       if (nextEntry != null) {
4292         for (nextEntry = nextEntry.getNext(); nextEntry != null; nextEntry = nextEntry.getNext()) {
4293           if (advanceTo(nextEntry)) {
4294             return true;
4295           }
4296         }
4297       }
4298       return false;
4299     }
4300 
4301     /**
4302      * Finds the next entry in the current table. Returns true if an entry was found.
4303      */
4304     boolean nextInTable() {
4305       while (nextTableIndex >= 0) {
4306         if ((nextEntry = currentTable.get(nextTableIndex--)) != null) {
4307           if (advanceTo(nextEntry) || nextInChain()) {
4308             return true;
4309           }
4310         }
4311       }
4312       return false;
4313     }
4314 
4315     /**
4316      * Advances to the given entry. Returns true if the entry was valid, false if it should be
4317      * skipped.
4318      */
4319     boolean advanceTo(ReferenceEntry<K, V> entry) {
4320       try {
4321         long now = ticker.read();
4322         K key = entry.getKey();
4323         V value = getLiveValue(entry, now);
4324         if (value != null) {
4325           nextExternal = new WriteThroughEntry(key, value);
4326           return true;
4327         } else {
4328           // Skip stale entry.
4329           return false;
4330         }
4331       } finally {
4332         currentSegment.postReadCleanup();
4333       }
4334     }
4335 
4336     @Override
4337     public boolean hasNext() {
4338       return nextExternal != null;
4339     }
4340 
4341     WriteThroughEntry nextEntry() {
4342       if (nextExternal == null) {
4343         throw new NoSuchElementException();
4344       }
4345       lastReturned = nextExternal;
4346       advance();
4347       return lastReturned;
4348     }
4349 
4350     @Override
4351     public void remove() {
4352       checkState(lastReturned != null);
4353       LocalCache.this.remove(lastReturned.getKey());
4354       lastReturned = null;
4355     }
4356   }
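
  /*
   * These iterators are weakly consistent: they walk each segment's table directly, skip entries
   * that are expired or collected at the moment they are visited, and never throw
   * ConcurrentModificationException; Iterator.remove writes through to the cache. A usage sketch
   * over the Map view, assuming a Cache<String, Integer> named cache built elsewhere:
   *
   *   ConcurrentMap<String, Integer> map = cache.asMap();
   *   for (Iterator<Entry<String, Integer>> it = map.entrySet().iterator(); it.hasNext();) {
   *     Entry<String, Integer> entry = it.next();
   *     if (entry.getValue() < 0) {
   *       it.remove();                          // removes the mapping from the cache itself
   *     }
   *   }
   */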
4357 
4358   final class KeyIterator extends HashIterator<K> {
4359 
4360     @Override
4361     public K next() {
4362       return nextEntry().getKey();
4363     }
4364   }
4365 
4366   final class ValueIterator extends HashIterator<V> {
4367 
4368     @Override
4369     public V next() {
4370       return nextEntry().getValue();
4371     }
4372   }
4373 
4374   /**
4375    * Custom Entry class used by EntryIterator.next(). Note that setValue is not supported;
4376    * modifications must be made directly on the underlying map.
4377    */
4378   final class WriteThroughEntry implements Entry<K, V> {
4379     final K key; // non-null
4380     V value; // non-null
4381 
4382     WriteThroughEntry(K key, V value) {
4383       this.key = key;
4384       this.value = value;
4385     }
4386 
4387     @Override
4388     public K getKey() {
4389       return key;
4390     }
4391 
4392     @Override
4393     public V getValue() {
4394       return value;
4395     }
4396 
4397     @Override
4398     public boolean equals(@Nullable Object object) {
4399       // Cannot use key and value equivalence
4400       if (object instanceof Entry) {
4401         Entry<?, ?> that = (Entry<?, ?>) object;
4402         return key.equals(that.getKey()) && value.equals(that.getValue());
4403       }
4404       return false;
4405     }
4406 
4407     @Override
4408     public int hashCode() {
4409       // Cannot use key and value equivalence
4410       return key.hashCode() ^ value.hashCode();
4411     }
4412 
4413     @Override
4414     public V setValue(V newValue) {
4415       throw new UnsupportedOperationException();
4416     }
4417 
4418     /**
4419      * Returns a string representation of the form <code>{key}={value}</code>.
4420      */
4421     @Override public String toString() {
4422       return getKey() + "=" + getValue();
4423     }
4424   }
4425 
4426   final class EntryIterator extends HashIterator<Entry<K, V>> {
4427 
4428     @Override
4429     public Entry<K, V> next() {
4430       return nextEntry();
4431     }
4432   }
4433 
4434   abstract class AbstractCacheSet<T> extends AbstractSet<T> {
4435     final ConcurrentMap<?, ?> map;
4436 
4437     AbstractCacheSet(ConcurrentMap<?, ?> map) {
4438       this.map = map;
4439     }
4440 
4441     @Override
4442     public int size() {
4443       return map.size();
4444     }
4445 
4446     @Override
4447     public boolean isEmpty() {
4448       return map.isEmpty();
4449     }
4450 
4451     @Override
4452     public void clear() {
4453       map.clear();
4454     }
4455   }
4456 
4457   final class KeySet extends AbstractCacheSet<K> {
4458 
4459     KeySet(ConcurrentMap<?, ?> map) {
4460       super(map);
4461     }
4462 
4463     @Override
4464     public Iterator<K> iterator() {
4465       return new KeyIterator();
4466     }
4467 
4468     @Override
4469     public boolean contains(Object o) {
4470       return map.containsKey(o);
4471     }
4472 
4473     @Override
4474     public boolean remove(Object o) {
4475       return map.remove(o) != null;
4476     }
4477   }
4478 
4479   final class Values extends AbstractCollection<V> {
4480     private final ConcurrentMap<?, ?> map;
4481 
4482     Values(ConcurrentMap<?, ?> map) {
4483       this.map = map;
4484     }
4485 
4486     @Override public int size() {
4487       return map.size();
4488     }
4489 
4490     @Override public boolean isEmpty() {
4491       return map.isEmpty();
4492     }
4493 
4494     @Override public void clear() {
4495       map.clear();
4496     }
4497 
4498     @Override
4499     public Iterator<V> iterator() {
4500       return new ValueIterator();
4501     }
4502 
4503     @Override
4504     public boolean contains(Object o) {
4505       return map.containsValue(o);
4506     }
4507   }
4508 
4509   final class EntrySet extends AbstractCacheSet<Entry<K, V>> {
4510 
4511     EntrySet(ConcurrentMap<?, ?> map) {
4512       super(map);
4513     }
4514 
4515     @Override
4516     public Iterator<Entry<K, V>> iterator() {
4517       return new EntryIterator();
4518     }
4519 
4520     @Override
4521     public boolean contains(Object o) {
4522       if (!(o instanceof Entry)) {
4523         return false;
4524       }
4525       Entry<?, ?> e = (Entry<?, ?>) o;
4526       Object key = e.getKey();
4527       if (key == null) {
4528         return false;
4529       }
4530       V v = LocalCache.this.get(key);
4531 
4532       return v != null && valueEquivalence.equivalent(e.getValue(), v);
4533     }
4534 
4535     @Override
4536     public boolean remove(Object o) {
4537       if (!(o instanceof Entry)) {
4538         return false;
4539       }
4540       Entry<?, ?> e = (Entry<?, ?>) o;
4541       Object key = e.getKey();
4542       return key != null && LocalCache.this.remove(key, e.getValue());
4543     }
4544   }
4545 
4546   // Serialization Support
4547 
4548   /**
4549    * Serializes the configuration of a LocalCache, reconstituting it as a Cache using
4550    * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace
4551    * of LocalManualCache.
4552    *
4553    * Unfortunately, readResolve() doesn't get called when a circular dependency is present, so the
4554    * proxy must be able to behave as the cache itself.
4555    */
4556   static class ManualSerializationProxy<K, V>
4557       extends ForwardingCache<K, V> implements Serializable {
4558     private static final long serialVersionUID = 1;
4559 
4560     final Strength keyStrength;
4561     final Strength valueStrength;
4562     final Equivalence<Object> keyEquivalence;
4563     final Equivalence<Object> valueEquivalence;
4564     final long expireAfterWriteNanos;
4565     final long expireAfterAccessNanos;
4566     final long maxWeight;
4567     final Weigher<K, V> weigher;
4568     final int concurrencyLevel;
4569     final RemovalListener<? super K, ? super V> removalListener;
4570     final Ticker ticker;
4571     final CacheLoader<? super K, V> loader;
4572 
4573     transient Cache<K, V> delegate;
4574 
4575     ManualSerializationProxy(LocalCache<K, V> cache) {
4576       this(
4577           cache.keyStrength,
4578           cache.valueStrength,
4579           cache.keyEquivalence,
4580           cache.valueEquivalence,
4581           cache.expireAfterWriteNanos,
4582           cache.expireAfterAccessNanos,
4583           cache.maxWeight,
4584           cache.weigher,
4585           cache.concurrencyLevel,
4586           cache.removalListener,
4587           cache.ticker,
4588           cache.defaultLoader);
4589     }
4590 
4591     private ManualSerializationProxy(
4592         Strength keyStrength, Strength valueStrength,
4593         Equivalence<Object> keyEquivalence, Equivalence<Object> valueEquivalence,
4594         long expireAfterWriteNanos, long expireAfterAccessNanos, long maxWeight,
4595         Weigher<K, V> weigher, int concurrencyLevel,
4596         RemovalListener<? super K, ? super V> removalListener,
4597         Ticker ticker, CacheLoader<? super K, V> loader) {
4598       this.keyStrength = keyStrength;
4599       this.valueStrength = valueStrength;
4600       this.keyEquivalence = keyEquivalence;
4601       this.valueEquivalence = valueEquivalence;
4602       this.expireAfterWriteNanos = expireAfterWriteNanos;
4603       this.expireAfterAccessNanos = expireAfterAccessNanos;
4604       this.maxWeight = maxWeight;
4605       this.weigher = weigher;
4606       this.concurrencyLevel = concurrencyLevel;
4607       this.removalListener = removalListener;
4608       this.ticker = (ticker == Ticker.systemTicker() || ticker == NULL_TICKER)
4609           ? null : ticker;
4610       this.loader = loader;
4611     }
4612 
4613     CacheBuilder<K, V> recreateCacheBuilder() {
4614       CacheBuilder<K, V> builder = CacheBuilder.newBuilder()
4615           .setKeyStrength(keyStrength)
4616           .setValueStrength(valueStrength)
4617           .keyEquivalence(keyEquivalence)
4618           .valueEquivalence(valueEquivalence)
4619           .concurrencyLevel(concurrencyLevel)
4620           .removalListener(removalListener);
4621       builder.strictParsing = false;
4622       if (expireAfterWriteNanos > 0) {
4623         builder.expireAfterWrite(expireAfterWriteNanos, TimeUnit.NANOSECONDS);
4624       }
4625       if (expireAfterAccessNanos > 0) {
4626         builder.expireAfterAccess(expireAfterAccessNanos, TimeUnit.NANOSECONDS);
4627       }
4628       if (weigher != OneWeigher.INSTANCE) {
4629         builder.weigher(weigher);
4630         if (maxWeight != UNSET_INT) {
4631           builder.maximumWeight(maxWeight);
4632         }
4633       } else {
4634         if (maxWeight != UNSET_INT) {
4635           builder.maximumSize(maxWeight);
4636         }
4637       }
4638       if (ticker != null) {
4639         builder.ticker(ticker);
4640       }
4641       return builder;
4642     }
4643 
4644     private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
4645       in.defaultReadObject();
4646       CacheBuilder<K, V> builder = recreateCacheBuilder();
4647       this.delegate = builder.build();
4648     }
4649 
4650     private Object readResolve() {
4651       return delegate;
4652     }
4653 
4654     @Override
4655     protected Cache<K, V> delegate() {
4656       return delegate;
4657     }
4658   }
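
  /*
   * Only the configuration survives serialization: readObject above rebuilds an empty cache from
   * the recorded CacheBuilder settings and readResolve returns that cache to the caller. A
   * round-trip sketch (the entries themselves are not expected to survive):
   *
   *   Cache<String, String> original = CacheBuilder.newBuilder().maximumSize(100).build();
   *   original.put("k", "v");
   *
   *   ByteArrayOutputStream bytes = new ByteArrayOutputStream();
   *   ObjectOutputStream out = new ObjectOutputStream(bytes);
   *   out.writeObject(original);
   *   out.close();
   *
   *   ObjectInputStream in = new ObjectInputStream(new ByteArrayInputStream(bytes.toByteArray()));
   *   Cache<?, ?> copy = (Cache<?, ?>) in.readObject();
   *   // copy keeps the maximumSize setting, but copy.getIfPresent("k") is null.
   */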
4659 
4660   /**
4661    * Serializes the configuration of a LocalCache, reconstituting it as a LoadingCache using
4662    * CacheBuilder upon deserialization. An instance of this class is fit for use by the writeReplace
4663    * of LocalLoadingCache.
4664    *
4665    * Unfortunately, readResolve() doesn't get called when a circular dependency is present, so the
4666    * proxy must be able to behave as the cache itself.
4667    */
4668   static final class LoadingSerializationProxy<K, V>
4669       extends ManualSerializationProxy<K, V> implements LoadingCache<K, V>, Serializable {
4670     private static final long serialVersionUID = 1;
4671 
4672     transient LoadingCache<K, V> autoDelegate;
4673 
4674     LoadingSerializationProxy(LocalCache<K, V> cache) {
4675       super(cache);
4676     }
4677 
4678     private void readObject(ObjectInputStream in) throws IOException, ClassNotFoundException {
4679       in.defaultReadObject();
4680       CacheBuilder<K, V> builder = recreateCacheBuilder();
4681       this.autoDelegate = builder.build(loader);
4682     }
4683 
4684     @Override
4685     public V get(K key) throws ExecutionException {
4686       return autoDelegate.get(key);
4687     }
4688 
4689     @Override
4690     public V getUnchecked(K key) {
4691       return autoDelegate.getUnchecked(key);
4692     }
4693 
4694     @Override
4695     public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
4696       return autoDelegate.getAll(keys);
4697     }
4698 
4699     @Override
4700     public final V apply(K key) {
4701       return autoDelegate.apply(key);
4702     }
4703 
4704     @Override
4705     public void refresh(K key) {
4706       autoDelegate.refresh(key);
4707     }
4708 
4709     private Object readResolve() {
4710       return autoDelegate;
4711     }
4712   }
4713 
4714   static class LocalManualCache<K, V> implements Cache<K, V>, Serializable {
4715     final LocalCache<K, V> localCache;
4716 
4717     LocalManualCache(CacheBuilder<? super K, ? super V> builder) {
4718       this(new LocalCache<K, V>(builder, null));
4719     }
4720 
4721     private LocalManualCache(LocalCache<K, V> localCache) {
4722       this.localCache = localCache;
4723     }
4724 
4725     // Cache methods
4726 
4727     @Override
4728     @Nullable
4729     public V getIfPresent(Object key) {
4730       return localCache.getIfPresent(key);
4731     }
4732 
4733     @Override
4734     public V get(K key, final Callable<? extends V> valueLoader) throws ExecutionException {
4735       checkNotNull(valueLoader);
4736       return localCache.get(key, new CacheLoader<Object, V>() {
4737         @Override
4738         public V load(Object key) throws Exception {
4739           return valueLoader.call();
4740         }
4741       });
4742     }
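
    /*
     * A usage sketch of the Callable overload above; the Callable runs only if the key is absent,
     * and concurrent callers for the same key wait for a single load. Value, key and
     * createExpensiveValue are hypothetical stand-ins:
     *
     *   Cache<String, Value> cache = CacheBuilder.newBuilder().maximumSize(1000).build();
     *
     *   Value value = cache.get(key, new Callable<Value>() {
     *     @Override public Value call() throws Exception {
     *       return createExpensiveValue(key);
     *     }
     *   });
     */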
4743 
4744     @Override
4745     public ImmutableMap<K, V> getAllPresent(Iterable<?> keys) {
4746       return localCache.getAllPresent(keys);
4747     }
4748 
4749     @Override
4750     public void put(K key, V value) {
4751       localCache.put(key, value);
4752     }
4753 
4754     @Override
4755     public void putAll(Map<? extends K, ? extends V> m) {
4756       localCache.putAll(m);
4757     }
4758 
4759     @Override
4760     public void invalidate(Object key) {
4761       checkNotNull(key);
4762       localCache.remove(key);
4763     }
4764 
4765     @Override
4766     public void invalidateAll(Iterable<?> keys) {
4767       localCache.invalidateAll(keys);
4768     }
4769 
4770     @Override
4771     public void invalidateAll() {
4772       localCache.clear();
4773     }
4774 
4775     @Override
4776     public long size() {
4777       return localCache.longSize();
4778     }
4779 
4780     @Override
4781     public ConcurrentMap<K, V> asMap() {
4782       return localCache;
4783     }
4784 
4785     @Override
4786     public CacheStats stats() {
4787       SimpleStatsCounter aggregator = new SimpleStatsCounter();
4788       aggregator.incrementBy(localCache.globalStatsCounter);
4789       for (Segment<K, V> segment : localCache.segments) {
4790         aggregator.incrementBy(segment.statsCounter);
4791       }
4792       return aggregator.snapshot();
4793     }
4794 
4795     @Override
4796     public void cleanUp() {
4797       localCache.cleanUp();
4798     }
4799 
4800     // Serialization Support
4801 
4802     private static final long serialVersionUID = 1;
4803 
4804     Object writeReplace() {
4805       return new ManualSerializationProxy<K, V>(localCache);
4806     }
4807   }
4808 
4809   static class LocalLoadingCache<K, V>
4810       extends LocalManualCache<K, V> implements LoadingCache<K, V> {
4811 
4812     LocalLoadingCache(CacheBuilder<? super K, ? super V> builder,
4813         CacheLoader<? super K, V> loader) {
4814       super(new LocalCache<K, V>(builder, checkNotNull(loader)));
4815     }
4816 
4817     // LoadingCache methods
4818 
4819     @Override
4820     public V get(K key) throws ExecutionException {
4821       return localCache.getOrLoad(key);
4822     }
4823 
4824     @Override
4825     public V getUnchecked(K key) {
4826       try {
4827         return get(key);
4828       } catch (ExecutionException e) {
4829         throw new UncheckedExecutionException(e.getCause());
4830       }
4831     }
4832 
4833     @Override
4834     public ImmutableMap<K, V> getAll(Iterable<? extends K> keys) throws ExecutionException {
4835       return localCache.getAll(keys);
4836     }
4837 
4838     @Override
4839     public void refresh(K key) {
4840       localCache.refresh(key);
4841     }
4842 
4843     @Override
4844     public final V apply(K key) {
4845       return getUnchecked(key);
4846     }
4847 
4848     // Serialization Support
4849 
4850     private static final long serialVersionUID = 1;
4851 
4852     @Override
4853     Object writeReplace() {
4854       return new LoadingSerializationProxy<K, V>(localCache);
4855     }
4856   }
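
  /*
   * A usage sketch of the loading variant above: get propagates loader failures as a checked
   * ExecutionException, while getUnchecked rethrows them wrapped in UncheckedExecutionException,
   * so getUnchecked is the convenient choice when the loader throws no checked exceptions.
   * Key, Value and computeValue are hypothetical stand-ins:
   *
   *   LoadingCache<Key, Value> cache = CacheBuilder.newBuilder()
   *       .maximumSize(1000)
   *       .expireAfterWrite(10, TimeUnit.MINUTES)
   *       .build(new CacheLoader<Key, Value>() {
   *         @Override public Value load(Key key) {
   *           return computeValue(key);
   *         }
   *       });
   *
   *   Value v = cache.getUnchecked(key);
   */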
4857 }