Commit 46232542 authored by Mattias Persson

Merge remote-tracking branch 'upstream/2.2' into 2.3

parents 2ad8c1f5 fa2b50ea
Showing with 81 additions and 5 deletions
@@ -66,6 +66,12 @@ public abstract class AbstractHopScotchCollection<VALUE> implements PrimitiveCol
         this.table = newTable;
     }
 
+    @Override
+    public Table<VALUE> getLastTable()
+    {
+        return table;
+    }
+
     @Override
     public void close()
     {
@@ -179,7 +179,8 @@ public class HopScotchHashingAlgorithm
         }
 
         // we couldn't add this value, even in the H-1 neighborhood, so grow table...
-        Table<VALUE> resizedTable = growTable( table, monitor, hashFunction, resizeMonitor );
+        growTable( table, monitor, hashFunction, resizeMonitor );
+        Table<VALUE> resizedTable = resizeMonitor.getLastTable();
 
         // ...and try again
         return put( resizedTable, monitor, hashFunction, key, value, resizeMonitor );
@@ -276,11 +277,14 @@ public class HopScotchHashingAlgorithm
         return hashFunction.hash( key ) & tableMask;
     }
 
-    private static <VALUE> Table<VALUE> growTable( Table<VALUE> oldTable, Monitor monitor,
+    private static <VALUE> void growTable( Table<VALUE> oldTable, Monitor monitor,
             HashFunction hashFunction, ResizeMonitor<VALUE> resizeMonitor )
     {
         assert monitor.tableGrowing( oldTable.capacity(), oldTable.size() );
         Table<VALUE> newTable = oldTable.grow();
+        // Install the new table before populating it with the old data, in case we find it needs to grow even more
+        // while we are populating it. If that happens, we want to end up with the table installed by the final grow.
+        resizeMonitor.tableGrew( newTable );
         long nullKey = oldTable.nullKey();
 
         // place all entries in the new table
@@ -290,17 +294,20 @@ public class HopScotchHashingAlgorithm
             long key = oldTable.key( i );
             if ( key != nullKey )
             {
-                VALUE putResult = put( newTable, monitor, hashFunction, key, oldTable.value( i ), resizeMonitor );
+                // Always use the table from the resize monitor, because any put can cause a grow.
+                Table<VALUE> table = resizeMonitor.getLastTable();
+                VALUE putResult = put( table, monitor, hashFunction, key, oldTable.value( i ), resizeMonitor );
                 if ( putResult != null )
                 {
+                    // If we somehow fail to populate the new table, reinstall the old one.
+                    resizeMonitor.tableGrew( oldTable );
+                    newTable.close();
                     throw new IllegalStateException( "Couldn't add " + key + " when growing table" );
                 }
             }
         }
         assert monitor.tableGrew( oldTable.capacity(), newTable.capacity(), newTable.size() );
-        resizeMonitor.tableGrew( newTable );
         oldTable.close();
-        return newTable;
     }
 
     /**
@@ -411,5 +418,7 @@ public class HopScotchHashingAlgorithm
     public interface ResizeMonitor<VALUE>
     {
         void tableGrew( Table<VALUE> newTable );
+
+        Table<VALUE> getLastTable();
     }
 }
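In short, growTable() no longer returns the table it creates: each grow installs its new table in the ResizeMonitor right away, and both put() and the repopulation loop re-read getLastTable() before writing, so a nested grow cannot leave a caller working against a stale table. Below is a minimal, self-contained sketch of that install-before-populate pattern; ToyTable, ToyResizeMonitor and GrowBeforePopulateSketch are hypothetical stand-ins, not the Neo4j types in this diff.

import java.util.HashMap;
import java.util.Map;

class ToyTable
{
    final int capacity;
    final Map<Long,Integer> entries = new HashMap<>();

    ToyTable( int capacity )
    {
        this.capacity = capacity;
    }

    ToyTable grow()
    {
        return new ToyTable( capacity * 2 );
    }
}

class ToyResizeMonitor
{
    private ToyTable last;

    void tableGrew( ToyTable newTable )
    {
        last = newTable;
    }

    ToyTable getLastTable()
    {
        return last;
    }
}

public class GrowBeforePopulateSketch
{
    // Install the new table in the monitor *before* copying entries over, so that if the copy
    // itself were to trigger another grow, the monitor would end up holding the final table.
    static void grow( ToyTable oldTable, ToyResizeMonitor resizeMonitor )
    {
        ToyTable newTable = oldTable.grow();
        resizeMonitor.tableGrew( newTable );
        for ( Map.Entry<Long,Integer> entry : oldTable.entries.entrySet() )
        {
            // Always write into the monitor's current table, which may already be a later
            // generation than newTable if a nested grow happened in the meantime.
            resizeMonitor.getLastTable().entries.put( entry.getKey(), entry.getValue() );
        }
    }

    public static void main( String[] args )
    {
        ToyResizeMonitor resizeMonitor = new ToyResizeMonitor();
        ToyTable table = new ToyTable( 32 );
        resizeMonitor.tableGrew( table );
        table.entries.put( 44357L, 1 );

        grow( resizeMonitor.getLastTable(), resizeMonitor );

        // The caller never trusts a return value from grow(); it asks the monitor instead.
        System.out.println( resizeMonitor.getLastTable().entries.containsKey( 44357L ) ); // true
    }
}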
@@ -21,8 +21,12 @@ package org.neo4j.collection.primitive.hopscotch;
 
 import org.junit.Test;
 
+import java.util.Arrays;
 import java.util.HashMap;
+import java.util.HashSet;
+import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.atomic.AtomicInteger;
 
 import org.neo4j.collection.primitive.Primitive;
@@ -1268,4 +1272,61 @@ public class PrimitiveLongMapTest
         // THEN
         assertThat( counter.get(), is( 3 ) );
     }
+
+    @Test
+    public void recursivePutGrowInterleavingShouldNotDropOriginalValues()
+    {
+        // List of values which causes put() to call grow(), which will call put(), which calls grow() again
+        List<Long> lst = Arrays.asList(
+                44988L, 44868L, 44271L, 44399L, 44502L, 44655L, 44348L, 44843L,
+                44254L, 44774L, 44476L, 44664L, 44485L, 44237L, 44953L, 44468L,
+                44970L, 44808L, 44527L, 44987L, 44672L, 44647L, 44467L, 44825L,
+                44740L, 44220L, 44851L, 44902L, 44791L, 44416L, 44365L, 44382L,
+                44885L, 44510L, 44553L, 44894L, 44288L, 44306L, 44450L, 44689L,
+                44305L, 44374L, 44323L, 44493L, 44706L, 44681L, 44578L, 44723L,
+                44331L, 44936L, 44289L, 44919L, 44433L, 44826L, 44757L, 44561L,
+                44595L, 44612L, 44996L, 44646L, 44834L, 44314L, 44544L, 44629L,
+                44357L // <-- this value will cause a grow, which during new table population will cause another grow.
+        );
+        verifyMapRetainsAllEntries( lst );
+    }
+
+    @Test
+    public void recursivePutGrowInterleavingShouldNotDropOriginalValuesEvenWhenFirstGrowAddsMoreValuesAfterSecondGrow()
+            throws Exception
+    {
+        // List of values that cause recursive growth like above, but this time the first grow wants to add more
+        // values to the table *after* the second grow has occurred.
+        List<Long> lst = Arrays.asList(
+                85380L, 85124L, 85252L, 85259L, 85005L, 85260L, 85132L, 85141L,
+                85397L, 85013L, 85269L, 85277L, 85149L, 85404L, 85022L, 85150L,
+                85029L, 85414L, 85158L, 85286L, 85421L, 85039L, 85167L, 85294L,
+                85166L, 85431L, 85303L, 85046L, 85311L, 85439L, 85438L, 85184L,
+                85056L, 85063L, 85320L, 85448L, 85201L, 85073L, 85329L, 85456L,
+                85328L, 85337L, 85081L, 85465L, 85080L, 85208L, 85473L, 85218L,
+                85346L, 85090L, 85097L, 85225L, 85354L, 85098L, 85482L, 85235L,
+                85363L, 85107L, 85490L, 85115L, 85499L, 85242L, 85175L, 85371L,
+                85192L // <-- this value will cause a grow, which during new table population will cause another grow.
+        );
+        verifyMapRetainsAllEntries( lst );
+    }
+
+    private void verifyMapRetainsAllEntries( List<Long> lst )
+    {
+        PrimitiveLongIntMap map = Primitive.longIntMap();
+        Set<Long> set = new HashSet<>();
+
+        for ( Long value : lst )
+        {
+            assertThat( map.put( value, 1 ), is( -1 ) );
+            assertTrue( set.add( value ) );
+        }
+
+        assertThat( map.size(), is( set.size() ) );
+
+        for ( Long aLong : set )
+        {
+            assertThat( map.get( aLong ), is( 1 ) );
+        }
+    }
 }
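The two hand-picked key lists above reproduce specific grow-during-grow sequences deterministically. A blunter, randomized way to exercise the same growth paths is sketched below; it reuses the Primitive.longIntMap() factory used by the test, while the class name, seed, round count and key ranges are assumptions for illustration, not part of this commit.

import java.util.HashSet;
import java.util.Random;
import java.util.Set;

import org.neo4j.collection.primitive.Primitive;
import org.neo4j.collection.primitive.PrimitiveLongIntMap;

public class RandomizedRetentionStress
{
    public static void main( String[] args )
    {
        Random random = new Random( 1234 );
        for ( int round = 0; round < 1_000; round++ )
        {
            PrimitiveLongIntMap map = Primitive.longIntMap();
            Set<Long> expected = new HashSet<>();

            // Insert enough distinct keys to force several table grows per round.
            while ( expected.size() < 5_000 )
            {
                long key = random.nextInt( 100_000 );
                if ( expected.add( key ) )
                {
                    map.put( key, 1 );
                }
            }

            // Every key inserted before, during and after growth must still be present.
            for ( long key : expected )
            {
                if ( map.get( key ) != 1 )
                {
                    throw new AssertionError( "Lost key " + key + " in round " + round );
                }
            }
        }
        System.out.println( "All rounds retained every key" );
    }
}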