mikemccand commented on a change in pull request #2459:
URL: https://github.com/apache/lucene-solr/pull/2459#discussion_r589079453



##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();

Review comment:
       You might want to fail-fast(er) here by checking `if (c != lastChar && 
!collision) { return null; }`?  It might give a small speedup for words not in the 
dictionary, though that is likely the rare case (most lookups are for words that 
exist, or that share a suffix with an existing word)?
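
A minimal sketch of where that early exit would sit, mirroring the quoted hunk (an illustration of the suggestion only, not the PR's actual change):

```java
in.setPosition(pos);
char c = (char) in.readVInt();
int prevPos = pos - in.readVInt();

// Suggested fail-fast: with no collision in this hash bucket, a mismatched last
// character already rules out a match, so return before walking the whole prefix.
if (c != lastChar && !collision) {
  return null;
}
```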

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();
+
+          commonPrefixLength = GeneratingSuggester.commonPrefix(currentEntry, 
entry);
+          ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+          in.setPosition(pos);
+          for (int i = currentEntry.length() - 1; i >= commonPrefixLength; 
i--) {
+            char c = (char) in.readVInt();
+            assert c == currentEntry.charAt(i);
+            pos -= in.readVInt();
+            in.setPosition(pos);
+          }
+          commonPrefixPos = pos;
+        }
+        currentEntry = entry;
+      }
+
+      group.add(flags);
+      if (hasCustomMorphData) {
+        morphDataIDs.add(morphDataID);
+      }
+    }
+
+    private int flushGroup() throws IOException {
+      IntsRefBuilder currentOrds = new IntsRefBuilder();
+
+      boolean hasNonHidden = false;
+      for (char[] flags : group) {
+        if (!hasHiddenFlag(flags)) {
+          hasNonHidden = true;
+          break;
+        }
+      }
+
+      for (int i = 0; i < group.size(); i++) {
+        char[] flags = group.get(i);
+        if (hasNonHidden && hasHiddenFlag(flags)) {
+          continue;
+        }
+
+        currentOrds.append(flagEnumerator.add(flags));
+        if (hasCustomMorphData) {
+          currentOrds.append(morphDataIDs.get(i));
+        }
+      }
+
+      int lastPos = commonPrefixPos;

Review comment:
       Maybe add `// write new suffix for this entry` comment?

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;

Review comment:
       Instead of burning a whole byte to represent "next entry still has a 
collision", you could steal one bit from the `delta pos` you read next?  It'd 
make this a bit more compact, and would be consistent with the up-front `pos < 
0` collision check, which is already a single-bit encoding.
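
A hedged sketch of what that single-bit packing could look like; the helper names are made up for illustration and the exact layout is an assumption, not the PR's format:

```java
// Writer side: pack the "next entry also collides" flag into the low bit of the
// relative delta, so one VInt carries both pieces of information.
private static void writeCollisionLink(DataOutput out, int delta, boolean moreCollisions)
    throws IOException {
  out.writeVInt((delta << 1) | (moreCollisions ? 1 : 0));
}

// Reader side: a single readVInt() replaces the readByte() + readVInt() pair.
private static int readCollisionDelta(ByteArrayDataInput in, boolean[] moreCollisions) {
  int packed = in.readVInt();
  moreCollisions[0] = (packed & 1) == 1;
  return packed >>> 1; // relative delta to subtract from pos
}
```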

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {

Review comment:
       Maybe some brief javadoc/comment saying these entries must be added in UTF-16 sorted order?
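
For example, something like this on `add` (wording is only a suggestion, based on the out-of-order check visible in the quoted code):

```java
/**
 * Adds a dictionary entry. Entries must be added in {@link String#compareTo}
 * (UTF-16 code unit) order; an out-of-order entry triggers an
 * {@link IllegalArgumentException}.
 */
void add(String entry, char[] flags, int morphDataID) throws IOException {
```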

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;

Review comment:
       Hmm, I thought I saw a comment saying we don't support the empty string?  
Should we `assert false` here?
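
If that final return really is unreachable for valid input, the suggestion could be made explicit like this (only a sketch, and only correct under that assumption):

```java
// Hypothetical: empty words are never stored, so a zero-length comparison here
// would indicate a bug; the assert documents that expectation without changing behavior.
assert length > 0 : "empty-string lookup is not supported";
return length == 0;
```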

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{

Review comment:
       It looks like `maxLength` means "skip any words > `maxLength`", and 
"process all words <= `maxLength`"?  Maybe state that in the javadoc too?

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {

Review comment:
       Hmm, if `wordStart == 0` we skip the word?  But shouldn't we accept it?  
Isn't that the `length == maxLength` case?
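
If words of exactly `maxLength` characters should indeed be enumerated, one possible adjustment (a sketch of the question above, not necessarily the PR's resolution) is to accept the word whenever the backward walk reached the root, rather than keying on leftover buffer space:

```java
// prevPos == 0 means the backward walk reached the root, i.e. the full word
// (possibly exactly maxLength chars long) now sits in chars.chars[wordStart..maxLength).
if (prevPos == 0) {
  chars.offset = wordStart;
  chars.length = maxLength - wordStart;
  processor.accept(chars, forms);
}
```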

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a 
single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} VINT format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {

Review comment:
       Hmm, does this really allow adding the same `String entry` multiple 
times?  I thought not?  This is effectively a compact/fast `Map<String,int[]>`?  
Maybe throw an exception in the `else` clause of this `if`?
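
Taken literally, the suggestion would look roughly like this; note this is only a sketch, and the flag grouping right below hints that repeated entries may actually be intended, in which case no exception belongs here:

```java
if (!entry.equals(currentEntry)) {
  // existing handling of a new entry: flush the previous group, find the common prefix
  currentEntry = entry;
} else {
  // hypothetical guard, valid only if the same entry is truly never added twice
  throw new IllegalArgumentException("duplicate entry: " + entry);
}
```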

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each nodes points to a 
single previous nodes
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} ()} VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();

Review comment:
       If you are willing to say "no more than 256 forms", you could make this a `.readByte()`, which goes a bit faster than `.readVInt()`, maybe.
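   
   For illustration, a byte-sized form count could look roughly like the sketch below, assuming the count is capped so it fits in one unsigned byte (the `ByteLengthFormCount` helper and its methods are hypothetical names; `DataInput`, `DataOutput` and `Byte.toUnsignedInt` are real APIs):
   
```java
import java.io.IOException;
import org.apache.lucene.store.DataInput;
import org.apache.lucene.store.DataOutput;

class ByteLengthFormCount {
  // Writing side: cap the form count so it always fits in one unsigned byte.
  static void write(DataOutput out, int formCount) throws IOException {
    if (formCount > 255) {
      throw new IllegalArgumentException("too many forms: " + formCount);
    }
    out.writeByte((byte) formCount);
  }

  // Reading side: one fixed-size read instead of a VInt decode.
  static int read(DataInput in) throws IOException {
    return Byte.toUnsignedInt(in.readByte());
  }
}
```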

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each nodes points to a 
single previous nodes
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} ()} VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();
+
+          commonPrefixLength = GeneratingSuggester.commonPrefix(currentEntry, 
entry);

Review comment:
       Hmm, it's a bit odd that `commonPrefixLength` is a member of this class instead of a local variable -- could we just pass it to `flushGroup`?
   
   Edit: sorry, never mind: there is an off-by-one here, where we need the `commonPrefixLength` of the *last* entry before the current one that differs from the previous "group", OK.

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each nodes points to a 
single previous nodes
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} ()} VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();
+
+          commonPrefixLength = GeneratingSuggester.commonPrefix(currentEntry, 
entry);
+          ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+          in.setPosition(pos);
+          for (int i = currentEntry.length() - 1; i >= commonPrefixLength; 
i--) {
+            char c = (char) in.readVInt();
+            assert c == currentEntry.charAt(i);
+            pos -= in.readVInt();
+            in.setPosition(pos);
+          }
+          commonPrefixPos = pos;
+        }
+        currentEntry = entry;
+      }
+
+      group.add(flags);
+      if (hasCustomMorphData) {
+        morphDataIDs.add(morphDataID);
+      }
+    }
+
+    private int flushGroup() throws IOException {
+      IntsRefBuilder currentOrds = new IntsRefBuilder();
+
+      boolean hasNonHidden = false;
+      for (char[] flags : group) {
+        if (!hasHiddenFlag(flags)) {
+          hasNonHidden = true;
+          break;
+        }
+      }
+
+      for (int i = 0; i < group.size(); i++) {
+        char[] flags = group.get(i);
+        if (hasNonHidden && hasHiddenFlag(flags)) {
+          continue;
+        }
+
+        currentOrds.append(flagEnumerator.add(flags));
+        if (hasCustomMorphData) {
+          currentOrds.append(morphDataIDs.get(i));
+        }
+      }
+
+      int lastPos = commonPrefixPos;
+      for (int i = commonPrefixLength; i < currentEntry.length() - 1; i++) {
+        int pos = dataWriter.getPosition();
+        ensureArraySize(0, false);
+        dataWriter.writeVInt(currentEntry.charAt(i));
+        dataWriter.writeVInt(pos - lastPos);
+        lastPos = pos;
+      }
+
+      int pos = dataWriter.getPosition();
+      int hash = Math.abs(currentEntry.hashCode() % hashTable.length);
+      int collision = hashTable[hash];
+      hashTable[hash] = collision == 0 ? pos : -pos;
+
+      if (++chainLengths[hash] > 20) {
+        throw new RuntimeException(
+            "Too many collisions, please report this to 
d...@lucene.apache.org");
+      }
+
+      ensureArraySize(currentOrds.length(), collision != 0);
+      dataWriter.writeVInt(currentEntry.charAt(currentEntry.length() - 1));
+      dataWriter.writeVInt(pos - lastPos);
+      IntSequenceOutputs.getSingleton().write(currentOrds.get(), dataWriter);
+      if (collision != 0) {
+        dataWriter.writeByte(collision < 0 ? (byte) 1 : 0);
+        dataWriter.writeVInt(pos - Math.abs(collision));
+      }
+
+      group.clear();
+      morphDataIDs.clear();
+      return pos;
+    }
+
+    private void ensureArraySize(int valueLength, boolean hasCollision) {
+      int pos = dataWriter.getPosition();
+      int maxEntrySize = 8 + 4 * (valueLength + 1) + (hasCollision ? 5 : 0);

Review comment:
       This math looks maybe a bit fragile -- could we instead just call `ArrayUtil.grow` every time we try to append a byte to `wordData`?  I think we are not too concerned with the CPU cost of building a new dictionary, right?  We build once and then `.get` zillions of times?
   
   Hmm, I thought Lucene had something like `ByteArrayDataOutput` that already did this under the hood, but I looked and I'm not sure it does!  We should add that!  I do see `ByteArrayDataOutput.reset(...)`, so the caller could handle the growing on the outside.
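   
   For example, a self-growing `DataOutput` along these lines would hide the growth entirely (a sketch only: `AutoGrowingDataOutput` is a hypothetical name, not an existing Lucene class, while `DataOutput`, `ArrayUtil.grow` and `ArrayUtil.copyOfSubArray` are real APIs):
   
```java
import java.io.IOException;
import org.apache.lucene.store.DataOutput;
import org.apache.lucene.util.ArrayUtil;

// Hypothetical growable output: the caller never pre-computes a max entry size.
class AutoGrowingDataOutput extends DataOutput {
  private byte[] bytes = new byte[16];
  private int pos = 0;

  @Override
  public void writeByte(byte b) throws IOException {
    bytes = ArrayUtil.grow(bytes, pos + 1); // grows geometrically when needed
    bytes[pos++] = b;
  }

  @Override
  public void writeBytes(byte[] src, int offset, int length) throws IOException {
    bytes = ArrayUtil.grow(bytes, pos + length);
    System.arraycopy(src, offset, bytes, pos, length);
    pos += length;
  }

  int getPosition() {
    return pos;
  }

  byte[] toArray() {
    return ArrayUtil.copyOfSubArray(bytes, 0, pos); // trimmed copy of what was written
  }
}
```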

##########
File path: 
lucene/analysis/common/src/test/org/apache/lucene/analysis/hunspell/TestPerformance.java
##########
@@ -57,12 +57,12 @@ public static void resolveCorpora() {
 
   @Test
   public void en() throws Exception {
-    checkAnalysisPerformance("en", 1_000_000);
+    checkAnalysisPerformance("en", 1_200_000);
   }
 
   @Test
   public void en_suggest() throws Exception {
-    checkSuggestionPerformance("en", 1_200);
+    checkSuggestionPerformance("en", 3_000);

Review comment:
       Hmm, what are these magic constants?  And why does this change mean they should increase?

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each nodes points to a 
single previous nodes
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} ()} VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();
+
+          commonPrefixLength = GeneratingSuggester.commonPrefix(currentEntry, 
entry);
+          ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+          in.setPosition(pos);
+          for (int i = currentEntry.length() - 1; i >= commonPrefixLength; 
i--) {
+            char c = (char) in.readVInt();
+            assert c == currentEntry.charAt(i);
+            pos -= in.readVInt();
+            in.setPosition(pos);
+          }
+          commonPrefixPos = pos;
+        }
+        currentEntry = entry;
+      }
+
+      group.add(flags);
+      if (hasCustomMorphData) {
+        morphDataIDs.add(morphDataID);
+      }
+    }
+
+    private int flushGroup() throws IOException {
+      IntsRefBuilder currentOrds = new IntsRefBuilder();
+
+      boolean hasNonHidden = false;
+      for (char[] flags : group) {
+        if (!hasHiddenFlag(flags)) {
+          hasNonHidden = true;
+          break;
+        }
+      }
+
+      for (int i = 0; i < group.size(); i++) {
+        char[] flags = group.get(i);
+        if (hasNonHidden && hasHiddenFlag(flags)) {
+          continue;
+        }
+
+        currentOrds.append(flagEnumerator.add(flags));
+        if (hasCustomMorphData) {
+          currentOrds.append(morphDataIDs.get(i));
+        }
+      }
+
+      int lastPos = commonPrefixPos;
+      for (int i = commonPrefixLength; i < currentEntry.length() - 1; i++) {
+        int pos = dataWriter.getPosition();
+        ensureArraySize(0, false);
+        dataWriter.writeVInt(currentEntry.charAt(i));
+        dataWriter.writeVInt(pos - lastPos);
+        lastPos = pos;
+      }
+
+      int pos = dataWriter.getPosition();
+      int hash = Math.abs(currentEntry.hashCode() % hashTable.length);
+      int collision = hashTable[hash];
+      hashTable[hash] = collision == 0 ? pos : -pos;
+
+      if (++chainLengths[hash] > 20) {
+        throw new RuntimeException(
+            "Too many collisions, please report this to 
d...@lucene.apache.org");
+      }
+
+      ensureArraySize(currentOrds.length(), collision != 0);
+      dataWriter.writeVInt(currentEntry.charAt(currentEntry.length() - 1));
+      dataWriter.writeVInt(pos - lastPos);
+      IntSequenceOutputs.getSingleton().write(currentOrds.get(), dataWriter);
+      if (collision != 0) {
+        dataWriter.writeByte(collision < 0 ? (byte) 1 : 0);
+        dataWriter.writeVInt(pos - Math.abs(collision));
+      }
+
+      group.clear();
+      morphDataIDs.clear();
+      return pos;
+    }
+
+    private void ensureArraySize(int valueLength, boolean hasCollision) {
+      int pos = dataWriter.getPosition();
+      int maxEntrySize = 8 + 4 * (valueLength + 1) + (hasCollision ? 5 : 0);
+      while (wordData.length < pos + maxEntrySize) {
+        wordData = ArrayUtil.grow(wordData);
+        dataWriter.reset(wordData, pos, wordData.length - pos);
+      }
+    }
+
+    private static boolean hasHiddenFlag(char[] flags) {
+      for (char flag : flags) {
+        if (flag == Dictionary.HIDDEN_FLAG) {
+          return true;
+        }
+      }
+      return false;
+    }
+
+    WordStorage build() throws IOException {
+      flushGroup();

Review comment:
       Maybe confirm this is only called once, at the end?
   
   Also, should we confirm that the up-front declared `wordCount` matches exactly how many (unique) words we actually saw?
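   
   The shape of those checks might be roughly the following (a sketch only; the `built` flag and the `uniqueEntries` counter are hypothetical additions to `Builder`, not part of the patch):
   
```java
// At the top of build(), guard against a second call:
if (built) {
  throw new IllegalStateException("build() may only be called once");
}
built = true;

// After the final flushGroup(), assuming flushGroup() increments 'uniqueEntries'
// once per distinct entry, verify the up-front declared word count:
if (uniqueEntries != hashTable.length) {
  throw new IllegalStateException(
      "declared wordCount=" + hashTable.length + " but indexed " + uniqueEntries + " unique words");
}
```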

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each nodes points to a 
single previous nodes
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} ()} VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();

Review comment:
       Ahh, it looks like we do indeed allow adding the same `String entry` multiple times, cool!  If this is common, maybe we should add a dedicated, first-class `add` overload that takes a `String entry` and multiple `flags`/`morphDataID`s.
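   
   A possible shape for that overload, sketched only to illustrate the idea (the batch signature is an assumption; parameter types follow the existing `add`):
   
```java
// Hypothetical batch overload: one sorted entry together with all of its flag sets.
void add(String entry, List<char[]> flagSets, List<Integer> morphDataIDs) throws IOException {
  for (int i = 0; i < flagSets.size(); i++) {
    // Delegates to the existing per-form add; 0 is a placeholder when there is no custom morph data.
    add(entry, flagSets.get(i), hasCustomMorphData ? morphDataIDs.get(i) : 0);
  }
}
```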

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each nodes points to a 
single previous nodes
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt} ()} VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();
+
+          commonPrefixLength = GeneratingSuggester.commonPrefix(currentEntry, 
entry);
+          ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+          in.setPosition(pos);
+          for (int i = currentEntry.length() - 1; i >= commonPrefixLength; 
i--) {
+            char c = (char) in.readVInt();
+            assert c == currentEntry.charAt(i);
+            pos -= in.readVInt();
+            in.setPosition(pos);
+          }
+          commonPrefixPos = pos;
+        }
+        currentEntry = entry;
+      }
+
+      group.add(flags);
+      if (hasCustomMorphData) {
+        morphDataIDs.add(morphDataID);
+      }
+    }
+
+    private int flushGroup() throws IOException {
+      IntsRefBuilder currentOrds = new IntsRefBuilder();
+
+      boolean hasNonHidden = false;
+      for (char[] flags : group) {
+        if (!hasHiddenFlag(flags)) {
+          hasNonHidden = true;
+          break;
+        }
+      }
+
+      for (int i = 0; i < group.size(); i++) {
+        char[] flags = group.get(i);
+        if (hasNonHidden && hasHiddenFlag(flags)) {
+          continue;
+        }
+
+        currentOrds.append(flagEnumerator.add(flags));
+        if (hasCustomMorphData) {
+          currentOrds.append(morphDataIDs.get(i));
+        }
+      }
+
+      int lastPos = commonPrefixPos;
+      for (int i = commonPrefixLength; i < currentEntry.length() - 1; i++) {
+        int pos = dataWriter.getPosition();
+        ensureArraySize(0, false);
+        dataWriter.writeVInt(currentEntry.charAt(i));
+        dataWriter.writeVInt(pos - lastPos);
+        lastPos = pos;
+      }
+
+      int pos = dataWriter.getPosition();
+      int hash = Math.abs(currentEntry.hashCode() % hashTable.length);
+      int collision = hashTable[hash];
+      hashTable[hash] = collision == 0 ? pos : -pos;
+
+      if (++chainLengths[hash] > 20) {
+        throw new RuntimeException(

Review comment:
       Awesome!  I am glad we are hard-rejecting accidentally adversarial uses.
   
   But isn't this still a small yet non-trivial statistical possibility?  Hashing is hard ... every fixed hash function has adversarial inputs, though perhaps they are not that common in real-world usage?
   
   Anyway, +1 to keeping this safety check.
   
   Hmm, should we maybe also confirm that the number of `.add(...)` calls does not exceed the original `int wordCount`?  Otherwise that might cause excessive hash collisions ...
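   A minimal sketch of such a guard, assuming a hypothetical `addedWords` counter inside `Builder` (an illustration of the suggested check, not code from the PR):
   
   ```java
   // Hypothetical addition to WordStorage.Builder: fail fast if callers add more
   // entries than the wordCount used to size the hash table.
   private int addedWords; // illustrative field, not present in the PR
   
   void add(String entry, char[] flags, int morphDataID) throws IOException {
     if (++addedWords > hashTable.length) { // hashTable.length == wordCount from the constructor
       throw new IllegalArgumentException(
           "More entries added than the wordCount (" + hashTable.length + ") passed to the Builder");
     }
     // ... existing grouping/flushing logic unchanged ...
   }
   ```
   
   Such a check would bound the load factor by construction, instead of relying only on the `chainLengths` limit to catch a bad estimate after the fact.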

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt(int) VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {

Review comment:
       Ahh, `wordCount` might actually be "how big to pre-allocate the hash table", not the precise number of words that will actually be added next?

##########
File path: 
lucene/analysis/common/src/java/org/apache/lucene/analysis/hunspell/WordStorage.java
##########
@@ -0,0 +1,338 @@
+/*
+ * Licensed to the Apache Software Foundation (ASF) under one or more
+ * contributor license agreements.  See the NOTICE file distributed with
+ * this work for additional information regarding copyright ownership.
+ * The ASF licenses this file to You under the Apache License, Version 2.0
+ * (the "License"); you may not use this file except in compliance with
+ * the License.  You may obtain a copy of the License at
+ *
+ *     http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+package org.apache.lucene.analysis.hunspell;
+
+import java.io.IOException;
+import java.util.ArrayList;
+import java.util.List;
+import java.util.function.BiConsumer;
+import org.apache.lucene.store.ByteArrayDataInput;
+import org.apache.lucene.store.ByteArrayDataOutput;
+import org.apache.lucene.store.DataOutput;
+import org.apache.lucene.util.ArrayUtil;
+import org.apache.lucene.util.CharsRef;
+import org.apache.lucene.util.IntsRef;
+import org.apache.lucene.util.IntsRefBuilder;
+import org.apache.lucene.util.fst.IntSequenceOutputs;
+
+/**
+ * A data structure for memory-efficient word storage and fast 
lookup/enumeration. Each dictionary
+ * entry is stored as:
+ *
+ * <ol>
+ *   <li>the last character
+ *   <li>pointer to a similar entry for the prefix (all characters except the 
last one)
+ *   <li>value data: a list of ints representing word flags and morphological 
data, and a pointer to
+ *       hash collisions, if any
+ * </ol>
+ *
+ * There's only one entry for each prefix, so it's like a trie/{@link
+ * org.apache.lucene.util.fst.FST}, but a reversed one: each node points to a single previous node
+ * instead of several following ones. For example, "abc" and "abd" point to 
the same prefix entry
+ * "ab" which points to "a" which points to 0.<br>
+ * <br>
+ * The entries are stored in a contiguous byte array, identified by their 
offsets, using {@link
+ * DataOutput#writeVInt(int) VINT} format for compression.
+ */
+class WordStorage {
+  /**
+   * A map from word's hash (modulo array's length) into the offset of the 
last entry in {@link
+   * #wordData} with this hash. Negated, if there's more than one entry with 
the same hash.
+   */
+  private final int[] hashTable;
+
+  /**
+   * An array of word entries:
+   *
+   * <ul>
+   *   <li>VINT: the word's last character
+   *   <li>VINT: pointer to the entry for the same word without the last 
character. It's relative:
+   *       the difference of this entry's start and the prefix's entry start. 
0 for single-character
+   *       entries
+   *   <li>Optional, for non-leaf entries only:
+   *       <ul>
+   *         <li>VINT: the length of the word form data, returned from {@link 
#lookupWord}
+   *         <li>n * VINT: the word form data
+   *         <li>Optional, for hash-colliding entries only:
+   *             <ul>
+   *               <li>BYTE: 1 if the next collision entry has further 
collisions, 0 if it's the
+   *                   last of the entries with the same hash
+   *               <li>VINT: (relative) pointer to the previous entry with the 
same hash
+   *             </ul>
+   *       </ul>
+   * </ul>
+   */
+  private final byte[] wordData;
+
+  private WordStorage(int[] hashTable, byte[] wordData) {
+    this.hashTable = hashTable;
+    this.wordData = wordData;
+  }
+
+  IntsRef lookupWord(char[] word, int offset, int length) {
+    assert length > 0;
+
+    int hash = Math.abs(CharsRef.stringHashCode(word, offset, length) % 
hashTable.length);
+    int pos = hashTable[hash];
+    if (pos == 0) {
+      return null;
+    }
+
+    boolean collision = pos < 0;
+    pos = Math.abs(pos);
+
+    char lastChar = word[offset + length - 1];
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    while (true) {
+      in.setPosition(pos);
+      char c = (char) in.readVInt();
+      int prevPos = pos - in.readVInt();
+      int beforeForms = in.getPosition();
+      boolean found = c == lastChar && isSameString(word, offset, length - 1, 
prevPos, in);
+      if (!collision && !found) {
+        return null;
+      }
+
+      in.setPosition(beforeForms);
+      int formLength = in.readVInt();
+      if (found) {
+        IntsRef forms = new IntsRef(formLength);
+        readForms(forms, in, formLength);
+        return forms;
+      } else {
+        skipVInts(in, formLength);
+      }
+
+      collision = in.readByte() == 1;
+      pos -= in.readVInt();
+    }
+  }
+
+  private static void skipVInts(ByteArrayDataInput in, int count) {
+    for (int i = 0; i < count; ) {
+      if (in.readByte() >= 0) i++;
+    }
+  }
+
+  /**
+   * @param processor is invoked for each word. Note that the passed arguments 
(word and form) are
+   *     reused, so they can be modified in any way, but may not be saved for 
later by the processor
+   */
+  void processAllWords(int maxLength, BiConsumer<CharsRef, IntsRef> processor) 
{
+    CharsRef chars = new CharsRef(maxLength);
+    IntsRef forms = new IntsRef();
+    ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+    for (int pos : hashTable) {
+      boolean collision = pos < 0;
+      pos = Math.abs(pos);
+
+      while (pos != 0) {
+        int wordStart = maxLength - 1;
+
+        in.setPosition(pos);
+        chars.chars[wordStart] = (char) in.readVInt();
+        int prevPos = pos - in.readVInt();
+
+        int dataLength = in.readVInt();
+        if (forms.ints.length < dataLength) {
+          forms.ints = new int[dataLength];
+        }
+        readForms(forms, in, dataLength);
+
+        int afterForms = in.getPosition();
+
+        while (prevPos != 0 && wordStart > 0) {
+          in.setPosition(prevPos);
+          chars.chars[--wordStart] = (char) in.readVInt();
+          prevPos -= in.readVInt();
+        }
+
+        if (wordStart > 0) {
+          chars.offset = wordStart;
+          chars.length = maxLength - wordStart;
+          processor.accept(chars, forms);
+        }
+
+        if (!collision) {
+          break;
+        }
+
+        in.setPosition(afterForms);
+        collision = in.readVInt() == 1;
+        pos -= in.readVInt();
+      }
+    }
+  }
+
+  private boolean isSameString(
+      char[] word, int offset, int length, int dataPos, ByteArrayDataInput in) 
{
+    for (int i = length - 1; i >= 0; i--) {
+      in.setPosition(dataPos);
+      char c = (char) in.readVInt();
+      if (c != word[i + offset]) {
+        return false;
+      }
+      dataPos -= in.readVInt();
+      if (dataPos == 0) {
+        return i == 0;
+      }
+    }
+    return length == 0;
+  }
+
+  private void readForms(IntsRef forms, ByteArrayDataInput in, int length) {
+    for (int i = 0; i < length; i++) {
+      forms.ints[i] = in.readVInt();
+    }
+    forms.length = length;
+  }
+
+  static class Builder {
+    private final boolean hasCustomMorphData;
+    private final int[] hashTable;
+    private final int[] chainLengths;
+    private final FlagEnumerator flagEnumerator;
+    private final ByteArrayDataOutput dataWriter;
+
+    private byte[] wordData;
+
+    private int commonPrefixLength, commonPrefixPos;
+    private String currentEntry = null;
+    private final List<char[]> group = new ArrayList<>();
+    private final List<Integer> morphDataIDs = new ArrayList<>();
+
+    Builder(int wordCount, boolean hasCustomMorphData, FlagEnumerator 
flagEnumerator) {
+      this.flagEnumerator = flagEnumerator;
+      this.hasCustomMorphData = hasCustomMorphData;
+
+      hashTable = new int[wordCount];
+      wordData = new byte[wordCount * 6];
+
+      dataWriter = new ByteArrayDataOutput(wordData);
+      dataWriter.writeByte((byte) 0); // zero index is root, contains nothing
+      chainLengths = new int[hashTable.length];
+    }
+
+    void add(String entry, char[] flags, int morphDataID) throws IOException {
+      if (!entry.equals(currentEntry)) {
+        if (currentEntry != null) {
+          if (entry.compareTo(currentEntry) < 0) {
+            throw new IllegalArgumentException("out of order: " + entry + " < 
" + currentEntry);
+          }
+          int pos = flushGroup();
+
+          commonPrefixLength = GeneratingSuggester.commonPrefix(currentEntry, 
entry);
+          ByteArrayDataInput in = new ByteArrayDataInput(wordData);
+          in.setPosition(pos);
+          for (int i = currentEntry.length() - 1; i >= commonPrefixLength; 
i--) {
+            char c = (char) in.readVInt();
+            assert c == currentEntry.charAt(i);
+            pos -= in.readVInt();
+            in.setPosition(pos);
+          }
+          commonPrefixPos = pos;
+        }
+        currentEntry = entry;
+      }
+
+      group.add(flags);
+      if (hasCustomMorphData) {
+        morphDataIDs.add(morphDataID);
+      }
+    }
+
+    private int flushGroup() throws IOException {
+      IntsRefBuilder currentOrds = new IntsRefBuilder();

Review comment:
       I think you could reuse a single `IntsRefBuilder` to reduce GC load, since it is only used within this `flushGroup()` method?
   
   Not important, though.
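   A sketch of that suggestion (not the PR's code), assuming `IntsRefBuilder.clear()` is used to reset a single reusable instance:
   
   ```java
   // Hoist the builder to a field and reset it per group instead of allocating
   // a new IntsRefBuilder on every flushGroup() call.
   private final IntsRefBuilder currentOrds = new IntsRefBuilder();
   
   private int flushGroup() throws IOException {
     currentOrds.clear(); // keeps the backing int[] around across calls
     // ... rest of the method body as in the PR ...
   }
   ```
   
   Since `flushGroup()` runs once per distinct entry, this mainly saves short-lived allocations; the backing array also stops being re-grown from scratch for each group.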



