diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizer.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizer.java
index 15bdb3422ad..66637a09ecf 100644
--- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizer.java
+++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/KoreanTokenizer.java
@@ -132,13 +132,43 @@ public final class KoreanTokenizer extends Tokenizer {
         userDictionary,
         mode,
         outputUnknownUnigrams,
-        discardPunctuation);
+        discardPunctuation,
+        false);
   }
 
+  /**
+   * Create a new KoreanTokenizer using the system and unknown dictionaries shipped with Lucene.
+   *
+   * @param factory the AttributeFactory to use
+   * @param userDictionary Optional: if non-null, user dictionary.
+   * @param mode Decompound mode.
+   * @param outputUnknownUnigrams if true outputs unigrams for unknown words.
+   * @param discardPunctuation true if punctuation tokens should be dropped from the output.
+   * @param keepDecimalPoint true if decimal points in numbers should be kept as part of the number.
+   */
+  public KoreanTokenizer(
+      AttributeFactory factory,
+      UserDictionary userDictionary,
+      DecompoundMode mode,
+      boolean outputUnknownUnigrams,
+      boolean discardPunctuation,
+      boolean keepDecimalPoint) {
+    this(
+        factory,
+        TokenInfoDictionary.getInstance(),
+        UnknownDictionary.getInstance(),
+        ConnectionCosts.getInstance(),
+        userDictionary,
+        mode,
+        outputUnknownUnigrams,
+        discardPunctuation,
+        keepDecimalPoint);
+  }
+
   /**
    * Create a new KoreanTokenizer supplying a custom system dictionary and unknown dictionary. This
    * constructor provides an entry point for users that want to construct custom language models
    * that can be used as input to {@link DictionaryBuilder}.
    *
    * @param factory the AttributeFactory to use
    * @param systemDictionary a custom known token dictionary
@@ -148,6 +179,7 @@ public final class KoreanTokenizer extends Tokenizer {
    * @param mode Decompound mode.
    * @param outputUnknownUnigrams if true outputs unigrams for unknown words.
    * @param discardPunctuation true if punctuation tokens should be dropped from the output.
+   * @param keepDecimalPoint true if decimal points in numbers should be kept as part of the number.
    * @lucene.experimental
    */
   @IgnoreRandomChains(reason = "Parameters are too complex to be tested")
@@ -159,7 +191,8 @@ public final class KoreanTokenizer extends Tokenizer {
       UserDictionary userDictionary,
       DecompoundMode mode,
       boolean outputUnknownUnigrams,
-      boolean discardPunctuation) {
+      boolean discardPunctuation,
+      boolean keepDecimalPoint) {
     super(factory);
     TokenInfoFST fst = systemDictionary.getFST();
     FST.BytesReader fstReader = fst.getBytesReader();
@@ -183,7 +217,8 @@ public final class KoreanTokenizer extends Tokenizer {
             unkDictionary.getCharacterDefinition(),
             discardPunctuation,
             mode,
-            outputUnknownUnigrams);
+            outputUnknownUnigrams,
+            keepDecimalPoint);
     viterbi.resetBuffer(input);
     viterbi.resetState();
   }
diff --git a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/Viterbi.java b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/Viterbi.java
index 8ba9cb36979..18c51bd40d2 100644
--- a/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/Viterbi.java
+++ b/lucene/analysis/nori/src/java/org/apache/lucene/analysis/ko/Viterbi.java
@@ -33,11 +33,11 @@ import org.apache.lucene.util.fst.FST;
 
 /** {@link org.apache.lucene.analysis.morph.Viterbi} subclass for Korean morphological analysis. */
 final class Viterbi
     extends org.apache.lucene.analysis.morph.Viterbi<
         Token, org.apache.lucene.analysis.morph.Viterbi.Position> {
 
   private final EnumMap<TokenType, Dictionary<? extends KoMorphData>> dictionaryMap =
       new EnumMap<>(TokenType.class);
 
   private final UnknownDictionary unkDictionary;
   private final CharacterDefinition characterDefinition;
@@ -45,6 +43,7 @@ final class Viterbi
   private final boolean discardPunctuation;
   private final KoreanTokenizer.DecompoundMode mode;
   private final boolean outputUnknownUnigrams;
+  private final boolean keepDecimalPoint;
 
   private GraphvizFormatter<KoMorphData> dotOut;
 
@@ -60,9 +59,10 @@
       CharacterDefinition characterDefinition,
       boolean discardPunctuation,
       KoreanTokenizer.DecompoundMode mode,
-      boolean outputUnknownUnigrams) {
+      boolean outputUnknownUnigrams,
+      boolean keepDecimalPoint) {
     super(
         fst, fstReader, dictionary, userFST, userFSTReader, userDictionary, costs, Position.class);
     this.unkDictionary = unkDictionary;
     this.characterDefinition = characterDefinition;
     this.discardPunctuation = discardPunctuation;
@@ -70,6 +70,7 @@ final class Viterbi
     this.outputUnknownUnigrams = outputUnknownUnigrams;
     this.enableSpacePenaltyFactor = true;
     this.outputLongestUserEntryOnly = true;
+    this.keepDecimalPoint = keepDecimalPoint;
     dictionaryMap.put(TokenType.KNOWN, dictionary);
     dictionaryMap.put(TokenType.UNKNOWN, unkDictionary);
     dictionaryMap.put(TokenType.USER, userDictionary);
@@ -88,7 +89,8 @@ final class Viterbi
         unknownWordLength = 1;
       } else {
         // Extract unknown word. Characters with the same script are considered to be part of
-        // unknown word
+        // unknown word.
+        // Also handles decimal points in numbers if keepDecimalPoint is true.
         unknownWordLength = 1;
         Character.UnicodeScript scriptCode = Character.UnicodeScript.of(firstCharacter);
         final boolean isPunct = isPunctuation(firstCharacter);
@@ -106,16 +108,25 @@
                   // Non-spacing marks inherit the script of their base character,
                   // following recommendations from UTR #24.
                   || chType == Character.NON_SPACING_MARK;
-
-          if (sameScript
-              // split on punctuation
-              && isPunctuation(ch, chType) == isPunct
-              // split on digit
-              && Character.isDigit(ch) == isDigit
-              && characterDefinition.isGroup(ch)) {
-            unknownWordLength++;
-          } else {
-            break;
+          if (sameScript && characterDefinition.isGroup(ch)) {
+            // Same category as the first character (split on punctuation, split on digit)?
+            final boolean matchBasic =
+                isPunctuation(ch, chType) == isPunct && Character.isDigit(ch) == isDigit;
+            // When keepDecimalPoint is set, a '.' followed by a digit is kept as part of
+            // the number.
+            // NOTE(review): assumes buffer.get past end-of-input yields a non-digit
+            // sentinel (e.g. -1) so the look-ahead cannot match there; confirm.
+            final boolean isDecimalPoint =
+                keepDecimalPoint && ch == '.' && Character.isDigit(buffer.get(posAhead + 1));
+            if (matchBasic || isDecimalPoint) {
+              unknownWordLength++;
+            } else {
+              break;
+            }
+          } else {
+            // Script change or non-groupable character ends the unknown word; the
+            // original combined condition also broke here.
+            break;
           }
           // Update the script code and character class if the original script
           // is Inherited or Common.
diff --git a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/TestKoreanTokenizer.java b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/TestKoreanTokenizer.java
index 6ce60a53e3f..a534e2cae9b 100644
--- a/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/TestKoreanTokenizer.java
+++ b/lucene/analysis/nori/src/test/org/apache/lucene/analysis/ko/TestKoreanTokenizer.java
@@ -30,6 +30,6 @@ import org.apache.lucene.analysis.ko.KoreanTokenizer.DecompoundMode;
 import org.apache.lucene.analysis.ko.dict.UserDictionary;
 import org.apache.lucene.analysis.ko.tokenattributes.PartOfSpeechAttribute;
 import org.apache.lucene.analysis.ko.tokenattributes.ReadingAttribute;
 import org.apache.lucene.tests.analysis.BaseTokenStreamTestCase;
 import org.apache.lucene.tests.analysis.MockGraphTokenFilter;
 
@@ -39,7 +42,8 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
       analyzerUnigram,
       analyzerDecompound,
       analyzerDecompoundKeep,
-      analyzerReading;
+      analyzerReading,
+      analyzerWithKeepDecimalPoint;
 
   public static UserDictionary readDict() {
     InputStream is = TestKoreanTokenizer.class.getResourceAsStream("userdict.txt");
@@ -82,6 +86,16 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
             return new TokenStreamComponents(tokenizer, tokenizer);
           }
         };
+    analyzerWithKeepDecimalPoint =
+        new Analyzer() {
+          @Override
+          protected TokenStreamComponents createComponents(String fieldName) {
+            Tokenizer tokenizer =
+                new KoreanTokenizer(
+                    newAttributeFactory(), userDictionary, DecompoundMode.NONE, false, true, true);
+            return new TokenStreamComponents(tokenizer, tokenizer);
+          }
+        };
     analyzerUnigram =
         new Analyzer() {
           @Override
@@ -143,6 +158,26 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
         new int[] {1, 1, 1});
   }
 
+  public void testFloatingPointNumberWithKeepDecimalPoint() throws IOException {
+    assertAnalyzesTo(
+        analyzerWithKeepDecimalPoint,
+        "10.1 인치 모니터",
+        new String[] {"10.1", "인치", "모니터"},
+        new int[] {0, 5, 8}, // start offsets
+        new int[] {4, 7, 11}, // end offsets
+        new int[] {1, 1, 1}); // position increments
+    assertAnalyzesTo(
+        analyzerWithKeepDecimalPoint,
+        "지난해 평균 49.3%였던 갱신권 사용 비중은 올해 1∼3월 현재 42.8%로 줄었다.",
+        new String[] {
+          "지난해", "평균", "49.3", "였", "던", "갱신", "권", "사용", "비중", "은", "올해", "1", "3",
+          "월", "현재", "42.8", "로", "줄", "었", "다"
+        },
+        new int[] {0, 4, 7, 12, 13, 15, 17, 19, 22, 24, 26, 29, 31, 32, 34, 37, 42, 44, 45, 46},
+        new int[] {3, 6, 11, 13, 14, 17, 18, 21, 24, 25, 28, 30, 32, 33, 36, 41, 43, 45, 46, 47},
+        new int[] {1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1});
+  }
+
   public void testSpaces() throws IOException {
     assertAnalyzesTo(
         analyzer,
@@ -187,19 +221,19 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
         analyzerWithPunctuation,
         "화학 이외의 것!",
         new POS.Type[] {
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME
         },
         new POS.Tag[] {
           POS.Tag.NNG, POS.Tag.SP, POS.Tag.NNG, POS.Tag.JKG, POS.Tag.SP, POS.Tag.NNB, POS.Tag.SF
         },
         new POS.Tag[] {
           POS.Tag.NNG, POS.Tag.SP, POS.Tag.NNG, POS.Tag.JKG, POS.Tag.SP, POS.Tag.NNB, POS.Tag.SF
         });
   }
 
@@ -234,11 +268,11 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
         analyzer,
         "가락지나물은 한국, 중국, 일본",
         new POS.Type[] {
           POS.Type.COMPOUND,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME
         },
         new POS.Tag[] {POS.Tag.NNG, POS.Tag.JX, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP},
         new POS.Tag[] {POS.Tag.NNG, POS.Tag.JX, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP});
@@ -265,35 +299,35 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
         analyzerDecompound,
         "가락지나물은 한국, 중국, 일본",
         new POS.Type[] {
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME
         },
         new POS.Tag[] {POS.Tag.NNG, POS.Tag.NNG, POS.Tag.JX, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP},
         new POS.Tag[] {
           POS.Tag.NNG, POS.Tag.NNG, POS.Tag.JX, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP
         });
 
     assertPartsOfSpeech(
         analyzerDecompoundKeep,
         "가락지나물은 한국, 중국, 일본",
         new POS.Type[] {
           POS.Type.COMPOUND,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME
         },
         new POS.Tag[] {
           POS.Tag.NNG, POS.Tag.NNG, POS.Tag.NNG, POS.Tag.JX, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP
         },
         new POS.Tag[] {
           POS.Tag.NNG, POS.Tag.NNG, POS.Tag.NNG, POS.Tag.JX, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP
         });
   }
 
@@ -354,11 +388,11 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
         analyzer,
         "2018 평창 동계올림픽대회",
         new POS.Type[] {
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME
         },
         new POS.Tag[] {POS.Tag.SN, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNG},
         new POS.Tag[] {POS.Tag.SN, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNP, POS.Tag.NNG});
@@ -375,34 +409,34 @@ public class TestKoreanTokenizer extends BaseTokenStreamTestCase {
         analyzerUnigram,
         "2018 평창 동계올림픽대회",
         new POS.Type[] {
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
           POS.Type.MORPHEME,
         },
         new POS.Tag[] {
           POS.Tag.SY,
           POS.Tag.SY,
           POS.Tag.SY,
           POS.Tag.SY,
           POS.Tag.NNP,
           POS.Tag.NNP,
           POS.Tag.NNP,
           POS.Tag.NNG
         },
         new POS.Tag[] {
           POS.Tag.SY,
           POS.Tag.SY,
           POS.Tag.SY,
           POS.Tag.SY,
           POS.Tag.NNP,
           POS.Tag.NNP,
           POS.Tag.NNP,
           POS.Tag.NNG
         });
   }
 
