This is an automated email from the ASF dual-hosted git repository.

liaoxin pushed a commit to branch master
in repository https://gitbox.apache.org/repos/asf/doris-website.git


The following commit(s) were added to refs/heads/master by this push:
     new c5ea31e3b0 [doc](load)fix load complex type doc error (#1716)
c5ea31e3b0 is described below

commit c5ea31e3b0a8576a87d8a4f19e8210899811612b
Author: hui lai <lai...@selectdb.com>
AuthorDate: Mon Jan 6 23:23:26 2025 +0800

    [doc](load)fix load complex type doc error (#1716)
---
 docs/data-operate/import/complex-types/bitmap.md     | 20 ++++++++++----------
 docs/data-operate/import/complex-types/hll.md        | 20 ++++++++++----------
 .../current/data-operate/import/complex-types/hll.md |  8 ++++----
 .../data-operate/import/complex-types/hll.md         |  8 ++++----
 .../data-operate/import/complex-types/hll.md         |  8 ++++----
 .../data-operate/import/complex-types/bitmap.md      | 20 ++++++++++----------
 .../data-operate/import/complex-types/hll.md         | 20 ++++++++++----------
 .../data-operate/import/complex-types/bitmap.md      | 20 ++++++++++----------
 .../data-operate/import/complex-types/hll.md         | 20 ++++++++++----------
 9 files changed, 72 insertions(+), 72 deletions(-)

diff --git a/docs/data-operate/import/complex-types/bitmap.md b/docs/data-operate/import/complex-types/bitmap.md
index e60058bf77..0e1fae9719 100644
--- a/docs/data-operate/import/complex-types/bitmap.md
+++ b/docs/data-operate/import/complex-types/bitmap.md
@@ -33,16 +33,16 @@ The BITMAP type can be used in Duplicate tables, Unique tables, and Aggregate ta
 Create the following CSV file: test_bitmap.csv
 
 ```sql
-koga|17723
-nijg|146285
-lojn|347890
-lofn|489871
-jfin|545679
-kon|676724
-nhga|767689
-nfubg|879878
-huang|969798
-buag|97997
+1|koga|17723
+2|nijg|146285
+3|lojn|347890
+4|lofn|489871
+5|jfin|545679
+6|kon|676724
+7|nhga|767689
+8|nfubg|879878
+9|huang|969798
+10|buag|97997
 ```
 
 ### Step 2: Create Table in Database
diff --git a/docs/data-operate/import/complex-types/hll.md b/docs/data-operate/import/complex-types/hll.md
index a0c3300c13..e23e52667f 100644
--- a/docs/data-operate/import/complex-types/hll.md
+++ b/docs/data-operate/import/complex-types/hll.md
@@ -33,16 +33,16 @@ HLL is used for approximate deduplication and performs better than Count Distinc
 Create the following CSV file: test_hll.csv
 
 ```sql
-koga
-nijg
-lojn
-lofn
-jfin
-kon
-nhga
-nfubg
-huang
-buag
+1001|koga
+1002|nijg
+1003|lojn
+1004|lofn
+1005|jfin
+1006|kon
+1007|nhga
+1008|nfubg
+1009|huang
+1010|buag
 ```
 
 ### Step 2: Create Table in Database
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/current/data-operate/import/complex-types/hll.md b/i18n/zh-CN/docusaurus-plugin-content-docs/current/data-operate/import/complex-types/hll.md
index 2dd6cebb03..e4447b1606 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/current/data-operate/import/complex-types/hll.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/current/data-operate/import/complex-types/hll.md
@@ -32,7 +32,7 @@ HLL是用作模糊去重,在数据量大的情况性能优于 Count Distinct
 
 创建如下的 csv 文件:test_hll.csv
 
-```SQL
+```sql
 1001|koga
 1002|nijg
 1003|lojn
@@ -47,7 +47,7 @@ HLL是用作模糊去重,在数据量大的情况性能优于 Count Distinct
 
 ### 第 2 步:在库中创建表
 
-```SQL
+```sql
 CREATE TABLE testdb.test_hll(
     typ_id           BIGINT          NULL   COMMENT "ID",
     typ_name         VARCHAR(10)     NULL   COMMENT "NAME",
@@ -59,7 +59,7 @@ DISTRIBUTED BY HASH(typ_id) BUCKETS 10;
 
 ### 第 3 步:导入数据
 
-```SQL
+```sql
 curl --location-trusted -u <doris_user>:<doris_password> \
     -H "column_separator:|" \
     -H "columns:typ_id,typ_name,pv=hll_hash(typ_id)" \
@@ -71,7 +71,7 @@ curl --location-trusted -u <doris_user>:<doris_password> \
 
 使用 hll_cardinality 进行查询:
 
-```SQL
+```sql
 mysql> select typ_id,typ_name,hll_cardinality(pv) from testdb.test_hll;
 +--------+----------+---------------------+
 | typ_id | typ_name | hll_cardinality(pv) |
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/data-operate/import/complex-types/hll.md b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/data-operate/import/complex-types/hll.md
index 2dd6cebb03..e4447b1606 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/data-operate/import/complex-types/hll.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/version-2.1/data-operate/import/complex-types/hll.md
@@ -32,7 +32,7 @@ HLL是用作模糊去重,在数据量大的情况性能优于 Count Distinct
 
 创建如下的 csv 文件:test_hll.csv
 
-```SQL
+```sql
 1001|koga
 1002|nijg
 1003|lojn
@@ -47,7 +47,7 @@ HLL是用作模糊去重,在数据量大的情况性能优于 Count Distinct
 
 ### 第 2 步:在库中创建表
 
-```SQL
+```sql
 CREATE TABLE testdb.test_hll(
     typ_id           BIGINT          NULL   COMMENT "ID",
     typ_name         VARCHAR(10)     NULL   COMMENT "NAME",
@@ -59,7 +59,7 @@ DISTRIBUTED BY HASH(typ_id) BUCKETS 10;
 
 ### 第 3 步:导入数据
 
-```SQL
+```sql
 curl --location-trusted -u <doris_user>:<doris_password> \
     -H "column_separator:|" \
     -H "columns:typ_id,typ_name,pv=hll_hash(typ_id)" \
@@ -71,7 +71,7 @@ curl --location-trusted -u <doris_user>:<doris_password> \
 
 使用 hll_cardinality 进行查询:
 
-```SQL
+```sql
 mysql> select typ_id,typ_name,hll_cardinality(pv) from testdb.test_hll;
 +--------+----------+---------------------+
 | typ_id | typ_name | hll_cardinality(pv) |
diff --git a/i18n/zh-CN/docusaurus-plugin-content-docs/version-3.0/data-operate/import/complex-types/hll.md b/i18n/zh-CN/docusaurus-plugin-content-docs/version-3.0/data-operate/import/complex-types/hll.md
index 2dd6cebb03..e4447b1606 100644
--- a/i18n/zh-CN/docusaurus-plugin-content-docs/version-3.0/data-operate/import/complex-types/hll.md
+++ b/i18n/zh-CN/docusaurus-plugin-content-docs/version-3.0/data-operate/import/complex-types/hll.md
@@ -32,7 +32,7 @@ HLL是用作模糊去重,在数据量大的情况性能优于 Count Distinct
 
 创建如下的 csv 文件:test_hll.csv
 
-```SQL
+```sql
 1001|koga
 1002|nijg
 1003|lojn
@@ -47,7 +47,7 @@ HLL是用作模糊去重,在数据量大的情况性能优于 Count Distinct
 
 ### 第 2 步:在库中创建表
 
-```SQL
+```sql
 CREATE TABLE testdb.test_hll(
     typ_id           BIGINT          NULL   COMMENT "ID",
     typ_name         VARCHAR(10)     NULL   COMMENT "NAME",
@@ -59,7 +59,7 @@ DISTRIBUTED BY HASH(typ_id) BUCKETS 10;
 
 ### 第 3 步:导入数据
 
-```SQL
+```sql
 curl --location-trusted -u <doris_user>:<doris_password> \
     -H "column_separator:|" \
     -H "columns:typ_id,typ_name,pv=hll_hash(typ_id)" \
@@ -71,7 +71,7 @@ curl --location-trusted -u <doris_user>:<doris_password> \
 
 使用 hll_cardinality 进行查询:
 
-```SQL
+```sql
 mysql> select typ_id,typ_name,hll_cardinality(pv) from testdb.test_hll;
 +--------+----------+---------------------+
 | typ_id | typ_name | hll_cardinality(pv) |
diff --git a/versioned_docs/version-2.1/data-operate/import/complex-types/bitmap.md b/versioned_docs/version-2.1/data-operate/import/complex-types/bitmap.md
index e60058bf77..0e1fae9719 100644
--- a/versioned_docs/version-2.1/data-operate/import/complex-types/bitmap.md
+++ b/versioned_docs/version-2.1/data-operate/import/complex-types/bitmap.md
@@ -33,16 +33,16 @@ The BITMAP type can be used in Duplicate tables, Unique tables, and Aggregate ta
 Create the following CSV file: test_bitmap.csv
 
 ```sql
-koga|17723
-nijg|146285
-lojn|347890
-lofn|489871
-jfin|545679
-kon|676724
-nhga|767689
-nfubg|879878
-huang|969798
-buag|97997
+1|koga|17723
+2|nijg|146285
+3|lojn|347890
+4|lofn|489871
+5|jfin|545679
+6|kon|676724
+7|nhga|767689
+8|nfubg|879878
+9|huang|969798
+10|buag|97997
 ```
 
 ### Step 2: Create Table in Database
diff --git a/versioned_docs/version-2.1/data-operate/import/complex-types/hll.md b/versioned_docs/version-2.1/data-operate/import/complex-types/hll.md
index a0c3300c13..e23e52667f 100644
--- a/versioned_docs/version-2.1/data-operate/import/complex-types/hll.md
+++ b/versioned_docs/version-2.1/data-operate/import/complex-types/hll.md
@@ -33,16 +33,16 @@ HLL is used for approximate deduplication and performs better than Count Distinc
 Create the following CSV file: test_hll.csv
 
 ```sql
-koga
-nijg
-lojn
-lofn
-jfin
-kon
-nhga
-nfubg
-huang
-buag
+1001|koga
+1002|nijg
+1003|lojn
+1004|lofn
+1005|jfin
+1006|kon
+1007|nhga
+1008|nfubg
+1009|huang
+1010|buag
 ```
 
 ### Step 2: Create Table in Database
diff --git a/versioned_docs/version-3.0/data-operate/import/complex-types/bitmap.md b/versioned_docs/version-3.0/data-operate/import/complex-types/bitmap.md
index e60058bf77..0e1fae9719 100644
--- a/versioned_docs/version-3.0/data-operate/import/complex-types/bitmap.md
+++ b/versioned_docs/version-3.0/data-operate/import/complex-types/bitmap.md
@@ -33,16 +33,16 @@ The BITMAP type can be used in Duplicate tables, Unique tables, and Aggregate ta
 Create the following CSV file: test_bitmap.csv
 
 ```sql
-koga|17723
-nijg|146285
-lojn|347890
-lofn|489871
-jfin|545679
-kon|676724
-nhga|767689
-nfubg|879878
-huang|969798
-buag|97997
+1|koga|17723
+2|nijg|146285
+3|lojn|347890
+4|lofn|489871
+5|jfin|545679
+6|kon|676724
+7|nhga|767689
+8|nfubg|879878
+9|huang|969798
+10|buag|97997
 ```
 
 ### Step 2: Create Table in Database
diff --git a/versioned_docs/version-3.0/data-operate/import/complex-types/hll.md b/versioned_docs/version-3.0/data-operate/import/complex-types/hll.md
index a0c3300c13..e23e52667f 100644
--- a/versioned_docs/version-3.0/data-operate/import/complex-types/hll.md
+++ b/versioned_docs/version-3.0/data-operate/import/complex-types/hll.md
@@ -33,16 +33,16 @@ HLL is used for approximate deduplication and performs better than Count Distinc
 Create the following CSV file: test_hll.csv
 
 ```sql
-koga
-nijg
-lojn
-lofn
-jfin
-kon
-nhga
-nfubg
-huang
-buag
+1001|koga
+1002|nijg
+1003|lojn
+1004|lofn
+1005|jfin
+1006|kon
+1007|nhga
+1008|nfubg
+1009|huang
+1010|buag
 ```
 
 ### Step 2: Create Table in Database
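
Note on the fix above: the sample CSV rows were missing the leading id field, so they no longer lined up with the column mapping used later in the same pages (for the HLL example, `-H "columns:typ_id,typ_name,pv=hll_hash(typ_id)"`). With the corrected `id|name` rows, each pipe-separated field maps onto a table column and the HLL column is derived via `hll_hash`. A rough end-to-end sketch of the HLL flow these pages describe is below; the full `pv` column definition, the aggregate key, and the `<fe_host>:<fe_http_port>` endpoint are not visible in the hunk context and are assumptions here.

```sql
-- Assumed full table definition; only typ_id/typ_name and the
-- DISTRIBUTED BY clause appear in the diff context above.
CREATE TABLE testdb.test_hll(
    typ_id   BIGINT        NULL COMMENT "ID",
    typ_name VARCHAR(10)   NULL COMMENT "NAME",
    pv       HLL HLL_UNION NOT NULL COMMENT "hll"
)
AGGREGATE KEY(typ_id, typ_name)
DISTRIBUTED BY HASH(typ_id) BUCKETS 10;

-- Stream Load as shown in the docs: the corrected test_hll.csv rows
-- (e.g. 1001|koga) supply typ_id and typ_name, and pv is computed
-- from typ_id via hll_hash.
-- curl --location-trusted -u <doris_user>:<doris_password> \
--     -H "column_separator:|" \
--     -H "columns:typ_id,typ_name,pv=hll_hash(typ_id)" \
--     -T test_hll.csv \
--     http://<fe_host>:<fe_http_port>/api/testdb/test_hll/_stream_load

-- Check the approximate distinct count per row:
SELECT typ_id, typ_name, hll_cardinality(pv) FROM testdb.test_hll;
```

The BITMAP example follows the same pattern: the corrected `id|name|value` rows provide three fields to map onto the table's columns, with the bitmap column typically filled through `to_bitmap()` in the column mapping; the bitmap table and load command are outside this diff, so that mapping is an assumption.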


---------------------------------------------------------------------
To unsubscribe, e-mail: commits-unsubscr...@doris.apache.org
For additional commands, e-mail: commits-h...@doris.apache.org
