This is an automated email from the ASF dual-hosted git repository.

jiahuili430 pushed a commit to branch fix-config-options
in repository https://gitbox.apache.org/repos/asf/couchdb.git

commit 2adc59c7f94b822ff1151cc2f29891723b960e01
Author: Jiahui Li <[email protected]>
AuthorDate: Wed Aug 27 15:25:17 2025 -0500

    Fix typos
---
 rel/overlay/etc/default.ini            | 21 ++++++++++-----------
 src/fabric/src/fabric_view_changes.erl |  2 +-
 2 files changed, 11 insertions(+), 12 deletions(-)

diff --git a/rel/overlay/etc/default.ini b/rel/overlay/etc/default.ini
index 6cffc12fa..b0be31ec8 100644
--- a/rel/overlay/etc/default.ini
+++ b/rel/overlay/etc/default.ini
@@ -116,7 +116,7 @@ view_index_dir = {{view_index_dir}}
 ; When enabled, use cfile parallel reads for all the requests. By default the
 ; setting is "false", so only requests which are configured to bypass the IOQ
 ; would use the cfile parallel reads. If there is enough RAM available for a
-; large file cache and the disks have enough IO bandwith, consider enabling
+; large file cache and the disks have enough IO bandwidth, consider enabling
 ; this setting.
 ;cfile_skip_ioq = false
 
@@ -257,7 +257,7 @@ bind_address = 127.0.0.1
 
 ; Set to false to revert to a previous _bulk_get implementation using single
 ; doc fetches internally. Using batches should be faster, however there may be
-; bugs in the new new implemention, so expose this option to allow reverting to
+; bugs in the new implementation, so expose this option to allow reverting to
 ; the old behavior.
 ;bulk_get_use_batches = true
 
@@ -709,7 +709,7 @@ partitioned||* = true
 ; How much time to wait before retrying after a missing doc exception. This
 ; exception happens if the document was seen in the changes feed, but internal
 ; replication hasn't caught up yet, and fetching document's revisions
-; fails. This a common scenario when source is updated while continuous
+; fails. This is a common scenario when source is updated while continuous
 ; replication is running. The retry period would depend on how quickly internal
 ; replication is expected to catch up. In general this is an optimisation to
 ; avoid crashing the whole replication job, which would consume more resources
@@ -728,7 +728,7 @@ partitioned||* = true
 ;   couch_replicator_auth_session - use _session cookie authentication
 ;   couch_replicator_auth_noop - use basic authentication (previous default)
 ; Currently, the new _session cookie authentication is tried first, before
-; falling back to the old basic authenticaion default:
+; falling back to the old basic authentication default:
 ;auth_plugins = couch_replicator_auth_session,couch_replicator_auth_noop
 
 ; To restore the old behaviour, use the following value:
@@ -756,7 +756,7 @@ partitioned||* = true
 ; priority 0, and would render this algorithm useless. The default value of
 ; 0.98 is picked such that if a job ran for one scheduler cycle, then didn't
 ; get to run for 7 hours, it would still have priority > 0. 7 hours was picked
-; as it was close enought to 8 hours which is the default maximum error backoff
+; as it was close enough to 8 hours which is the default maximum error backoff
 ; interval.
 ;priority_coeff = 0.98
 
@@ -969,12 +969,11 @@ port = {{prometheus_port}}
 
 [custodian]
 ; When set to `true`, force using `[cluster] n` values as the expected n value
-; of of shard copies. In cases where the application prevents creating
-; non-default n databases, this could help detect case where the shard map was
-; altered by hand, or via an external tools, such that it doesn't have the
-; necessary number of copies for some ranges. By default, when the setting is
-; `false`, the expected n value is based on the number of available copies in
-; the shard map.
+; of shard copies. In cases where the application prevents creating non-default
+; n databases, this could help detect case where the shard map was altered by
+; hand, or via an external tools, such that it doesn't have the necessary number
+; of copies for some ranges. By default, when the setting is `false`, the
+; expected n value is based on the number of available copies in the shard map.
 ;use_cluster_n_as_expected_n = false
 
 [nouveau]
diff --git a/src/fabric/src/fabric_view_changes.erl b/src/fabric/src/fabric_view_changes.erl
index 73c05163d..d14ca4ae6 100644
--- a/src/fabric/src/fabric_view_changes.erl
+++ b/src/fabric/src/fabric_view_changes.erl
@@ -543,7 +543,7 @@ get_old_seq(#shard{range = R} = Shard, SinceSeqs) ->
 
 get_db_uuid_shards(DbName) ->
     % Need to use an isolated process as we are performing a fabric call from
-    % another fabric call and there is a good chance we'd polute the mailbox
+    % another fabric call and there is a good chance we'd pollute the mailbox
     % with returned messages
     Timeout = fabric_util:request_timeout(),
     IsolatedFun = fun() -> fabric:db_uuids(DbName) end,

Reply via email to