This is an automated email from the ASF dual-hosted git repository.

vatamane pushed a commit to branch main
in repository https://gitbox.apache.org/repos/asf/couchdb.git


The following commit(s) were added to refs/heads/main by this push:
     new e0b7f6fd7 Use maps comprehensions and generators in a few places
e0b7f6fd7 is described below

commit e0b7f6fd78b13b8cd698079cac32f2eb9993f0f9
Author: Nick Vatamaniuc <[email protected]>
AuthorDate: Thu Jun 12 18:47:53 2025 -0400

    Use maps comprehensions and generators in a few places
    
    These are new in OTP 26 so we can simplify a few lines of code using them.
---
 src/couch/src/couch_db.erl           | 2 +-
 src/couch/src/couch_util.erl         | 6 +++---
 src/mem3/src/mem3_reshard_job.erl    | 2 +-
 src/nouveau/src/nouveau_bookmark.erl | 2 +-
 src/smoosh/src/smoosh_persist.erl    | 4 ++--
 5 files changed, 8 insertions(+), 8 deletions(-)

diff --git a/src/couch/src/couch_db.erl b/src/couch/src/couch_db.erl
index 88a1646c7..3100ecdc7 100644
--- a/src/couch/src/couch_db.erl
+++ b/src/couch/src/couch_db.erl
@@ -420,7 +420,7 @@ purge_docs(#db{main_pid = Pid} = Db, UUIDsIdsRevs, Options) ->
     % Gather any existing purges with the same UUIDs
     UUIDs = element(1, lists:unzip3(UUIDsIdsRevs1)),
     Old1 = get_purge_infos(Db, UUIDs),
-    Old2 = maps:from_list([{UUID, {Id, Revs}} || {_, UUID, Id, Revs} <- Old1]),
+    Old2 = #{UUID => {Id, Revs} || {_, UUID, Id, Revs} <- Old1},
     % Filter out all the purges which have already been processed
     FilterCheckFun = fun({UUID, Id, Revs}) ->
         case maps:is_key(UUID, Old2) of
diff --git a/src/couch/src/couch_util.erl b/src/couch/src/couch_util.erl
index 4dee5d8fb..aacc6e55d 100644
--- a/src/couch/src/couch_util.erl
+++ b/src/couch/src/couch_util.erl
@@ -803,11 +803,11 @@ remove_sensitive_data(KVList) ->
     lists:keyreplace(password, 1, KVList1, {password, <<"****">>}).
 
 ejson_to_map(#{} = Val) ->
-    maps:map(fun(_, V) -> ejson_to_map(V) end, Val);
+    #{K => ejson_to_map(V) || K := V <- Val};
 ejson_to_map(Val) when is_list(Val) ->
-    lists:map(fun(V) -> ejson_to_map(V) end, Val);
+    [ejson_to_map(V) || V <- Val];
 ejson_to_map({Val}) when is_list(Val) ->
-    maps:from_list(lists:map(fun({K, V}) -> {K, ejson_to_map(V)} end, Val));
+    #{K => ejson_to_map(V) || {K, V} <- Val};
 ejson_to_map(Val) ->
     Val.
 
diff --git a/src/mem3/src/mem3_reshard_job.erl b/src/mem3/src/mem3_reshard_job.erl
index b8a18b176..d79288e3b 100644
--- a/src/mem3/src/mem3_reshard_job.erl
+++ b/src/mem3/src/mem3_reshard_job.erl
@@ -411,7 +411,7 @@ topoff_impl(#job{source = #shard{} = Source, target = Targets}) ->
     BatchSize = config:get_integer(
         "rexi", "shard_split_topoff_batch_size", ?INTERNAL_REP_BATCH_SIZE
     ),
-    TMap = maps:from_list([{R, T} || #shard{range = R} = T <- Targets]),
+    TMap = #{R => T || #shard{range = R} = T <- Targets},
     Opts = [
         {batch_size, BatchSize},
         {batch_count, all},
diff --git a/src/nouveau/src/nouveau_bookmark.erl b/src/nouveau/src/nouveau_bookmark.erl
index 08aeb0f73..3b0878f72 100644
--- a/src/nouveau/src/nouveau_bookmark.erl
+++ b/src/nouveau/src/nouveau_bookmark.erl
@@ -51,7 +51,7 @@ unpack(DbName, PackedBookmark) when is_list(PackedBookmark) ->
     unpack(DbName, list_to_binary(PackedBookmark));
 unpack(DbName, PackedBookmark) when is_binary(PackedBookmark) ->
     Bookmark = jiffy:decode(b64url:decode(PackedBookmark), [return_maps]),
-    maps:from_list([{range_of(DbName, V), V} || V <- Bookmark]).
+    #{range_of(DbName, V) => V || V <- Bookmark}.
 
 pack(nil) ->
     null;
diff --git a/src/smoosh/src/smoosh_persist.erl b/src/smoosh/src/smoosh_persist.erl
index c1519f65f..f615fcbb9 100644
--- a/src/smoosh/src/smoosh_persist.erl
+++ b/src/smoosh/src/smoosh_persist.erl
@@ -71,8 +71,8 @@ persist(true, Waiting, Active, Starting) ->
     % already running. We want them to be the first ones to continue after
     % restart. We're relying on infinity sorting higher than float and integer
     % numeric values here.
-    AMap = maps:map(fun(_, _) -> infinity end, Active),
-    SMap = maps:from_list([{K, infinity} || K <- maps:values(Starting)]),
+    AMap = #{K => infinity || K := _Pid <- Active},
+    SMap = #{K => infinity || _Ref := K <- Starting},
     Path = file_path(Name),
     write(maps:merge(WMap, maps:merge(AMap, SMap)), Path).
 

Reply via email to