This is an automated email from the ASF dual-hosted git repository.
rnewson pushed a commit to branch auto-delete-3
in repository https://gitbox.apache.org/repos/asf/couchdb.git
The following commit(s) were added to refs/heads/auto-delete-3 by this push:
new d460ab8f8 PropEr test
d460ab8f8 is described below
commit d460ab8f82ac06d408be3a148d7b4e9a37eed4a7
Author: Robert Newson <[email protected]>
AuthorDate: Fri May 23 13:56:28 2025 +0100
PropEr test
---
erlang_ls.config | 5 +
.../test/eunit/fabric_drop_seq_prop_tests.erl | 245 +++++++++++++++++++++
2 files changed, 250 insertions(+)
diff --git a/erlang_ls.config b/erlang_ls.config
index 94483cfec..3464e6fdd 100644
--- a/erlang_ls.config
+++ b/erlang_ls.config
@@ -3,3 +3,8 @@ apps_dirs:
include_dirs:
- "src"
- "src/*/include"
+macros:
+ - name: WITH_PROPER
+ value: true
+ - name: TEST
+ value: true
diff --git a/src/fabric/test/eunit/fabric_drop_seq_prop_tests.erl b/src/fabric/test/eunit/fabric_drop_seq_prop_tests.erl
new file mode 100644
index 000000000..f1436407b
--- /dev/null
+++ b/src/fabric/test/eunit/fabric_drop_seq_prop_tests.erl
@@ -0,0 +1,245 @@
+% Licensed under the Apache License, Version 2.0 (the "License"); you may not
+% use this file except in compliance with the License. You may obtain a copy of
+% the License at
+%
+% http://www.apache.org/licenses/LICENSE-2.0
+%
+% Unless required by applicable law or agreed to in writing, software
+% distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+% WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+% License for the specific language governing permissions and limitations under
+% the License.
+
+-module(fabric_drop_seq_prop_tests).
+
+-behaviour(proper_statem).
+
+-include_lib("couch/include/couch_eunit_proper.hrl").
+
+-export([
+ initial_state/0,
+ command/1,
+ precondition/2,
+ postcondition/3,
+ next_state/3
+]).
+
+-export([
+ get_document/2,
+ changes/1,
+ update_document/2,
+ delete_document/2,
+ update_peer_checkpoint/1,
+ update_drop_seq/1,
+ compact/1
+]).
+
+-include_lib("couch/include/couch_db.hrl").
+-include_lib("couch/include/couch_eunit.hrl").
+
+-type docid() :: binary().
+-type epoch() :: non_neg_integer().
+
+%% Model state for the proper_statem callbacks: what the test expects the
+%% real database to contain after the generated command sequence so far.
+-record(state, {
+    % live documents as {DocId, epoch of last write}
+    docs :: [{docid(), epoch()}],
+    % deleted documents as {DocId, epoch they were written in}
+    deleted_docs :: [{docid(), epoch()}],
+    % epoch counter, bumped by update_peer_checkpoint (see next_state/3)
+    current_epoch :: epoch(),
+    % last epoch a peer checkpointed; compaction may never drop beyond it
+    peer_checkpoint_epoch :: epoch(),
+    % epoch covered by the last drop-seq update; 'undefined' until the first
+    % update_drop_seq command runs (initial_state/0 sets it to undefined, and
+    % the compact clause of next_state/3 tests for undefined explicitly)
+    drop_seq_epoch :: epoch() | undefined,
+    % running total of deleted docs purged by compaction
+    drop_count :: non_neg_integer()
+}).
+
+%% EUnit entry point: hands prop_drop_seq/0 to PropEr via the shared macro.
+%% NOTE(review): assuming the arguments are (timeout-in-seconds, num-tests) —
+%% confirm against the ?EUNIT_QUICKCHECK definition in couch_eunit_proper.hrl.
+property_test_() ->
+    ?EUNIT_QUICKCHECK(600, 2000).
+
+%% The property: run a randomly generated command sequence (command/1)
+%% against a real, freshly created database and require every command's
+%% postcondition to hold (Result =:= ok). A new database is created per run
+%% and torn down again before the verdict is inspected. ?TRAPEXIT and
+%% ?WHENFAIL are PropEr macros: the former keeps linked-process exits from
+%% killing the test runner, the latter prints the command history only when
+%% the property fails. aggregate/2 reports command-name distribution.
+prop_drop_seq() ->
+    ?FORALL(
+        Cmds,
+        commands(?MODULE),
+        ?TRAPEXIT(
+            begin
+                {DbName, Ctx} = setup(),
+                {History, State, Result} = run_commands(?MODULE, Cmds,
[{dbname, DbName}]),
+                teardown(DbName, Ctx),
+                ?WHENFAIL(
+                    io:format(
+                        "~nHistory: ~p~n~nState: ~p~n~nResult: ~p~n~n",
+                        [History, State, Result]
+                    ),
+                    aggregate(command_names(Cmds), Result =:= ok)
+                )
+            end
+        )
+    ).
+
+%% Start a minimal couch/fabric stack and create a fresh database with
+%% four shard ranges and one replica. Returns {DbName, Ctx}; both values
+%% must be handed back to teardown/2.
+setup() ->
+    Context = test_util:start_couch([fabric]),
+    Name = ?tempdb(),
+    ok = fabric:create_db(Name, [{q, 4}, {n, 1}]),
+    {Name, Context}.
+
+%% Delete the per-run database and stop the applications started by setup/0.
+teardown(Name, Context) ->
+    ok = fabric:delete_db(Name, [?ADMIN_CTX]),
+    test_util:stop_couch(Context).
+
+%% proper_statem callback: the model starts empty, at epoch 0, with no
+%% drop seq recorded yet (drop_seq_epoch stays undefined until the first
+%% update_drop_seq command).
+initial_state() ->
+    #state{
+        current_epoch = 0,
+        peer_checkpoint_epoch = 0,
+        drop_seq_epoch = undefined,
+        drop_count = 0,
+        docs = [],
+        deleted_docs = []
+    }.
+
+%% proper_statem callback: generate the next symbolic command. The delete
+%% command is only offered while the model tracks at least one live doc, so
+%% PropEr does not waste cases on trivially-skipped preconditions.
+command(S) ->
+    Always = [
+        {call, ?MODULE, update_document, [{var, dbname}, docid()]},
+        {call, ?MODULE, get_document, [{var, dbname}, docid()]},
+        {call, ?MODULE, changes, [{var, dbname}]},
+        {call, ?MODULE, update_peer_checkpoint, [{var, dbname}]},
+        {call, ?MODULE, update_drop_seq, [{var, dbname}]},
+        {call, ?MODULE, compact, [{var, dbname}]}
+    ],
+    WhenDocs =
+        case S#state.docs of
+            [] -> [];
+            [_ | _] -> [{call, ?MODULE, delete_document, [{var, dbname}, docid()]}]
+        end,
+    oneof(Always ++ WhenDocs).
+
+% Read DocId with admin context; result is checked against the model by
+% postcondition/3 ({ok, _} | {not_found, missing | deleted}).
+get_document(DbName, DocId) ->
+    fabric:open_doc(DbName, DocId, [?ADMIN_CTX]).
+
+%% Fold the full changes feed into {LiveIds, DeletedIds}. Only 'change' rows
+%% are inspected; every other feed message is passed through untouched. A row
+%% counts as deleted only when its deleted flag is exactly 'true' (absent
+%% means live), matching the original if-guard behaviour.
+changes(DbName) ->
+    Callback = fun
+        ({change, {Props}}, {Live, Deleted}) ->
+            Id = couch_util:get_value(id, Props),
+            case couch_util:get_value(deleted, Props) of
+                true ->
+                    {ok, {Live, [Id | Deleted]}};
+                _ ->
+                    {ok, {[Id | Live], Deleted}}
+            end;
+        (_Other, Acc) ->
+            {ok, Acc}
+    end,
+    fabric:changes(DbName, Callback, {[], []}, []).
+
+% Create-or-resurrect DocId (precondition/2 guarantees it is not live).
+update_document(DbName, DocId) ->
+    update_document(DbName, DocId, false).
+
+% Delete DocId (precondition/2 guarantees it is currently live).
+delete_document(DbName, DocId) ->
+    update_document(DbName, DocId, true).
+
+%% Write DocId with the given deleted flag. The current revision is opened
+%% first so the write carries a valid rev; a missing id is created fresh,
+%% and deleting an already-absent doc is a deliberate no-op.
+update_document(DbName, DocId, Deleted) ->
+    case fabric:open_doc(DbName, DocId, [?ADMIN_CTX]) of
+        {ok, Doc} ->
+            {ok, _} = fabric:update_doc(DbName, Doc#doc{deleted = Deleted}, [?ADMIN_CTX]);
+        {not_found, _} when Deleted ->
+            ok;
+        {not_found, _} ->
+            {ok, _} = fabric:update_doc(DbName, #doc{id = DocId}, [?ADMIN_CTX])
+    end.
+
+%% Record the database's current update_seq in a _local checkpoint doc,
+%% mimicking an external peer (e.g. a replicator) noting its progress.
+%% _local docs do not appear in the changes feed, so the model ignores the
+%% write itself and only bumps epochs (see next_state/3).
+update_peer_checkpoint(DbName) ->
+    {ok, Info} = fabric:get_db_info(DbName),
+    Seq = couch_util:get_value(update_seq, Info),
+    CheckpointDoc = #doc{
+        id = <<"_local/peer-checkpoint-foo">>,
+        body = {[{<<"update_seq">>, Seq}]}
+    },
+    {ok, _} = fabric:update_doc(DbName, CheckpointDoc, [?ADMIN_CTX]).
+
+% Advance the drop seq; modelled by next_state/3 as copying
+% peer_checkpoint_epoch into drop_seq_epoch.
+update_drop_seq(DbName) ->
+    {ok, _} = fabric_drop_seq:go(DbName).
+
+% Trigger compaction and block until it finishes, so its effect on deleted
+% docs is visible before the next generated command runs.
+compact(DbName) ->
+    ok = fabric:compact(DbName),
+    wait_for_compaction_to_finish(DbName).
+
+%% Poll db info until compact_running is no longer true. Plain sleep-based
+%% polling is acceptable here: this is test-only code and compaction offers
+%% no completion message to wait on.
+wait_for_compaction_to_finish(DbName) ->
+    {ok, Info} = fabric:get_db_info(DbName),
+    case couch_util:get_value(compact_running, Info) of
+        true ->
+            timer:sleep(500),
+            wait_for_compaction_to_finish(DbName);
+        _ ->
+            ok
+    end.
+
+%% proper_statem callback: only delete docs the model says are live, only
+%% create docs the model says are absent; all other commands always apply.
+precondition(State, {call, _, delete_document, [_DbName, DocId]}) ->
+    doc_exists(State, DocId);
+precondition(State, {call, _, update_document, [_DbName, DocId]}) ->
+    not doc_exists(State, DocId);
+precondition(_State, _Call) ->
+    true.
+
+%% proper_statem callback: advance the model after each command.
+%%
+%% update_document: record the doc as live at the current epoch; if it was
+%% previously deleted (resurrection), drop it from the deleted list.
+next_state(S, _V, {call, _, update_document, [_DbName, DocId]}) ->
+    S#state{
+        docs = [{DocId, S#state.current_epoch} | S#state.docs],
+        deleted_docs = lists:keydelete(DocId, 1, S#state.deleted_docs)
+    };
+%% delete_document: move the doc from the live to the deleted list, keeping
+%% its write epoch (precondition/2 guarantees keytake finds it).
+next_state(S, _V, {call, _, delete_document, [_DbName, DocId]}) ->
+    {value, Tuple, RemainingDocs} = lists:keytake(DocId, 1, S#state.docs),
+    S#state{docs = RemainingDocs, deleted_docs = [Tuple |
S#state.deleted_docs]};
+%% update_peer_checkpoint: NB both fields read the OLD record (Erlang record
+%% updates evaluate every value against the pre-update term), so
+%% peer_checkpoint_epoch becomes the epoch *before* the increment — the
+%% checkpoint covers the closing epoch and a new one is opened.
+next_state(S, _V, {call, _, update_peer_checkpoint, [_DbName]}) ->
+    S#state{
+        current_epoch = S#state.current_epoch + 1,
+        peer_checkpoint_epoch = S#state.current_epoch
+    };
+%% update_drop_seq: the drop seq may only advance to what peers have
+%% already checkpointed.
+next_state(S, _V, {call, _, update_drop_seq, [_DbName]}) ->
+    S#state{
+        drop_seq_epoch = S#state.peer_checkpoint_epoch
+    };
+%% compact: deleted docs at or before the drop seq epoch are purged; keep
+%% the newer ones and count the drops. A no-op while drop_seq_epoch is
+%% still 'undefined' (no update_drop_seq yet).
+next_state(S, _V, {call, _, compact, [_DbName]}) ->
+    {KeepDocs, DropDocs} = lists:partition(
+        fun({_, Epoch}) ->
+            S#state.drop_seq_epoch == undefined orelse Epoch >
S#state.drop_seq_epoch
+        end,
+        S#state.deleted_docs
+    ),
+    S#state{
+        deleted_docs = KeepDocs,
+        drop_count = S#state.drop_count + length(DropDocs)
+    };
+%% every other command leaves the model unchanged
+next_state(S, _Res, _Call) ->
+    S.
+
+%% proper_statem callback: compare the real database's answers with the
+%% model. A doc read must agree with exactly one of the live/deleted lists;
+%% the changes feed must contain precisely the modelled ids (order-free).
+postcondition(State, {call, _, get_document, [_DbName, DocId]}, {ok, _Doc}) ->
+    doc_exists(State, DocId) andalso not deleted_doc_exists(State, DocId);
+postcondition(State, {call, _, get_document, [_DbName, DocId]}, {not_found, deleted}) ->
+    not doc_exists(State, DocId) andalso deleted_doc_exists(State, DocId);
+postcondition(State, {call, _, get_document, [_DbName, DocId]}, {not_found, missing}) ->
+    not doc_exists(State, DocId) andalso not deleted_doc_exists(State, DocId);
+postcondition(State, {call, _, changes, [_DbName]}, {ok, {DocIds, DelDocIds}}) ->
+    same_list(DocIds, doc_ids(State)) andalso same_list(DelDocIds, deleted_doc_ids(State));
+%% every other command/result combination is accepted as-is
+postcondition(_State, _Call, _Result) ->
+    true.
+
+%% Generator: one of the ten fixed ids <<"doc1">>..<<"doc10">>. The tiny id
+%% space makes repeated update/delete collisions on the same doc likely.
+docid() ->
+    Ids = [list_to_binary(["doc", integer_to_list(N)]) || N <- lists:seq(1, 10)],
+    elements(Ids).
+
+%% Extract the doc id (first element) from a model {DocId, Epoch} entry.
+docid(Entry) when is_tuple(Entry) ->
+    element(1, Entry).
+
+% True when the model tracks DocId as live.
+doc_exists(S, DocId) ->
+    lists:keymember(DocId, 1, S#state.docs).
+
+% True when the model tracks DocId as deleted.
+deleted_doc_exists(S, DocId) ->
+    lists:keymember(DocId, 1, S#state.deleted_docs).
+
+% All live doc ids in the model (epochs stripped).
+doc_ids(S) ->
+    [docid(T) || T <- S#state.docs].
+% All deleted doc ids in the model (epochs stripped).
+deleted_doc_ids(S) ->
+    [docid(T) || T <- S#state.deleted_docs].
+
+%% Order-insensitive list equality. Logs both lists when they differ so a
+%% failing PropEr run shows exactly what diverged; uses == (not =:=) to
+%% preserve the original's comparison semantics.
+same_list(ListA, ListB) ->
+    case lists:sort(ListA) == lists:sort(ListB) of
+        true ->
+            true;
+        false ->
+            io:format("~p != ~p~n", [ListA, ListB]),
+            false
+    end.