aboutsummaryrefslogtreecommitdiff
path: root/test
diff options
context:
space:
mode:
authorMickael Remond <mremond@process-one.net>2016-12-01 15:05:09 +0100
committerMickael Remond <mremond@process-one.net>2016-12-01 15:05:09 +0100
commit309fd56fb4bbb82215e4e9873eeb677e49f804de (patch)
tree3993dc3b1c9d81c18163f853760e678e3d3337d1 /test
parentMerge branch 'master' of github.com:processone/ejabberd (diff)
parentFixes pt-br translation (thanks to Rodrigues)(#1393) (diff)
Merge branch 'master' of github.com:processone/ejabberd
Diffstat (limited to 'test')
-rw-r--r--test/acl_test.exs2
-rw-r--r--test/announce_tests.erl61
-rw-r--r--test/carbons_tests.erl196
-rw-r--r--test/csi_tests.erl147
-rw-r--r--test/ejabberd_SUITE.erl2428
-rw-r--r--test/ejabberd_SUITE_data/ca.key27
-rw-r--r--test/ejabberd_SUITE_data/ca.pem22
-rw-r--r--test/ejabberd_SUITE_data/cert.pem94
-rw-r--r--test/ejabberd_SUITE_data/ejabberd.yml24
-rwxr-xr-xtest/ejabberd_SUITE_data/extauth.py5
-rwxr-xr-xtest/ejabberd_SUITE_data/gencerts.sh18
-rw-r--r--test/ejabberd_SUITE_data/openssl.cnf323
-rw-r--r--test/ejabberd_SUITE_data/self-signed-cert.pem46
-rw-r--r--test/ejabberd_commands_mock_test.exs2
-rw-r--r--test/ejabberd_cyrsasl_test.exs6
-rw-r--r--test/example_tests.erl52
-rw-r--r--test/jid_test.exs1
-rw-r--r--test/mam_tests.erl538
-rw-r--r--test/mix_tests.erl139
-rw-r--r--test/mod_admin_extra_test.exs2
-rw-r--r--test/mod_legacy.erl38
-rw-r--r--test/muc_tests.erl1885
-rw-r--r--test/offline_tests.erl406
-rw-r--r--test/privacy_tests.erl822
-rw-r--r--test/proxy65_tests.erl113
-rw-r--r--test/pubsub_tests.erl737
-rw-r--r--test/replaced_tests.erl57
-rw-r--r--test/roster_tests.erl527
-rw-r--r--test/sm_tests.erl99
-rw-r--r--test/suite.erl547
-rw-r--r--test/suite.hrl25
-rw-r--r--test/vcard_tests.erl133
32 files changed, 7435 insertions, 2087 deletions
diff --git a/test/acl_test.exs b/test/acl_test.exs
index 4bd8e6989..0ab92ade8 100644
--- a/test/acl_test.exs
+++ b/test/acl_test.exs
@@ -25,7 +25,7 @@ defmodule ACLTest do
setup_all do
:ok = :mnesia.start
- :ok = :jid.start
+ {:ok, _} = :jid.start
:stringprep.start
:ok = :ejabberd_config.start(["domain1", "domain2"], [])
:ok = :acl.start
diff --git a/test/announce_tests.erl b/test/announce_tests.erl
new file mode 100644
index 000000000..3eea5298c
--- /dev/null
+++ b/test/announce_tests.erl
@@ -0,0 +1,61 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(announce_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [server_jid/1, send_recv/2, recv_message/1, disconnect/1,
+ send/2, wait_for_master/1, wait_for_slave/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {announce_single, [sequence], []}.
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {announce_master_slave, [sequence],
+ [master_slave_test(set_motd)]}.
+
+set_motd_master(Config) ->
+ ServerJID = server_jid(Config),
+ MotdJID = jid:replace_resource(ServerJID, <<"announce/motd">>),
+ Body = xmpp:mk_text(<<"motd">>),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send(Config, #message{to = MotdJID, body = Body}),
+ #message{from = ServerJID, body = Body} = recv_message(Config),
+ disconnect(Config).
+
+set_motd_slave(Config) ->
+ ServerJID = server_jid(Config),
+ Body = xmpp:mk_text(<<"motd">>),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ #message{from = ServerJID, body = Body} = recv_message(Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("announce_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("announce_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("announce_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("announce_" ++ atom_to_list(T) ++ "_slave")]}.
diff --git a/test/carbons_tests.erl b/test/carbons_tests.erl
new file mode 100644
index 000000000..00dd57e3c
--- /dev/null
+++ b/test/carbons_tests.erl
@@ -0,0 +1,196 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(carbons_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [is_feature_advertised/2, disconnect/1, send_recv/2,
+ recv_presence/1, send/2, get_event/1, recv_message/1,
+ my_jid/1, wait_for_slave/1, wait_for_master/1,
+ put_event/2]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {carbons_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(unsupported_iq)]}.
+
+feature_enabled(Config) ->
+ true = is_feature_advertised(Config, ?NS_CARBONS_2),
+ disconnect(Config).
+
+unsupported_iq(Config) ->
+ lists:foreach(
+ fun({Type, SubEl}) ->
+ #iq{type = error} =
+ send_recv(Config, #iq{type = Type, sub_els = [SubEl]})
+ end, [{Type, SubEl} ||
+ Type <- [get, set],
+ SubEl <- [#carbons_sent{forwarded = #forwarded{}},
+ #carbons_received{forwarded = #forwarded{}},
+ #carbons_private{}]] ++
+ [{get, SubEl} || SubEl <- [#carbons_enable{}, #carbons_disable{}]]),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {carbons_master_slave, [sequence],
+ [master_slave_test(send_recv),
+ master_slave_test(enable_disable)]}.
+
+send_recv_master(Config) ->
+ Peer = ?config(peer, Config),
+ prepare_master(Config),
+ ct:comment("Waiting for the peer to be ready"),
+ ready = get_event(Config),
+ send_messages(Config),
+ ct:comment("Waiting for the peer to disconnect"),
+ #presence{from = Peer, type = unavailable} = recv_presence(Config),
+ disconnect(Config).
+
+send_recv_slave(Config) ->
+ prepare_slave(Config),
+ ok = enable(Config),
+ put_event(Config, ready),
+ recv_carbons(Config),
+ disconnect(Config).
+
+enable_disable_master(Config) ->
+ prepare_master(Config),
+ ct:comment("Waiting for the peer to be ready"),
+ ready = get_event(Config),
+ send_messages(Config),
+ disconnect(Config).
+
+enable_disable_slave(Config) ->
+ Peer = ?config(peer, Config),
+ prepare_slave(Config),
+ ok = enable(Config),
+ ok = disable(Config),
+ put_event(Config, ready),
+ ct:comment("Waiting for the peer to disconnect"),
+ #presence{from = Peer, type = unavailable} = recv_presence(Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("carbons_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("carbons_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("carbons_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("carbons_" ++ atom_to_list(T) ++ "_slave")]}.
+
+prepare_master(Config) ->
+ MyJID = my_jid(Config),
+ Peer = ?config(peer, Config),
+ #presence{from = MyJID} = send_recv(Config, #presence{priority = 10}),
+ wait_for_slave(Config),
+ ct:comment("Receiving initial presence from the peer"),
+ #presence{from = Peer} = recv_presence(Config),
+ Config.
+
+prepare_slave(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = enable(Config),
+ wait_for_master(Config),
+ #presence{from = MyJID} = send_recv(Config, #presence{priority = 5}),
+ ct:comment("Receiving initial presence from the peer"),
+ #presence{from = Peer} = recv_presence(Config),
+ Config.
+
+send_messages(Config) ->
+ Server = ?config(server, Config),
+ MyJID = my_jid(Config),
+ JID = jid:make(randoms:get_string(), Server),
+ lists:foreach(
+ fun({send, #message{type = Type} = Msg}) ->
+ I = send(Config, Msg#message{to = JID}),
+ if Type /= error ->
+ #message{id = I, type = error} = recv_message(Config);
+ true ->
+ ok
+ end;
+ ({recv, #message{} = Msg}) ->
+ ejabberd_router:route(
+ JID, MyJID, Msg#message{from = JID, to = MyJID}),
+ ct:comment("Receiving message ~s", [xmpp:pp(Msg)]),
+ #message{} = recv_message(Config)
+ end, message_iterator(Config)).
+
+recv_carbons(Config) ->
+ Peer = ?config(peer, Config),
+ BarePeer = jid:remove_resource(Peer),
+ MyJID = my_jid(Config),
+ lists:foreach(
+ fun({_, #message{sub_els = [#hint{type = 'no-copy'}]}}) ->
+ ok;
+ ({_, #message{sub_els = [#carbons_private{}]}}) ->
+ ok;
+ ({_, #message{type = T}}) when T /= normal, T /= chat ->
+ ok;
+ ({Dir, #message{type = T, body = Body} = M})
+ when (T == chat) or (T == normal andalso Body /= []) ->
+ ct:comment("Receiving carbon ~s", [xmpp:pp(M)]),
+ #message{from = BarePeer, to = MyJID} = CarbonMsg =
+ recv_message(Config),
+ case Dir of
+ send ->
+ #carbons_sent{forwarded = #forwarded{xml_els = [El]}} =
+ xmpp:get_subtag(CarbonMsg, #carbons_sent{}),
+ #message{body = Body} = xmpp:decode(El);
+ recv ->
+ #carbons_received{forwarded = #forwarded{xml_els = [El]}}=
+ xmpp:get_subtag(CarbonMsg, #carbons_received{}),
+ #message{body = Body} = xmpp:decode(El)
+ end;
+ (_) ->
+ false
+ end, message_iterator(Config)).
+
+enable(Config) ->
+ case send_recv(
+ Config, #iq{type = set,
+ sub_els = [#carbons_enable{}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+disable(Config) ->
+ case send_recv(
+ Config, #iq{type = set,
+ sub_els = [#carbons_disable{}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+message_iterator(_Config) ->
+ [{Dir, #message{type = Type, body = Body, sub_els = Els}}
+ || Dir <- [send, recv],
+ Type <- [error, chat, normal, groupchat, headline],
+ Body <- [[], xmpp:mk_text(<<"body">>)],
+ Els <- [[],
+ [#hint{type = 'no-copy'}],
+ [#carbons_private{}]]].
diff --git a/test/csi_tests.erl b/test/csi_tests.erl
new file mode 100644
index 000000000..9a96b8a59
--- /dev/null
+++ b/test/csi_tests.erl
@@ -0,0 +1,147 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(csi_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [disconnect/1, wait_for_slave/1, wait_for_master/1,
+ send/2, send_recv/2, recv_presence/1, recv_message/1,
+ server_jid/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {csi_single, [sequence],
+ [single_test(feature_enabled)]}.
+
+feature_enabled(Config) ->
+ true = ?config(csi, Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {csi_master_slave, [sequence],
+ [master_slave_test(all)]}.
+
+all_master(Config) ->
+ Peer = ?config(peer, Config),
+ Presence = #presence{to = Peer},
+ ChatState = #message{to = Peer, thread = <<"1">>,
+ sub_els = [#chatstate{type = active}]},
+ Message = ChatState#message{body = [#text{data = <<"body">>}]},
+ PepPayload = xmpp:encode(#presence{}),
+ PepOne = #message{
+ to = Peer,
+ sub_els =
+ [#ps_event{
+ items =
+ #ps_items{
+ node = <<"foo-1">>,
+ items =
+ [#ps_item{
+ id = <<"pep-1">>,
+ xml_els = [PepPayload]}]}}]},
+ PepTwo = #message{
+ to = Peer,
+ sub_els =
+ [#ps_event{
+ items =
+ #ps_items{
+ node = <<"foo-2">>,
+ items =
+ [#ps_item{
+ id = <<"pep-2">>,
+ xml_els = [PepPayload]}]}}]},
+ %% Wait for the slave to become inactive.
+ wait_for_slave(Config),
+ %% Should be queued (but see below):
+ send(Config, Presence),
+ %% Should replace the previous presence in the queue:
+ send(Config, Presence#presence{type = unavailable}),
+ %% The following two PEP stanzas should be queued (but see below):
+ send(Config, PepOne),
+ send(Config, PepTwo),
+ %% The following two PEP stanzas should replace the previous two:
+ send(Config, PepOne),
+ send(Config, PepTwo),
+ %% Should be queued (but see below):
+ send(Config, ChatState),
+ %% Should replace the previous chat state in the queue:
+ send(Config, ChatState#message{sub_els = [#chatstate{type = composing}]}),
+ %% Should be sent immediately, together with the queued stanzas:
+ send(Config, Message),
+ %% Wait for the slave to become active.
+ wait_for_slave(Config),
+ %% Should be delivered, as the client is active again:
+ send(Config, ChatState),
+ disconnect(Config).
+
+all_slave(Config) ->
+ Peer = ?config(peer, Config),
+ change_client_state(Config, inactive),
+ wait_for_master(Config),
+ #presence{from = Peer, type = unavailable, sub_els = [#delay{}]} =
+ recv_presence(Config),
+ #message{
+ from = Peer,
+ sub_els =
+ [#ps_event{
+ items =
+ #ps_items{
+ node = <<"foo-1">>,
+ items =
+ [#ps_item{
+ id = <<"pep-1">>}]}},
+ #delay{}]} = recv_message(Config),
+ #message{
+ from = Peer,
+ sub_els =
+ [#ps_event{
+ items =
+ #ps_items{
+ node = <<"foo-2">>,
+ items =
+ [#ps_item{
+ id = <<"pep-2">>}]}},
+ #delay{}]} = recv_message(Config),
+ #message{from = Peer, thread = <<"1">>,
+ sub_els = [#chatstate{type = composing},
+ #delay{}]} = recv_message(Config),
+ #message{from = Peer, thread = <<"1">>,
+ body = [#text{data = <<"body">>}],
+ sub_els = [#chatstate{type = active}]} = recv_message(Config),
+ change_client_state(Config, active),
+ wait_for_master(Config),
+ #message{from = Peer, thread = <<"1">>,
+ sub_els = [#chatstate{type = active}]} = recv_message(Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("csi_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("csi_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("csi_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("csi_" ++ atom_to_list(T) ++ "_slave")]}.
+
+change_client_state(Config, NewState) ->
+ send(Config, #csi{type = NewState}),
+ send_recv(Config, #iq{type = get, to = server_jid(Config),
+ sub_els = [#ping{}]}).
diff --git a/test/ejabberd_SUITE.erl b/test/ejabberd_SUITE.erl
index d3e7ec668..46711ad49 100644
--- a/test/ejabberd_SUITE.erl
+++ b/test/ejabberd_SUITE.erl
@@ -10,22 +10,24 @@
-compile(export_all).
--import(suite, [init_config/1, connect/1, disconnect/1,
- recv/0, send/2, send_recv/2, my_jid/1, server_jid/1,
- pubsub_jid/1, proxy_jid/1, muc_jid/1, muc_room_jid/1,
- mix_jid/1, mix_room_jid/1, get_features/2, re_register/1,
- is_feature_advertised/2, subscribe_to_events/1,
+-import(suite, [init_config/1, connect/1, disconnect/1, recv_message/1,
+ recv/1, recv_presence/1, send/2, send_recv/2, my_jid/1,
+ server_jid/1, pubsub_jid/1, proxy_jid/1, muc_jid/1,
+ muc_room_jid/1, my_muc_jid/1, peer_muc_jid/1,
+ mix_jid/1, mix_room_jid/1, get_features/2, recv_iq/1,
+ re_register/1, is_feature_advertised/2, subscribe_to_events/1,
is_feature_advertised/3, set_opt/3, auth_SASL/2,
- wait_for_master/1, wait_for_slave/1,
- make_iq_result/1, start_event_relay/0,
+ wait_for_master/1, wait_for_slave/1, flush/1,
+ make_iq_result/1, start_event_relay/0, alt_room_jid/1,
stop_event_relay/1, put_event/2, get_event/1,
- bind/1, auth/1, open_session/1, zlib/1, starttls/1,
- close_socket/1]).
-
+ bind/1, auth/1, auth/2, open_session/1, open_session/2,
+ zlib/1, starttls/1, starttls/2, close_socket/1, init_stream/1,
+ auth_legacy/2, auth_legacy/3, tcp_connect/1, send_text/2,
+ set_roster/3, del_roster/1]).
-include("suite.hrl").
suite() ->
- [{timetrap, {seconds,120}}].
+ [{timetrap, {seconds, 120}}].
init_per_suite(Config) ->
NewConfig = init_config(Config),
@@ -35,6 +37,10 @@ init_per_suite(Config) ->
LDIFFile = filename:join([DataDir, "ejabberd.ldif"]),
{ok, _} = file:copy(ExtAuthScript, filename:join([CWD, "extauth.py"])),
{ok, _} = ldap_srv:start(LDIFFile),
+ inet_db:add_host({127,0,0,1}, [binary_to_list(?S2S_VHOST),
+ binary_to_list(?MNESIA_VHOST)]),
+ inet_db:set_domain(binary_to_list(randoms:get_string())),
+ inet_db:set_lookup([file, native]),
start_ejabberd(NewConfig),
NewConfig.
@@ -78,7 +84,7 @@ init_per_group(Group, Config) ->
do_init_per_group(no_db, Config) ->
re_register(Config),
- Config;
+ set_opt(persistent_room, false, Config);
do_init_per_group(mnesia, Config) ->
mod_muc:shutdown_rooms(?MNESIA_VHOST),
set_opt(server, ?MNESIA_VHOST, Config);
@@ -124,9 +130,32 @@ do_init_per_group(riak, Config) ->
Err ->
{skip, {riak_not_available, Err}}
end;
-do_init_per_group(_GroupName, Config) ->
+do_init_per_group(s2s, Config) ->
+ ejabberd_config:add_option(s2s_use_starttls, required_trusted),
+ ejabberd_config:add_option(domain_certfile, "cert.pem"),
+ Port = ?config(s2s_port, Config),
+ set_opt(server, ?COMMON_VHOST,
+ set_opt(xmlns, ?NS_SERVER,
+ set_opt(type, server,
+ set_opt(server_port, Port,
+ set_opt(stream_from, ?S2S_VHOST,
+ set_opt(lang, <<"">>, Config))))));
+do_init_per_group(component, Config) ->
+ Server = ?config(server, Config),
+ Port = ?config(component_port, Config),
+ set_opt(xmlns, ?NS_COMPONENT,
+ set_opt(server, <<"component.", Server/binary>>,
+ set_opt(type, component,
+ set_opt(server_port, Port,
+ set_opt(stream_version, undefined,
+ set_opt(lang, <<"">>, Config))))));
+do_init_per_group(GroupName, Config) ->
Pid = start_event_relay(),
- set_opt(event_relay, Pid, Config).
+ NewConfig = set_opt(event_relay, Pid, Config),
+ case GroupName of
+ anonymous -> set_opt(anonymous, true, NewConfig);
+ _ -> NewConfig
+ end.
end_per_group(mnesia, _Config) ->
ok;
@@ -144,73 +173,134 @@ end_per_group(ldap, _Config) ->
ok;
end_per_group(extauth, _Config) ->
ok;
-end_per_group(riak, _Config) ->
+end_per_group(riak, Config) ->
+ case ejabberd_riak:is_connected() of
+ true ->
+ clear_riak_tables(Config);
+ false ->
+ Config
+ end;
+end_per_group(component, _Config) ->
ok;
+end_per_group(s2s, _Config) ->
+ ejabberd_config:add_option(s2s_use_starttls, false);
end_per_group(_GroupName, Config) ->
stop_event_relay(Config),
- ok.
+ set_opt(anonymous, false, Config).
init_per_testcase(stop_ejabberd, Config) ->
- open_session(bind(auth(connect(Config))));
+ NewConfig = set_opt(resource, <<"">>,
+ set_opt(anonymous, true, Config)),
+ open_session(bind(auth(connect(NewConfig))));
init_per_testcase(TestCase, OrigConfig) ->
- subscribe_to_events(OrigConfig),
- Server = ?config(server, OrigConfig),
- Resource = ?config(resource, OrigConfig),
- MasterResource = ?config(master_resource, OrigConfig),
- SlaveResource = ?config(slave_resource, OrigConfig),
Test = atom_to_list(TestCase),
IsMaster = lists:suffix("_master", Test),
IsSlave = lists:suffix("_slave", Test),
+ if IsMaster or IsSlave ->
+ subscribe_to_events(OrigConfig);
+ true ->
+ ok
+ end,
+ TestGroup = proplists:get_value(
+ name, ?config(tc_group_properties, OrigConfig)),
+ Server = ?config(server, OrigConfig),
+ Resource = case TestGroup of
+ anonymous ->
+ <<"">>;
+ legacy_auth ->
+ randoms:get_string();
+ _ ->
+ ?config(resource, OrigConfig)
+ end,
+ MasterResource = ?config(master_resource, OrigConfig),
+ SlaveResource = ?config(slave_resource, OrigConfig),
+ Mode = if IsSlave -> slave;
+ IsMaster -> master;
+ true -> single
+ end,
IsCarbons = lists:prefix("carbons_", Test),
- User = if IsMaster or IsCarbons -> <<"test_master!#$%^*()`~+-;_=[]{}|\\">>;
+ IsReplaced = lists:prefix("replaced_", Test),
+ User = if IsReplaced -> <<"test_single!#$%^*()`~+-;_=[]{}|\\">>;
+ IsCarbons and not (IsMaster or IsSlave) ->
+ <<"test_single!#$%^*()`~+-;_=[]{}|\\">>;
+ IsMaster or IsCarbons -> <<"test_master!#$%^*()`~+-;_=[]{}|\\">>;
IsSlave -> <<"test_slave!#$%^*()`~+-;_=[]{}|\\">>;
true -> <<"test_single!#$%^*()`~+-;_=[]{}|\\">>
end,
+ Nick = if IsSlave -> ?config(slave_nick, OrigConfig);
+ IsMaster -> ?config(master_nick, OrigConfig);
+ true -> ?config(nick, OrigConfig)
+ end,
MyResource = if IsMaster and IsCarbons -> MasterResource;
IsSlave and IsCarbons -> SlaveResource;
true -> Resource
end,
Slave = if IsCarbons ->
jid:make(<<"test_master!#$%^*()`~+-;_=[]{}|\\">>, Server, SlaveResource);
+ IsReplaced ->
+ jid:make(User, Server, Resource);
true ->
jid:make(<<"test_slave!#$%^*()`~+-;_=[]{}|\\">>, Server, Resource)
end,
Master = if IsCarbons ->
jid:make(<<"test_master!#$%^*()`~+-;_=[]{}|\\">>, Server, MasterResource);
+ IsReplaced ->
+ jid:make(User, Server, Resource);
true ->
jid:make(<<"test_master!#$%^*()`~+-;_=[]{}|\\">>, Server, Resource)
end,
- Config = set_opt(user, User,
- set_opt(slave, Slave,
- set_opt(master, Master,
- set_opt(resource, MyResource, OrigConfig)))),
- case TestCase of
- test_connect ->
+ Config1 = set_opt(user, User,
+ set_opt(slave, Slave,
+ set_opt(master, Master,
+ set_opt(resource, MyResource,
+ set_opt(nick, Nick,
+ set_opt(mode, Mode, OrigConfig)))))),
+ Config2 = if IsSlave ->
+ set_opt(peer_nick, ?config(master_nick, Config1), Config1);
+ IsMaster ->
+ set_opt(peer_nick, ?config(slave_nick, Config1), Config1);
+ true ->
+ Config1
+ end,
+ Config = if IsSlave -> set_opt(peer, Master, Config2);
+ IsMaster -> set_opt(peer, Slave, Config2);
+ true -> Config2
+ end,
+ case Test of
+ "test_connect" ++ _ ->
Config;
- test_auth ->
+ "test_legacy_auth" ++ _ ->
+ init_stream(set_opt(stream_version, undefined, Config));
+ "test_auth" ++ _ ->
connect(Config);
- test_starttls ->
+ "test_starttls" ++ _ ->
connect(Config);
- test_zlib ->
+ "test_zlib" ->
connect(Config);
- test_register ->
+ "test_register" ->
connect(Config);
- auth_md5 ->
+ "auth_md5" ->
connect(Config);
- auth_plain ->
+ "auth_plain" ->
connect(Config);
- test_bind ->
+ "unauthenticated_" ++ _ ->
+ connect(Config);
+ "test_bind" ->
auth(connect(Config));
- sm_resume ->
+ "sm_resume" ->
auth(connect(Config));
- sm_resume_failed ->
+ "sm_resume_failed" ->
auth(connect(Config));
- test_open_session ->
+ "test_open_session" ->
bind(auth(connect(Config)));
+ "replaced" ++ _ ->
+ auth(connect(Config));
_ when IsMaster or IsSlave ->
Password = ?config(password, Config),
ejabberd_auth:try_register(User, Server, Password),
open_session(bind(auth(connect(Config))));
+ _ when TestGroup == s2s_tests ->
+ auth(connect(starttls(connect(Config))));
_ ->
open_session(bind(auth(connect(Config))))
end.
@@ -218,161 +308,194 @@ init_per_testcase(TestCase, OrigConfig) ->
end_per_testcase(_TestCase, _Config) ->
ok.
+legacy_auth_tests() ->
+ {legacy_auth, [parallel],
+ [test_legacy_auth,
+ test_legacy_auth_digest,
+ test_legacy_auth_no_resource,
+ test_legacy_auth_bad_jid,
+ test_legacy_auth_fail]}.
+
no_db_tests() ->
- [{generic, [sequence],
- [test_connect,
+ [{anonymous, [parallel],
+ [test_connect_bad_xml,
+ test_connect_unexpected_xml,
+ test_connect_unknown_ns,
+ test_connect_bad_xmlns,
+ test_connect_bad_ns_stream,
+ test_connect_bad_lang,
+ test_connect_bad_to,
+ test_connect_missing_to,
+ test_connect,
+ unauthenticated_iq,
+ unauthenticated_stanza,
test_starttls,
test_zlib,
test_auth,
test_bind,
test_open_session,
- presence,
+ codec_failure,
+ unsupported_query,
+ bad_nonza,
+ invalid_from,
+ legacy_iq,
ping,
version,
time,
stats,
- sm,
- sm_resume,
- sm_resume_failed,
disco]},
- {test_proxy65, [parallel],
- [proxy65_master, proxy65_slave]}].
+ {presence_and_s2s, [sequence],
+ [test_auth_fail,
+ presence,
+ s2s_dialback,
+ s2s_optional,
+ s2s_required,
+ s2s_required_trusted]},
+ sm_tests:single_cases(),
+ muc_tests:single_cases(),
+ muc_tests:master_slave_cases(),
+ proxy65_tests:single_cases(),
+ proxy65_tests:master_slave_cases(),
+ replaced_tests:master_slave_cases()].
db_tests(riak) ->
%% No support for mod_pubsub
[{single_user, [sequence],
[test_register,
+ legacy_auth_tests(),
auth_plain,
auth_md5,
presence_broadcast,
last,
- roster_get,
+ roster_tests:single_cases(),
private,
- privacy,
- blocking,
- vcard,
+ privacy_tests:single_cases(),
+ vcard_tests:single_cases(),
+ muc_tests:single_cases(),
+ offline_tests:single_cases(),
test_unregister]},
- {test_muc_register, [sequence],
- [muc_register_master, muc_register_slave]},
- {test_roster_subscribe, [parallel],
- [roster_subscribe_master,
- roster_subscribe_slave]},
- {test_flex_offline, [sequence],
- [flex_offline_master, flex_offline_slave]},
- {test_offline, [sequence],
- [offline_master, offline_slave]},
- {test_muc, [parallel],
- [muc_master, muc_slave]},
- {test_announce, [sequence],
- [announce_master, announce_slave]},
- {test_vcard_xupdate, [parallel],
- [vcard_xupdate_master, vcard_xupdate_slave]},
- {test_roster_remove, [parallel],
- [roster_remove_master,
- roster_remove_slave]}];
+ muc_tests:master_slave_cases(),
+ privacy_tests:master_slave_cases(),
+ roster_tests:master_slave_cases(),
+ offline_tests:master_slave_cases(),
+ vcard_tests:master_slave_cases(),
+ announce_tests:master_slave_cases()];
db_tests(DB) when DB == mnesia; DB == redis ->
[{single_user, [sequence],
[test_register,
+ legacy_auth_tests(),
auth_plain,
auth_md5,
presence_broadcast,
last,
- roster_get,
- roster_ver,
+ roster_tests:single_cases(),
private,
- privacy,
- blocking,
- vcard,
- pubsub,
+ privacy_tests:single_cases(),
+ vcard_tests:single_cases(),
+ pubsub_tests:single_cases(),
+ muc_tests:single_cases(),
+ offline_tests:single_cases(),
+ mam_tests:single_cases(),
+ mix_tests:single_cases(),
+ carbons_tests:single_cases(),
+ csi_tests:single_cases(),
test_unregister]},
- {test_muc_register, [sequence],
- [muc_register_master, muc_register_slave]},
- {test_mix, [parallel],
- [mix_master, mix_slave]},
- {test_roster_subscribe, [parallel],
- [roster_subscribe_master,
- roster_subscribe_slave]},
- {test_flex_offline, [sequence],
- [flex_offline_master, flex_offline_slave]},
- {test_offline, [sequence],
- [offline_master, offline_slave]},
- {test_old_mam, [parallel],
- [mam_old_master, mam_old_slave]},
- {test_new_mam, [parallel],
- [mam_new_master, mam_new_slave]},
- {test_carbons, [parallel],
- [carbons_master, carbons_slave]},
- {test_client_state, [parallel],
- [client_state_master, client_state_slave]},
- {test_muc, [parallel],
- [muc_master, muc_slave]},
- {test_muc_mam, [parallel],
- [muc_mam_master, muc_mam_slave]},
- {test_announce, [sequence],
- [announce_master, announce_slave]},
- {test_vcard_xupdate, [parallel],
- [vcard_xupdate_master, vcard_xupdate_slave]},
- {test_roster_remove, [parallel],
- [roster_remove_master,
- roster_remove_slave]}];
+ muc_tests:master_slave_cases(),
+ privacy_tests:master_slave_cases(),
+ pubsub_tests:master_slave_cases(),
+ roster_tests:master_slave_cases(),
+ offline_tests:master_slave_cases(),
+ mam_tests:master_slave_cases(),
+ mix_tests:master_slave_cases(),
+ vcard_tests:master_slave_cases(),
+ announce_tests:master_slave_cases(),
+ carbons_tests:master_slave_cases(),
+ csi_tests:master_slave_cases()];
db_tests(_) ->
%% No support for carboncopy
[{single_user, [sequence],
[test_register,
+ legacy_auth_tests(),
auth_plain,
auth_md5,
presence_broadcast,
last,
- roster_get,
- roster_ver,
+ roster_tests:single_cases(),
private,
- privacy,
- blocking,
- vcard,
- pubsub,
+ privacy_tests:single_cases(),
+ vcard_tests:single_cases(),
+ pubsub_tests:single_cases(),
+ muc_tests:single_cases(),
+ offline_tests:single_cases(),
+ mam_tests:single_cases(),
+ mix_tests:single_cases(),
test_unregister]},
- {test_muc_register, [sequence],
- [muc_register_master, muc_register_slave]},
- {test_mix, [parallel],
- [mix_master, mix_slave]},
- {test_roster_subscribe, [parallel],
- [roster_subscribe_master,
- roster_subscribe_slave]},
- {test_flex_offline, [sequence],
- [flex_offline_master, flex_offline_slave]},
- {test_offline, [sequence],
- [offline_master, offline_slave]},
- {test_old_mam, [parallel],
- [mam_old_master, mam_old_slave]},
- {test_new_mam, [parallel],
- [mam_new_master, mam_new_slave]},
- {test_muc, [parallel],
- [muc_master, muc_slave]},
- {test_muc_mam, [parallel],
- [muc_mam_master, muc_mam_slave]},
- {test_announce, [sequence],
- [announce_master, announce_slave]},
- {test_vcard_xupdate, [parallel],
- [vcard_xupdate_master, vcard_xupdate_slave]},
- {test_roster_remove, [parallel],
- [roster_remove_master,
- roster_remove_slave]}].
+ muc_tests:master_slave_cases(),
+ privacy_tests:master_slave_cases(),
+ pubsub_tests:master_slave_cases(),
+ roster_tests:master_slave_cases(),
+ offline_tests:master_slave_cases(),
+ mam_tests:master_slave_cases(),
+ mix_tests:master_slave_cases(),
+ vcard_tests:master_slave_cases(),
+ announce_tests:master_slave_cases()].
ldap_tests() ->
[{ldap_tests, [sequence],
[test_auth,
+ test_auth_fail,
vcard_get,
ldap_shared_roster_get]}].
extauth_tests() ->
[{extauth_tests, [sequence],
[test_auth,
+ test_auth_fail,
test_unregister]}].
+component_tests() ->
+ [{component_connect, [parallel],
+ [test_connect_bad_xml,
+ test_connect_unexpected_xml,
+ test_connect_unknown_ns,
+ test_connect_bad_xmlns,
+ test_connect_bad_ns_stream,
+ test_connect_missing_to,
+ test_connect,
+ test_auth,
+ test_auth_fail]},
+ {component_tests, [sequence],
+ [test_missing_address,
+ test_invalid_from,
+ test_component_send,
+ bad_nonza,
+ codec_failure]}].
+
+s2s_tests() ->
+ [{s2s_connect, [parallel],
+ [test_connect_bad_xml,
+ test_connect_unexpected_xml,
+ test_connect_unknown_ns,
+ test_connect_bad_xmlns,
+ test_connect_bad_ns_stream,
+ test_connect,
+ test_connect_s2s_starttls_required,
+ test_starttls,
+ test_connect_missing_from,
+ test_connect_s2s_unauthenticated_iq,
+ test_auth_starttls]},
+ {s2s_tests, [sequence],
+ [test_missing_address,
+ test_invalid_from,
+ bad_nonza,
+ codec_failure]}].
+
groups() ->
[{ldap, [sequence], ldap_tests()},
{extauth, [sequence], extauth_tests()},
{no_db, [sequence], no_db_tests()},
+ {component, [sequence], component_tests()},
+ {s2s, [sequence], s2s_tests()},
{mnesia, [sequence], db_tests(mnesia)},
{redis, [sequence], db_tests(redis)},
{mysql, [sequence], db_tests(mysql)},
@@ -390,6 +513,8 @@ all() ->
{group, sqlite},
{group, extauth},
{group, riak},
+ {group, component},
+ {group, s2s},
stop_ejabberd].
stop_ejabberd(Config) ->
@@ -398,13 +523,91 @@ stop_ejabberd(Config) ->
?recv1({xmlstreamend, <<"stream:stream">>}),
Config.
+test_connect_bad_xml(Config) ->
+ Config0 = tcp_connect(Config),
+ send_text(Config0, <<"<'/>">>),
+ Version = ?config(stream_version, Config0),
+ ?recv1(#stream_start{version = Version}),
+ ?recv1(#stream_error{reason = 'not-well-formed'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_unexpected_xml(Config) ->
+ Config0 = tcp_connect(Config),
+ send(Config0, #caps{}),
+ Version = ?config(stream_version, Config0),
+ ?recv1(#stream_start{version = Version}),
+ ?recv1(#stream_error{reason = 'invalid-xml'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_unknown_ns(Config) ->
+ Config0 = init_stream(set_opt(xmlns, <<"wrong">>, Config)),
+ ?recv1(#stream_error{reason = 'invalid-xml'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_bad_xmlns(Config) ->
+ NS = case ?config(type, Config) of
+ client -> ?NS_SERVER;
+ _ -> ?NS_CLIENT
+ end,
+ Config0 = init_stream(set_opt(xmlns, NS, Config)),
+ ?recv1(#stream_error{reason = 'invalid-namespace'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_bad_ns_stream(Config) ->
+ Config0 = init_stream(set_opt(ns_stream, <<"wrong">>, Config)),
+ ?recv1(#stream_error{reason = 'invalid-namespace'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_bad_lang(Config) ->
+ Lang = iolist_to_binary(lists:duplicate(36, $x)),
+ Config0 = init_stream(set_opt(lang, Lang, Config)),
+ ?recv1(#stream_error{reason = 'policy-violation'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_bad_to(Config) ->
+ Config0 = init_stream(set_opt(server, <<"wrong.com">>, Config)),
+ ?recv1(#stream_error{reason = 'host-unknown'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_missing_to(Config) ->
+ Config0 = init_stream(set_opt(server, <<"">>, Config)),
+ ?recv1(#stream_error{reason = 'improper-addressing'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config0).
+
+test_connect_missing_from(Config) ->
+ Config1 = starttls(connect(Config)),
+ Config2 = set_opt(stream_from, <<"">>, Config1),
+ Config3 = init_stream(Config2),
+ ?recv1(#stream_error{reason = 'policy-violation'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config3).
+
test_connect(Config) ->
disconnect(connect(Config)).
+test_connect_s2s_starttls_required(Config) ->
+ Config1 = connect(Config),
+ send(Config1, #caps{}),
+ ?recv1(#stream_error{reason = 'policy-violation'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config1).
+
+test_connect_s2s_unauthenticated_iq(Config) ->
+ Config1 = connect(starttls(connect(Config))),
+ unauthenticated_iq(Config1).
+
test_starttls(Config) ->
case ?config(starttls, Config) of
true ->
- disconnect(starttls(Config));
+ disconnect(connect(starttls(Config)));
_ ->
{skipped, 'starttls_not_available'}
end.
@@ -432,8 +635,8 @@ test_register(Config) ->
register(Config) ->
#iq{type = result,
- sub_els = [#register{username = none,
- password = none}]} =
+ sub_els = [#register{username = <<>>,
+ password = <<>>}]} =
send_recv(Config, #iq{type = get, to = server_jid(Config),
sub_els = [#register{}]}),
#iq{type = result, sub_els = []} =
@@ -462,6 +665,84 @@ try_unregister(Config) ->
?recv1(#stream_error{reason = conflict}),
Config.
+unauthenticated_stanza(Config) ->
+ %% Unauthenticated stanza should be silently dropped.
+ send(Config, #message{to = server_jid(Config)}),
+ disconnect(Config).
+
+unauthenticated_iq(Config) ->
+ From = my_jid(Config),
+ To = server_jid(Config),
+ #iq{type = error} =
+ send_recv(Config, #iq{type = get, from = From, to = To,
+ sub_els = [#disco_info{}]}),
+ disconnect(Config).
+
+bad_nonza(Config) ->
+ %% Unsupported and invalid nonza should be silently dropped.
+ send(Config, #caps{}),
+ send(Config, #stanza_error{type = wrong}),
+ disconnect(Config).
+
+invalid_from(Config) ->
+ send(Config, #message{from = jid:make(randoms:get_string())}),
+ ?recv1(#stream_error{reason = 'invalid-from'}),
+ ?recv1({xmlstreamend, <<"stream:stream">>}),
+ close_socket(Config).
+
+test_missing_address(Config) ->
+ Server = server_jid(Config),
+ #iq{type = error} = send_recv(Config, #iq{type = get, from = Server}),
+ #iq{type = error} = send_recv(Config, #iq{type = get, to = Server}),
+ disconnect(Config).
+
+test_invalid_from(Config) ->
+ From = jid:make(randoms:get_string()),
+ To = jid:make(randoms:get_string()),
+ #iq{type = error} =
+ send_recv(Config, #iq{type = get, from = From, to = To}),
+ disconnect(Config).
+
+test_component_send(Config) ->
+ To = jid:make(?COMMON_VHOST),
+ From = server_jid(Config),
+ #iq{type = result, from = To, to = From} =
+ send_recv(Config, #iq{type = get, to = To, from = From,
+ sub_els = [#ping{}]}),
+ disconnect(Config).
+
+s2s_dialback(Config) ->
+ ejabberd_s2s:stop_all_connections(),
+ ejabberd_config:add_option(s2s_use_starttls, false),
+ ejabberd_config:add_option(domain_certfile, "self-signed-cert.pem"),
+ s2s_ping(Config).
+
+s2s_optional(Config) ->
+ ejabberd_s2s:stop_all_connections(),
+ ejabberd_config:add_option(s2s_use_starttls, optional),
+ ejabberd_config:add_option(domain_certfile, "self-signed-cert.pem"),
+ s2s_ping(Config).
+
+s2s_required(Config) ->
+ ejabberd_s2s:stop_all_connections(),
+ ejabberd_config:add_option(s2s_use_starttls, required),
+ ejabberd_config:add_option(domain_certfile, "self-signed-cert.pem"),
+ s2s_ping(Config).
+
+s2s_required_trusted(Config) ->
+ ejabberd_s2s:stop_all_connections(),
+ ejabberd_config:add_option(s2s_use_starttls, required),
+ ejabberd_config:add_option(domain_certfile, "cert.pem"),
+ s2s_ping(Config).
+
+s2s_ping(Config) ->
+ From = my_jid(Config),
+ To = jid:make(?MNESIA_VHOST),
+ ID = randoms:get_string(),
+ ejabberd_s2s:route(From, To, #iq{id = ID, type = get, sub_els = [#ping{}]}),
+ #iq{type = result, id = ID, sub_els = []} = recv_iq(Config),
+ disconnect(Config).
+
auth_md5(Config) ->
Mechs = ?config(mechs, Config),
case lists:member(<<"DIGEST-MD5">>, Mechs) of
@@ -482,47 +763,61 @@ auth_plain(Config) ->
{skipped, 'PLAIN_not_available'}
end.
+test_legacy_auth(Config) ->
+ disconnect(auth_legacy(Config, _Digest = false)).
+
+test_legacy_auth_digest(Config) ->
+ disconnect(auth_legacy(Config, _Digest = true)).
+
+test_legacy_auth_no_resource(Config0) ->
+ Config = set_opt(resource, <<"">>, Config0),
+ disconnect(auth_legacy(Config, _Digest = false, _ShouldFail = true)).
+
+test_legacy_auth_bad_jid(Config0) ->
+ Config = set_opt(user, <<"@">>, Config0),
+ disconnect(auth_legacy(Config, _Digest = false, _ShouldFail = true)).
+
+test_legacy_auth_fail(Config0) ->
+ Config = set_opt(user, <<"wrong">>, Config0),
+ disconnect(auth_legacy(Config, _Digest = false, _ShouldFail = true)).
+
test_auth(Config) ->
disconnect(auth(Config)).
+test_auth_starttls(Config) ->
+ disconnect(auth(connect(starttls(Config)))).
+
+test_auth_fail(Config0) ->
+ Config = set_opt(user, <<"wrong">>,
+ set_opt(password, <<"wrong">>, Config0)),
+ disconnect(auth(Config, _ShouldFail = true)).
+
test_bind(Config) ->
disconnect(bind(Config)).
test_open_session(Config) ->
- disconnect(open_session(Config)).
+ disconnect(open_session(Config, true)).
-roster_get(Config) ->
- #iq{type = result, sub_els = [#roster{items = []}]} =
- send_recv(Config, #iq{type = get, sub_els = [#roster{}]}),
+codec_failure(Config) ->
+ JID = my_jid(Config),
+ #iq{type = error} =
+ send_recv(Config, #iq{type = wrong, from = JID, to = JID}),
disconnect(Config).
-roster_ver(Config) ->
- %% Get initial "ver"
- #iq{type = result, sub_els = [#roster{ver = Ver1, items = []}]} =
- send_recv(Config, #iq{type = get,
- sub_els = [#roster{ver = <<"">>}]}),
- %% Should receive empty IQ-result
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = get,
- sub_els = [#roster{ver = Ver1}]}),
- %% Attempting to subscribe to server's JID
- send(Config, #presence{type = subscribe, to = server_jid(Config)}),
- %% Receive a single roster push with the new "ver"
- ?recv1(#iq{type = set, sub_els = [#roster{ver = Ver2}]}),
- %% Requesting roster with the previous "ver". Should receive Ver2 again
- #iq{type = result, sub_els = [#roster{ver = Ver2}]} =
- send_recv(Config, #iq{type = get,
- sub_els = [#roster{ver = Ver1}]}),
- %% Now requesting roster with the newest "ver". Should receive empty IQ.
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = get,
- sub_els = [#roster{ver = Ver2}]}),
+unsupported_query(Config) ->
+ ServerJID = server_jid(Config),
+ #iq{type = error} = send_recv(Config, #iq{type = get, to = ServerJID}),
+ #iq{type = error} = send_recv(Config, #iq{type = get, to = ServerJID,
+ sub_els = [#caps{}]}),
+ #iq{type = error} = send_recv(Config, #iq{type = get, to = ServerJID,
+ sub_els = [#roster_query{},
+ #disco_info{},
+ #privacy_query{}]}),
disconnect(Config).
presence(Config) ->
- send(Config, #presence{}),
JID = my_jid(Config),
- ?recv1(#presence{from = JID, to = JID}),
+ #presence{from = JID, to = JID} = send_recv(Config, #presence{}),
disconnect(Config).
presence_broadcast(Config) ->
@@ -539,18 +834,18 @@ presence_broadcast(Config) ->
lang = <<"en">>,
name = <<"ejabberd_ct">>}],
node = Node, features = [Feature]},
- Caps = #caps{hash = <<"sha-1">>, node = ?EJABBERD_CT_URI, ver = Ver},
+ Caps = #caps{hash = <<"sha-1">>, node = ?EJABBERD_CT_URI, version = B64Ver},
send(Config, #presence{sub_els = [Caps]}),
JID = my_jid(Config),
%% We receive:
%% 1) disco#info iq request for CAPS
%% 2) welcome message
%% 3) presence broadcast
- {IQ, _, _} = ?recv3(#iq{type = get,
- from = ServerJID,
- sub_els = [#disco_info{node = Node}]},
- #message{type = normal},
- #presence{from = JID, to = JID}),
+ IQ = #iq{type = get,
+ from = ServerJID,
+ sub_els = [#disco_info{node = Node}]} = recv_iq(Config),
+ #message{type = normal} = recv_message(Config),
+ #presence{from = JID, to = JID} = recv_presence(Config),
send(Config, #iq{type = result, id = IQ#iq.id,
to = ServerJID, sub_els = [Info]}),
%% We're trying to read our feature from ejabberd database
@@ -559,15 +854,20 @@ presence_broadcast(Config) ->
lists:foldl(
fun(Time, []) ->
timer:sleep(Time),
- mod_caps:get_features(
- Server,
- mod_caps:read_caps(
- [xmpp_codec:encode(Caps)]));
+ mod_caps:get_features(Server, Caps);
(_, Acc) ->
Acc
end, [], [0, 100, 200, 2000, 5000, 10000]),
disconnect(Config).
+legacy_iq(Config) ->
+ true = is_feature_advertised(Config, ?NS_EVENT),
+ ServerJID = server_jid(Config),
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{to = ServerJID, type = get,
+ sub_els = [#xevent{}]}),
+ disconnect(Config).
+
ping(Config) ->
true = is_feature_advertised(Config, ?NS_PING),
#iq{type = result, sub_els = []} =
@@ -607,56 +907,6 @@ disco(Config) ->
end, Items),
disconnect(Config).
-sm(Config) ->
- Server = ?config(server, Config),
- ServerJID = jid:make(<<"">>, Server, <<"">>),
- %% Send messages of type 'headline' so the server discards them silently
- Msg = #message{to = ServerJID, type = headline,
- body = [#text{data = <<"body">>}]},
- true = ?config(sm, Config),
- %% Enable the session management with resumption enabled
- send(Config, #sm_enable{resume = true, xmlns = ?NS_STREAM_MGMT_3}),
- ?recv1(#sm_enabled{id = ID, resume = true}),
- %% Initial request; 'h' should be 0.
- send(Config, #sm_r{xmlns = ?NS_STREAM_MGMT_3}),
- ?recv1(#sm_a{h = 0}),
- %% sending two messages and requesting again; 'h' should be 3.
- send(Config, Msg),
- send(Config, Msg),
- send(Config, Msg),
- send(Config, #sm_r{xmlns = ?NS_STREAM_MGMT_3}),
- ?recv1(#sm_a{h = 3}),
- close_socket(Config),
- {save_config, set_opt(sm_previd, ID, Config)}.
-
-sm_resume(Config) ->
- {sm, SMConfig} = ?config(saved_config, Config),
- ID = ?config(sm_previd, SMConfig),
- Server = ?config(server, Config),
- ServerJID = jid:make(<<"">>, Server, <<"">>),
- MyJID = my_jid(Config),
- Txt = #text{data = <<"body">>},
- Msg = #message{from = ServerJID, to = MyJID, body = [Txt]},
- %% Route message. The message should be queued by the C2S process.
- ejabberd_router:route(ServerJID, MyJID, xmpp_codec:encode(Msg)),
- send(Config, #sm_resume{previd = ID, h = 0, xmlns = ?NS_STREAM_MGMT_3}),
- ?recv1(#sm_resumed{previd = ID, h = 3}),
- ?recv1(#message{from = ServerJID, to = MyJID, body = [Txt]}),
- ?recv1(#sm_r{}),
- send(Config, #sm_a{h = 1, xmlns = ?NS_STREAM_MGMT_3}),
- %% Send another stanza to increment the server's 'h' for sm_resume_failed.
- send(Config, #presence{to = ServerJID}),
- close_socket(Config),
- {save_config, set_opt(sm_previd, ID, Config)}.
-
-sm_resume_failed(Config) ->
- {sm_resume, SMConfig} = ?config(saved_config, Config),
- ID = ?config(sm_previd, SMConfig),
- ct:sleep(5000), % Wait for session to time out.
- send(Config, #sm_resume{previd = ID, h = 1, xmlns = ?NS_STREAM_MGMT_3}),
- ?recv1(#sm_failed{reason = 'item-not-found', h = 4}),
- disconnect(Config).
-
private(Config) ->
Conference = #bookmark_conference{name = <<"Some name">>,
autojoin = true,
@@ -665,22 +915,23 @@ private(Config) ->
<<"some.conference.org">>,
<<>>)},
Storage = #bookmark_storage{conference = [Conference]},
- StorageXMLOut = xmpp_codec:encode(Storage),
+ StorageXMLOut = xmpp:encode(Storage),
+ WrongEl = #xmlel{name = <<"wrong">>},
#iq{type = error} =
- send_recv(Config, #iq{type = get, sub_els = [#private{}],
- to = server_jid(Config)}),
+ send_recv(Config, #iq{type = get,
+ sub_els = [#private{xml_els = [WrongEl]}]}),
#iq{type = result, sub_els = []} =
send_recv(
Config, #iq{type = set,
- sub_els = [#private{xml_els = [StorageXMLOut]}]}),
+ sub_els = [#private{xml_els = [WrongEl, StorageXMLOut]}]}),
#iq{type = result,
sub_els = [#private{xml_els = [StorageXMLIn]}]} =
send_recv(
Config,
#iq{type = get,
- sub_els = [#private{xml_els = [xmpp_codec:encode(
+ sub_els = [#private{xml_els = [xmpp:encode(
#bookmark_storage{})]}]}),
- Storage = xmpp_codec:decode(StorageXMLIn),
+ Storage = xmpp:decode(StorageXMLIn),
disconnect(Config).
last(Config) ->
@@ -690,1685 +941,36 @@ last(Config) ->
to = server_jid(Config)}),
disconnect(Config).
-privacy(Config) ->
- true = is_feature_advertised(Config, ?NS_PRIVACY),
- #iq{type = result, sub_els = [#privacy{}]} =
- send_recv(Config, #iq{type = get, sub_els = [#privacy{}]}),
- JID = <<"tybalt@example.com">>,
- I1 = send(Config,
- #iq{type = set,
- sub_els = [#privacy{
- lists = [#privacy_list{
- name = <<"public">>,
- items =
- [#privacy_item{
- type = jid,
- order = 3,
- action = deny,
- kinds = ['presence-in'],
- value = JID}]}]}]}),
- {Push1, _} =
- ?recv2(
- #iq{type = set,
- sub_els = [#privacy{
- lists = [#privacy_list{
- name = <<"public">>}]}]},
- #iq{type = result, id = I1, sub_els = []}),
- send(Config, make_iq_result(Push1)),
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = set,
- sub_els = [#privacy{active = <<"public">>}]}),
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = set,
- sub_els = [#privacy{default = <<"public">>}]}),
- #iq{type = result,
- sub_els = [#privacy{default = <<"public">>,
- active = <<"public">>,
- lists = [#privacy_list{name = <<"public">>}]}]} =
- send_recv(Config, #iq{type = get, sub_els = [#privacy{}]}),
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set, sub_els = [#privacy{default = none}]}),
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = set, sub_els = [#privacy{active = none}]}),
- I2 = send(Config, #iq{type = set,
- sub_els = [#privacy{
- lists =
- [#privacy_list{
- name = <<"public">>}]}]}),
- {Push2, _} =
- ?recv2(
- #iq{type = set,
- sub_els = [#privacy{
- lists = [#privacy_list{
- name = <<"public">>}]}]},
- #iq{type = result, id = I2, sub_els = []}),
- send(Config, make_iq_result(Push2)),
- disconnect(Config).
-
-blocking(Config) ->
- true = is_feature_advertised(Config, ?NS_BLOCKING),
- JID = jid:make(<<"romeo">>, <<"montague.net">>, <<>>),
- #iq{type = result, sub_els = [#block_list{}]} =
- send_recv(Config, #iq{type = get, sub_els = [#block_list{}]}),
- I1 = send(Config, #iq{type = set,
- sub_els = [#block{items = [JID]}]}),
- {Push1, Push2, _} =
- ?recv3(
- #iq{type = set,
- sub_els = [#privacy{lists = [#privacy_list{}]}]},
- #iq{type = set,
- sub_els = [#block{items = [JID]}]},
- #iq{type = result, id = I1, sub_els = []}),
- send(Config, make_iq_result(Push1)),
- send(Config, make_iq_result(Push2)),
- I2 = send(Config, #iq{type = set,
- sub_els = [#unblock{items = [JID]}]}),
- {Push3, Push4, _} =
- ?recv3(
- #iq{type = set,
- sub_els = [#privacy{lists = [#privacy_list{}]}]},
- #iq{type = set,
- sub_els = [#unblock{items = [JID]}]},
- #iq{type = result, id = I2, sub_els = []}),
- send(Config, make_iq_result(Push3)),
- send(Config, make_iq_result(Push4)),
- disconnect(Config).
-
-vcard(Config) ->
- true = is_feature_advertised(Config, ?NS_VCARD),
- VCard =
- #vcard{fn = <<"Peter Saint-Andre">>,
- n = #vcard_name{family = <<"Saint-Andre">>,
- given = <<"Peter">>},
- nickname = <<"stpeter">>,
- bday = <<"1966-08-06">>,
- adr = [#vcard_adr{work = true,
- extadd = <<"Suite 600">>,
- street = <<"1899 Wynkoop Street">>,
- locality = <<"Denver">>,
- region = <<"CO">>,
- pcode = <<"80202">>,
- ctry = <<"USA">>},
- #vcard_adr{home = true,
- locality = <<"Denver">>,
- region = <<"CO">>,
- pcode = <<"80209">>,
- ctry = <<"USA">>}],
- tel = [#vcard_tel{work = true,voice = true,
- number = <<"303-308-3282">>},
- #vcard_tel{home = true,voice = true,
- number = <<"303-555-1212">>}],
- email = [#vcard_email{internet = true,pref = true,
- userid = <<"stpeter@jabber.org">>}],
- jabberid = <<"stpeter@jabber.org">>,
- title = <<"Executive Director">>,role = <<"Patron Saint">>,
- org = #vcard_org{name = <<"XMPP Standards Foundation">>},
- url = <<"http://www.xmpp.org/xsf/people/stpeter.shtml">>,
- desc = <<"More information about me is located on my "
- "personal website: http://www.saint-andre.com/">>},
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = set, sub_els = [VCard]}),
- %% TODO: check if VCard == VCard1.
- #iq{type = result, sub_els = [_VCard1]} =
- send_recv(Config, #iq{type = get, sub_els = [#vcard{}]}),
- disconnect(Config).
-
vcard_get(Config) ->
true = is_feature_advertised(Config, ?NS_VCARD),
%% TODO: check if VCard corresponds to LDIF data from ejabberd.ldif
#iq{type = result, sub_els = [_VCard]} =
- send_recv(Config, #iq{type = get, sub_els = [#vcard{}]}),
+ send_recv(Config, #iq{type = get, sub_els = [#vcard_temp{}]}),
disconnect(Config).
ldap_shared_roster_get(Config) ->
Item = #roster_item{jid = jid:from_string(<<"user2@ldap.localhost">>), name = <<"Test User 2">>,
groups = [<<"group1">>], subscription = both},
- #iq{type = result, sub_els = [#roster{items = [Item]}]} =
- send_recv(Config, #iq{type = get, sub_els = [#roster{}]}),
- disconnect(Config).
-
-vcard_xupdate_master(Config) ->
- Img = <<137, "PNG\r\n", 26, $\n>>,
- ImgHash = p1_sha:sha(Img),
- MyJID = my_jid(Config),
- Peer = ?config(slave, Config),
- wait_for_slave(Config),
- send(Config, #presence{}),
- ?recv2(#presence{from = MyJID, type = undefined},
- #presence{from = Peer, type = undefined}),
- VCard = #vcard{photo = #vcard_photo{type = <<"image/png">>, binval = Img}},
- I1 = send(Config, #iq{type = set, sub_els = [VCard]}),
- ?recv2(#iq{type = result, sub_els = [], id = I1},
- #presence{from = MyJID, type = undefined,
- sub_els = [#vcard_xupdate{photo = ImgHash}]}),
- I2 = send(Config, #iq{type = set, sub_els = [#vcard{}]}),
- ?recv3(#iq{type = result, sub_els = [], id = I2},
- #presence{from = MyJID, type = undefined,
- sub_els = [#vcard_xupdate{photo = undefined}]},
- #presence{from = Peer, type = unavailable}),
- disconnect(Config).
-
-vcard_xupdate_slave(Config) ->
- Img = <<137, "PNG\r\n", 26, $\n>>,
- ImgHash = p1_sha:sha(Img),
- MyJID = my_jid(Config),
- Peer = ?config(master, Config),
- send(Config, #presence{}),
- ?recv1(#presence{from = MyJID, type = undefined}),
- wait_for_master(Config),
- ?recv1(#presence{from = Peer, type = undefined}),
- ?recv1(#presence{from = Peer, type = undefined,
- sub_els = [#vcard_xupdate{photo = ImgHash}]}),
- ?recv1(#presence{from = Peer, type = undefined,
- sub_els = [#vcard_xupdate{photo = undefined}]}),
+ #iq{type = result, sub_els = [#roster_query{items = [Item]}]} =
+ send_recv(Config, #iq{type = get, sub_els = [#roster_query{}]}),
disconnect(Config).
stats(Config) ->
- #iq{type = result, sub_els = [#stats{stat = Stats}]} =
+ #iq{type = result, sub_els = [#stats{list = Stats}]} =
send_recv(Config, #iq{type = get, sub_els = [#stats{}],
to = server_jid(Config)}),
lists:foreach(
fun(#stat{} = Stat) ->
#iq{type = result, sub_els = [_|_]} =
send_recv(Config, #iq{type = get,
- sub_els = [#stats{stat = [Stat]}],
+ sub_els = [#stats{list = [Stat]}],
to = server_jid(Config)})
end, Stats),
disconnect(Config).
-pubsub(Config) ->
- Features = get_features(Config, pubsub_jid(Config)),
- true = lists:member(?NS_PUBSUB, Features),
- %% Publish <presence/> element within node "presence"
- ItemID = randoms:get_string(),
- Node = <<"presence!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>,
- Item = #pubsub_item{id = ItemID,
- xml_els = [xmpp_codec:encode(#presence{})]},
- #iq{type = result,
- sub_els = [#pubsub{publish = #pubsub_publish{
- node = Node,
- items = [#pubsub_item{id = ItemID}]}}]} =
- send_recv(Config,
- #iq{type = set, to = pubsub_jid(Config),
- sub_els = [#pubsub{publish = #pubsub_publish{
- node = Node,
- items = [Item]}}]}),
- %% Subscribe to node "presence"
- I1 = send(Config,
- #iq{type = set, to = pubsub_jid(Config),
- sub_els = [#pubsub{subscribe = #pubsub_subscribe{
- node = Node,
- jid = my_jid(Config)}}]}),
- ?recv2(
- #message{sub_els = [#pubsub_event{}, #delay{}]},
- #iq{type = result, id = I1}),
- %% Get subscriptions
- true = lists:member(?PUBSUB("retrieve-subscriptions"), Features),
- #iq{type = result,
- sub_els =
- [#pubsub{subscriptions =
- {none, [#pubsub_subscription{node = Node}]}}]} =
- send_recv(Config, #iq{type = get, to = pubsub_jid(Config),
- sub_els = [#pubsub{subscriptions = {none, []}}]}),
- %% Get affiliations
- true = lists:member(?PUBSUB("retrieve-affiliations"), Features),
- #iq{type = result,
- sub_els = [#pubsub{
- affiliations =
- [#pubsub_affiliation{node = Node, type = owner}]}]} =
- send_recv(Config, #iq{type = get, to = pubsub_jid(Config),
- sub_els = [#pubsub{affiliations = []}]}),
- %% Fetching published items from node "presence"
- #iq{type = result,
- sub_els = [#pubsub{items = #pubsub_items{
- node = Node,
- items = [Item]}}]} =
- send_recv(Config,
- #iq{type = get, to = pubsub_jid(Config),
- sub_els = [#pubsub{items = #pubsub_items{node = Node}}]}),
- %% Deleting the item from the node
- true = lists:member(?PUBSUB("delete-items"), Features),
- I2 = send(Config,
- #iq{type = set, to = pubsub_jid(Config),
- sub_els = [#pubsub{retract = #pubsub_retract{
- node = Node,
- items = [#pubsub_item{id = ItemID}]}}]}),
- ?recv2(
- #iq{type = result, id = I2, sub_els = []},
- #message{sub_els = [#pubsub_event{
- items = [#pubsub_event_items{
- node = Node,
- retract = [ItemID]}]}]}),
- %% Unsubscribe from node "presence"
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set, to = pubsub_jid(Config),
- sub_els = [#pubsub{unsubscribe = #pubsub_unsubscribe{
- node = Node,
- jid = my_jid(Config)}}]}),
- disconnect(Config).
-
-mix_master(Config) ->
- MIX = mix_jid(Config),
- Room = mix_room_jid(Config),
- MyJID = my_jid(Config),
- MyBareJID = jid:remove_resource(MyJID),
- true = is_feature_advertised(Config, ?NS_MIX_0, MIX),
- #iq{type = result,
- sub_els =
- [#disco_info{
- identities = [#identity{category = <<"conference">>,
- type = <<"text">>}],
- xdata = [#xdata{type = result, fields = XFields}]}]} =
- send_recv(Config, #iq{type = get, to = MIX, sub_els = [#disco_info{}]}),
- true = lists:any(
- fun(#xdata_field{var = <<"FORM_TYPE">>,
- values = [?NS_MIX_SERVICEINFO_0]}) -> true;
- (_) -> false
- end, XFields),
- %% Joining
- Nodes = [?NS_MIX_NODES_MESSAGES, ?NS_MIX_NODES_PRESENCE,
- ?NS_MIX_NODES_PARTICIPANTS, ?NS_MIX_NODES_SUBJECT,
- ?NS_MIX_NODES_CONFIG],
- I0 = send(Config, #iq{type = set, to = Room,
- sub_els = [#mix_join{subscribe = Nodes}]}),
- {_, #message{sub_els =
- [#pubsub_event{
- items = [#pubsub_event_items{
- node = ?NS_MIX_NODES_PARTICIPANTS,
- items = [#pubsub_event_item{
- id = ParticipantID,
- xml_els = [PXML]}]}]}]}} =
- ?recv2(#iq{type = result, id = I0,
- sub_els = [#mix_join{subscribe = Nodes, jid = MyBareJID}]},
- #message{from = Room}),
- #mix_participant{jid = MyBareJID} = xmpp_codec:decode(PXML),
- %% Coming online
- PresenceID = randoms:get_string(),
- Presence = xmpp_codec:encode(#presence{}),
- I1 = send(
- Config,
- #iq{type = set, to = Room,
- sub_els =
- [#pubsub{
- publish = #pubsub_publish{
- node = ?NS_MIX_NODES_PRESENCE,
- items = [#pubsub_item{
- id = PresenceID,
- xml_els = [Presence]}]}}]}),
- ?recv2(#iq{type = result, id = I1,
- sub_els =
- [#pubsub{
- publish = #pubsub_publish{
- node = ?NS_MIX_NODES_PRESENCE,
- items = [#pubsub_item{id = PresenceID}]}}]},
- #message{from = Room,
- sub_els =
- [#pubsub_event{
- items = [#pubsub_event_items{
- node = ?NS_MIX_NODES_PRESENCE,
- items = [#pubsub_event_item{
- id = PresenceID,
- xml_els = [Presence]}]}]}]}),
- %% Coming offline
- send(Config, #presence{type = unavailable, to = Room}),
- %% Receiving presence retract event
- #message{from = Room,
- sub_els = [#pubsub_event{
- items = [#pubsub_event_items{
- node = ?NS_MIX_NODES_PRESENCE,
- retract = [PresenceID]}]}]} = recv(),
- %% Leaving
- I2 = send(Config, #iq{type = set, to = Room, sub_els = [#mix_leave{}]}),
- ?recv2(#iq{type = result, id = I2, sub_els = []},
- #message{from = Room,
- sub_els =
- [#pubsub_event{
- items = [#pubsub_event_items{
- node = ?NS_MIX_NODES_PARTICIPANTS,
- retract = [ParticipantID]}]}]}),
- disconnect(Config).
-
-mix_slave(Config) ->
- disconnect(Config).
-
-roster_subscribe_master(Config) ->
- send(Config, #presence{}),
- ?recv1(#presence{}),
- wait_for_slave(Config),
- Peer = ?config(slave, Config),
- LPeer = jid:remove_resource(Peer),
- send(Config, #presence{type = subscribe, to = LPeer}),
- Push1 = ?recv1(#iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- ask = subscribe,
- subscription = none,
- jid = LPeer}]}]}),
- send(Config, make_iq_result(Push1)),
- {Push2, _} = ?recv2(
- #iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- subscription = to,
- jid = LPeer}]}]},
- #presence{type = subscribed, from = LPeer}),
- send(Config, make_iq_result(Push2)),
- ?recv1(#presence{type = undefined, from = Peer}),
- %% BUG: ejabberd sends previous push again. Is it ok?
- Push3 = ?recv1(#iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- subscription = to,
- jid = LPeer}]}]}),
- send(Config, make_iq_result(Push3)),
- ?recv1(#presence{type = subscribe, from = LPeer}),
- send(Config, #presence{type = subscribed, to = LPeer}),
- Push4 = ?recv1(#iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- subscription = both,
- jid = LPeer}]}]}),
- send(Config, make_iq_result(Push4)),
- %% Move into a group
- Groups = [<<"A">>, <<"B">>],
- Item = #roster_item{jid = LPeer, groups = Groups},
- I1 = send(Config, #iq{type = set, sub_els = [#roster{items = [Item]}]}),
- {Push5, _} = ?recv2(
- #iq{type = set,
- sub_els =
- [#roster{items = [#roster_item{
- jid = LPeer,
- subscription = both}]}]},
- #iq{type = result, id = I1, sub_els = []}),
- send(Config, make_iq_result(Push5)),
- #iq{sub_els = [#roster{items = [#roster_item{groups = G1}]}]} = Push5,
- Groups = lists:sort(G1),
- wait_for_slave(Config),
- ?recv1(#presence{type = unavailable, from = Peer}),
- disconnect(Config).
-
-roster_subscribe_slave(Config) ->
- send(Config, #presence{}),
- ?recv1(#presence{}),
- wait_for_master(Config),
- Peer = ?config(master, Config),
- LPeer = jid:remove_resource(Peer),
- ?recv1(#presence{type = subscribe, from = LPeer}),
- send(Config, #presence{type = subscribed, to = LPeer}),
- Push1 = ?recv1(#iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- subscription = from,
- jid = LPeer}]}]}),
- send(Config, make_iq_result(Push1)),
- send(Config, #presence{type = subscribe, to = LPeer}),
- Push2 = ?recv1(#iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- ask = subscribe,
- subscription = from,
- jid = LPeer}]}]}),
- send(Config, make_iq_result(Push2)),
- {Push3, _} = ?recv2(
- #iq{type = set,
- sub_els = [#roster{items = [#roster_item{
- subscription = both,
- jid = LPeer}]}]},
- #presence{type = subscribed, from = LPeer}),
- send(Config, make_iq_result(Push3)),
- ?recv1(#presence{type = undefined, from = Peer}),
- wait_for_master(Config),
- disconnect(Config).
-
-roster_remove_master(Config) ->
- MyJID = my_jid(Config),
- Peer = ?config(slave, Config),
- LPeer = jid:remove_resource(Peer),
- Groups = [<<"A">>, <<"B">>],
- wait_for_slave(Config),
- send(Config, #presence{}),
- ?recv2(#presence{from = MyJID, type = undefined},
- #presence{from = Peer, type = undefined}),
- %% The peer removed us from its roster.
- {Push1, Push2, _, _, _} =
- ?recv5(
- %% TODO: I guess this can be optimized, we don't need
- %% to send transient roster push with subscription = 'to'.
- #iq{type = set,
- sub_els =
- [#roster{items = [#roster_item{
- jid = LPeer,
- subscription = to}]}]},
- #iq{type = set,
- sub_els =
- [#roster{items = [#roster_item{
- jid = LPeer,
- subscription = none}]}]},
- #presence{type = unsubscribe, from = LPeer},
- #presence{type = unsubscribed, from = LPeer},
- #presence{type = unavailable, from = Peer}),
- send(Config, make_iq_result(Push1)),
- send(Config, make_iq_result(Push2)),
- #iq{sub_els = [#roster{items = [#roster_item{groups = G1}]}]} = Push1,
- #iq{sub_els = [#roster{items = [#roster_item{groups = G2}]}]} = Push2,
- Groups = lists:sort(G1), Groups = lists:sort(G2),
- disconnect(Config).
-
-roster_remove_slave(Config) ->
- MyJID = my_jid(Config),
- Peer = ?config(master, Config),
- LPeer = jid:remove_resource(Peer),
- send(Config, #presence{}),
- ?recv1(#presence{from = MyJID, type = undefined}),
- wait_for_master(Config),
- ?recv1(#presence{from = Peer, type = undefined}),
- %% Remove the peer from roster.
- Item = #roster_item{jid = LPeer, subscription = remove},
- I = send(Config, #iq{type = set, sub_els = [#roster{items = [Item]}]}),
- {Push, _, _} = ?recv3(
- #iq{type = set,
- sub_els =
- [#roster{items = [#roster_item{
- jid = LPeer,
- subscription = remove}]}]},
- #iq{type = result, id = I, sub_els = []},
- #presence{type = unavailable, from = Peer}),
- send(Config, make_iq_result(Push)),
- disconnect(Config).
-
-proxy65_master(Config) ->
- Proxy = proxy_jid(Config),
- MyJID = my_jid(Config),
- Peer = ?config(slave, Config),
- wait_for_slave(Config),
- send(Config, #presence{}),
- ?recv1(#presence{from = MyJID, type = undefined}),
- true = is_feature_advertised(Config, ?NS_BYTESTREAMS, Proxy),
- #iq{type = result, sub_els = [#bytestreams{hosts = [StreamHost]}]} =
- send_recv(
- Config,
- #iq{type = get, sub_els = [#bytestreams{}], to = Proxy}),
- SID = randoms:get_string(),
- Data = crypto:rand_bytes(1024),
- put_event(Config, {StreamHost, SID, Data}),
- Socks5 = socks5_connect(StreamHost, {SID, MyJID, Peer}),
- wait_for_slave(Config),
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set, to = Proxy,
- sub_els = [#bytestreams{activate = Peer, sid = SID}]}),
- socks5_send(Socks5, Data),
- %%?recv1(#presence{type = unavailable, from = Peer}),
- disconnect(Config).
-
-proxy65_slave(Config) ->
- MyJID = my_jid(Config),
- Peer = ?config(master, Config),
- send(Config, #presence{}),
- ?recv1(#presence{from = MyJID, type = undefined}),
- wait_for_master(Config),
- {StreamHost, SID, Data} = get_event(Config),
- Socks5 = socks5_connect(StreamHost, {SID, Peer, MyJID}),
- wait_for_master(Config),
- socks5_recv(Socks5, Data),
- disconnect(Config).
-
-send_messages_to_room(Config, Range) ->
- MyNick = ?config(master_nick, Config),
- Room = muc_room_jid(Config),
- MyNickJID = jid:replace_resource(Room, MyNick),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- I = send(Config, #message{to = Room, body = [Text],
- type = groupchat}),
- ?recv1(#message{from = MyNickJID, id = I,
- type = groupchat,
- body = [Text]})
- end, Range).
-
-retrieve_messages_from_room_via_mam(Config, Range) ->
- MyNick = ?config(master_nick, Config),
- Room = muc_room_jid(Config),
- MyNickJID = jid:replace_resource(Room, MyNick),
- QID = randoms:get_string(),
- I = send(Config, #iq{type = set, to = Room,
- sub_els = [#mam_query{xmlns = ?NS_MAM_1, id = QID}]}),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{
- to = MyJID, from = Room,
- sub_els =
- [#mam_result{
- xmlns = ?NS_MAM_1,
- queryid = QID,
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els = [#message{
- from = MyNickJID,
- type = groupchat,
- body = [Text]}]}]}]})
- end, Range),
- ?recv1(#iq{from = Room, id = I, type = result, sub_els = []}).
-
-muc_mam_master(Config) ->
- MyJID = my_jid(Config),
- MyNick = ?config(master_nick, Config),
- Room = muc_room_jid(Config),
- MyNickJID = jid:replace_resource(Room, MyNick),
- %% Joining
- send(Config, #presence{to = MyNickJID, sub_els = [#muc{}]}),
- %% Receive self-presence
- ?recv1(#presence{from = MyNickJID}),
- %% MAM feature should not be advertised at this point,
- %% because MAM is not enabled so far
- false = is_feature_advertised(Config, ?NS_MAM_1, Room),
- %% Fill in some history
- send_messages_to_room(Config, lists:seq(1, 21)),
- %% We now should be able to retrieve those via MAM, even though
- %% MAM is disabled. However, only last 20 messages should be received.
- retrieve_messages_from_room_via_mam(Config, lists:seq(2, 21)),
- %% Now enable MAM for the conference
- %% Retrieve config first
- #iq{type = result, sub_els = [#muc_owner{config = #xdata{} = RoomCfg}]} =
- send_recv(Config, #iq{type = get, sub_els = [#muc_owner{}],
- to = Room}),
- %% Find the MAM field in the config and enable it
- NewFields = lists:flatmap(
- fun(#xdata_field{var = <<"muc#roomconfig_mam">> = Var}) ->
- [#xdata_field{var = Var, values = [<<"1">>]}];
- (_) ->
- []
- end, RoomCfg#xdata.fields),
- NewRoomCfg = #xdata{type = submit, fields = NewFields},
- I1 = send(Config, #iq{type = set, to = Room,
- sub_els = [#muc_owner{config = NewRoomCfg}]}),
- ?recv2(#iq{type = result, id = I1},
- #message{from = Room, type = groupchat,
- sub_els = [#muc_user{status_codes = [104]}]}),
- %% Check if MAM has been enabled
- true = is_feature_advertised(Config, ?NS_MAM_1, Room),
- %% We now sending some messages again
- send_messages_to_room(Config, lists:seq(1, 5)),
- %% And retrieve them via MAM again.
- retrieve_messages_from_room_via_mam(Config, lists:seq(1, 5)),
- disconnect(Config).
-
-muc_mam_slave(Config) ->
- disconnect(Config).
-
-muc_master(Config) ->
- MyJID = my_jid(Config),
- PeerJID = ?config(slave, Config),
- PeerBareJID = jid:remove_resource(PeerJID),
- PeerJIDStr = jid:to_string(PeerJID),
- MUC = muc_jid(Config),
- Room = muc_room_jid(Config),
- MyNick = ?config(master_nick, Config),
- MyNickJID = jid:replace_resource(Room, MyNick),
- PeerNick = ?config(slave_nick, Config),
- PeerNickJID = jid:replace_resource(Room, PeerNick),
- Subject = ?config(room_subject, Config),
- Localhost = jid:make(<<"">>, <<"localhost">>, <<"">>),
- true = is_feature_advertised(Config, ?NS_MUC, MUC),
- %% Joining
- send(Config, #presence{to = MyNickJID, sub_els = [#muc{}]}),
- %% As per XEP-0045 we MUST receive stanzas in the following order:
- %% 1. In-room presence from other occupants
- %% 2. In-room presence from the joining entity itself (so-called "self-presence")
- %% 3. Room history (if any)
- %% 4. The room subject
- %% 5. Live messages, presence updates, new user joins, etc.
- %% As this is the newly created room, we receive only the 2nd stanza.
- ?recv1(#presence{
- from = MyNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- status_codes = Codes,
- items = [#muc_item{role = moderator,
- jid = MyJID,
- affiliation = owner}]}]}),
- %% 110 -> Inform user that presence refers to itself
- %% 201 -> Inform user that a new room has been created
- [110, 201] = lists:sort(Codes),
- %% Request the configuration
- #iq{type = result, sub_els = [#muc_owner{config = #xdata{} = RoomCfg}]} =
- send_recv(Config, #iq{type = get, sub_els = [#muc_owner{}],
- to = Room}),
- NewFields =
- lists:flatmap(
- fun(#xdata_field{var = Var, values = OrigVals}) ->
- Vals = case Var of
- <<"FORM_TYPE">> ->
- OrigVals;
- <<"muc#roomconfig_roomname">> ->
- [<<"Test room">>];
- <<"muc#roomconfig_roomdesc">> ->
- [<<"Trying to break the server">>];
- <<"muc#roomconfig_persistentroom">> ->
- [<<"1">>];
- <<"members_by_default">> ->
- [<<"0">>];
- <<"muc#roomconfig_allowvoicerequests">> ->
- [<<"1">>];
- <<"public_list">> ->
- [<<"1">>];
- <<"muc#roomconfig_publicroom">> ->
- [<<"1">>];
- _ ->
- []
- end,
- if Vals /= [] ->
- [#xdata_field{values = Vals, var = Var}];
- true ->
- []
- end
- end, RoomCfg#xdata.fields),
- NewRoomCfg = #xdata{type = submit, fields = NewFields},
- ID = send(Config, #iq{type = set, to = Room,
- sub_els = [#muc_owner{config = NewRoomCfg}]}),
- ?recv2(#iq{type = result, id = ID},
- #message{from = Room, type = groupchat,
- sub_els = [#muc_user{status_codes = [104]}]}),
- %% Set subject
- send(Config, #message{to = Room, type = groupchat,
- body = [#text{data = Subject}]}),
- ?recv1(#message{from = MyNickJID, type = groupchat,
- body = [#text{data = Subject}]}),
- %% Sending messages (and thus, populating history for our peer)
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- I = send(Config, #message{to = Room, body = [Text],
- type = groupchat}),
- ?recv1(#message{from = MyNickJID, id = I,
- type = groupchat,
- body = [Text]})
- end, lists:seq(1, 5)),
- %% Inviting the peer
- send(Config, #message{to = Room, type = normal,
- sub_els =
- [#muc_user{
- invites =
- [#muc_invite{to = PeerJID}]}]}),
- %% Peer is joining
- ?recv1(#presence{from = PeerNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- items = [#muc_item{role = visitor,
- jid = PeerJID,
- affiliation = none}]}]}),
- %% Receiving a voice request
- ?recv1(#message{from = Room,
- sub_els = [#xdata{type = form,
- instructions = [_],
- fields = VoiceReqFs}]}),
- %% Approving the voice request
- ReplyVoiceReqFs =
- lists:map(
- fun(#xdata_field{var = Var, values = OrigVals}) ->
- Vals = case {Var, OrigVals} of
- {<<"FORM_TYPE">>,
- [<<"http://jabber.org/protocol/muc#request">>]} ->
- OrigVals;
- {<<"muc#role">>, [<<"participant">>]} ->
- [<<"participant">>];
- {<<"muc#jid">>, [PeerJIDStr]} ->
- [PeerJIDStr];
- {<<"muc#roomnick">>, [PeerNick]} ->
- [PeerNick];
- {<<"muc#request_allow">>, [<<"0">>]} ->
- [<<"1">>]
- end,
- #xdata_field{values = Vals, var = Var}
- end, VoiceReqFs),
- send(Config, #message{to = Room,
- sub_els = [#xdata{type = submit,
- fields = ReplyVoiceReqFs}]}),
- %% Peer is becoming a participant
- ?recv1(#presence{from = PeerNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- items = [#muc_item{role = participant,
- jid = PeerJID,
- affiliation = none}]}]}),
- %% Receive private message from the peer
- ?recv1(#message{from = PeerNickJID, body = [#text{data = Subject}]}),
- %% Granting membership to the peer and localhost server
- I1 = send(Config,
- #iq{type = set, to = Room,
- sub_els =
- [#muc_admin{
- items = [#muc_item{jid = Localhost,
- affiliation = member},
- #muc_item{nick = PeerNick,
- jid = PeerBareJID,
- affiliation = member}]}]}),
- %% Peer became a member
- ?recv1(#presence{from = PeerNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- items = [#muc_item{affiliation = member,
- jid = PeerJID,
- role = participant}]}]}),
- ?recv1(#message{from = Room,
- sub_els = [#muc_user{
- items = [#muc_item{affiliation = member,
- jid = Localhost,
- role = none}]}]}),
- %% BUG: We should not receive any sub_els!
- ?recv1(#iq{type = result, id = I1, sub_els = [_|_]}),
- %% Receive groupchat message from the peer
- ?recv1(#message{type = groupchat, from = PeerNickJID,
- body = [#text{data = Subject}]}),
- %% Retrieving a member list
- #iq{type = result, sub_els = [#muc_admin{items = MemberList}]} =
- send_recv(Config,
- #iq{type = get, to = Room,
- sub_els =
- [#muc_admin{items = [#muc_item{affiliation = member}]}]}),
- [#muc_item{affiliation = member,
- jid = Localhost},
- #muc_item{affiliation = member,
- jid = MyBareJID}] = lists:keysort(#muc_item.jid, MemberList),
- %% Kick the peer
- I2 = send(Config,
- #iq{type = set, to = Room,
- sub_els = [#muc_admin{
- items = [#muc_item{nick = PeerNick,
- role = none}]}]}),
- %% Got notification the peer is kicked
- %% 307 -> Inform user that he or she has been kicked from the room
- ?recv1(#presence{from = PeerNickJID, type = unavailable,
- sub_els = [#muc_user{
- status_codes = [307],
- items = [#muc_item{affiliation = member,
- jid = PeerJID,
- role = none}]}]}),
- %% BUG: We should not receive any sub_els!
- ?recv1(#iq{type = result, id = I2, sub_els = [_|_]}),
- %% Destroying the room
- I3 = send(Config,
- #iq{type = set, to = Room,
- sub_els = [#muc_owner{
- destroy = #muc_owner_destroy{
- reason = Subject}}]}),
- %% Kicked off
- ?recv1(#presence{from = MyNickJID, type = unavailable,
- sub_els = [#muc_user{items = [#muc_item{role = none,
- affiliation = none}],
- destroy = #muc_user_destroy{
- reason = Subject}}]}),
- %% BUG: We should not receive any sub_els!
- ?recv1(#iq{type = result, id = I3, sub_els = [_|_]}),
- disconnect(Config).
-
-muc_slave(Config) ->
- MyJID = my_jid(Config),
- MyBareJID = jid:remove_resource(MyJID),
- PeerJID = ?config(master, Config),
- MUC = muc_jid(Config),
- Room = muc_room_jid(Config),
- MyNick = ?config(slave_nick, Config),
- MyNickJID = jid:replace_resource(Room, MyNick),
- PeerNick = ?config(master_nick, Config),
- PeerNickJID = jid:replace_resource(Room, PeerNick),
- Subject = ?config(room_subject, Config),
- Localhost = jid:make(<<"">>, <<"localhost">>, <<"">>),
- %% Receive an invite from the peer
- ?recv1(#message{from = Room, type = normal,
- sub_els =
- [#muc_user{invites =
- [#muc_invite{from = PeerJID}]}]}),
- %% But before joining we discover the MUC service first
- %% to check if the room is in the disco list
- #iq{type = result,
- sub_els = [#disco_items{items = [#disco_item{jid = Room}]}]} =
- send_recv(Config, #iq{type = get, to = MUC,
- sub_els = [#disco_items{}]}),
- %% Now check if the peer is in the room. We check this via disco#items
- #iq{type = result,
- sub_els = [#disco_items{items = [#disco_item{jid = PeerNickJID,
- name = PeerNick}]}]} =
- send_recv(Config, #iq{type = get, to = Room,
- sub_els = [#disco_items{}]}),
- %% Now joining
- send(Config, #presence{to = MyNickJID, sub_els = [#muc{}]}),
- %% First presence is from the participant, i.e. from the peer
- ?recv1(#presence{
- from = PeerNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- status_codes = [],
- items = [#muc_item{role = moderator,
- affiliation = owner}]}]}),
- %% The next is the self-presence (code 110 means it)
- ?recv1(#presence{
- from = MyNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- status_codes = [110],
- items = [#muc_item{role = visitor,
- affiliation = none}]}]}),
- %% Receive the room subject
- ?recv1(#message{from = PeerNickJID, type = groupchat,
- body = [#text{data = Subject}],
- sub_els = [#delay{}]}),
- %% Receive MUC history
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{from = PeerNickJID,
- type = groupchat,
- body = [Text],
- sub_els = [#delay{}]})
- end, lists:seq(1, 5)),
- %% Sending a voice request
- VoiceReq = #xdata{
- type = submit,
- fields =
- [#xdata_field{
- var = <<"FORM_TYPE">>,
- values = [<<"http://jabber.org/protocol/muc#request">>]},
- #xdata_field{
- var = <<"muc#role">>,
- type = 'text-single',
- values = [<<"participant">>]}]},
- send(Config, #message{to = Room, sub_els = [VoiceReq]}),
- %% Becoming a participant
- ?recv1(#presence{from = MyNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- items = [#muc_item{role = participant,
- affiliation = none}]}]}),
- %% Sending private message to the peer
- send(Config, #message{to = PeerNickJID,
- body = [#text{data = Subject}]}),
- %% Becoming a member
- ?recv1(#presence{from = MyNickJID,
- sub_els = [#vcard_xupdate{},
- #muc_user{
- items = [#muc_item{role = participant,
- affiliation = member}]}]}),
- %% Sending groupchat message
- send(Config, #message{to = Room, type = groupchat,
- body = [#text{data = Subject}]}),
- %% Receive this message back
- ?recv1(#message{type = groupchat, from = MyNickJID,
- body = [#text{data = Subject}]}),
- %% We're kicked off
- %% 307 -> Inform user that he or she has been kicked from the room
- ?recv1(#presence{from = MyNickJID, type = unavailable,
- sub_els = [#muc_user{
- status_codes = [307],
- items = [#muc_item{affiliation = member,
- role = none}]}]}),
- disconnect(Config).
-
-muc_register_nick(Config, MUC, PrevNick, Nick) ->
- {Registered, PrevNickVals} = if PrevNick /= <<"">> ->
- {true, [PrevNick]};
- true ->
- {false, []}
- end,
- %% Request register form
- #iq{type = result,
- sub_els = [#register{registered = Registered,
- xdata = #xdata{type = form,
- fields = FsWithoutNick}}]} =
- send_recv(Config, #iq{type = get, to = MUC,
- sub_els = [#register{}]}),
- %% Check if 'nick' field presents
- #xdata_field{type = 'text-single',
- var = <<"nick">>,
- values = PrevNickVals} =
- lists:keyfind(<<"nick">>, #xdata_field.var, FsWithoutNick),
- X = #xdata{type = submit,
- fields = [#xdata_field{var = <<"nick">>, values = [Nick]}]},
- %% Submitting form
- #iq{type = result, sub_els = [_|_]} =
- send_recv(Config, #iq{type = set, to = MUC,
- sub_els = [#register{xdata = X}]}),
- %% Check if the nick was registered
- #iq{type = result,
- sub_els = [#register{registered = true,
- xdata = #xdata{type = form,
- fields = FsWithNick}}]} =
- send_recv(Config, #iq{type = get, to = MUC,
- sub_els = [#register{}]}),
- #xdata_field{type = 'text-single', var = <<"nick">>,
- values = [Nick]} =
- lists:keyfind(<<"nick">>, #xdata_field.var, FsWithNick).
-
-muc_register_master(Config) ->
- MUC = muc_jid(Config),
- %% Register nick "master1"
- muc_register_nick(Config, MUC, <<"">>, <<"master1">>),
- %% Unregister nick "master1" via jabber:register
- #iq{type = result, sub_els = [_|_]} =
- send_recv(Config, #iq{type = set, to = MUC,
- sub_els = [#register{remove = true}]}),
- %% Register nick "master2"
- muc_register_nick(Config, MUC, <<"">>, <<"master2">>),
- %% Now register nick "master"
- muc_register_nick(Config, MUC, <<"master2">>, <<"master">>),
- disconnect(Config).
-
-muc_register_slave(Config) ->
- MUC = muc_jid(Config),
- %% Trying to register occupied nick "master"
- X = #xdata{type = submit,
- fields = [#xdata_field{var = <<"nick">>,
- values = [<<"master">>]}]},
- #iq{type = error} =
- send_recv(Config, #iq{type = set, to = MUC,
- sub_els = [#register{xdata = X}]}),
- disconnect(Config).
-
-announce_master(Config) ->
- MyJID = my_jid(Config),
- ServerJID = server_jid(Config),
- MotdJID = jid:replace_resource(ServerJID, <<"announce/motd">>),
- MotdText = #text{data = <<"motd">>},
- send(Config, #presence{}),
- ?recv1(#presence{from = MyJID}),
- %% Set message of the day
- send(Config, #message{to = MotdJID, body = [MotdText]}),
- %% Receive this message back
- ?recv1(#message{from = ServerJID, body = [MotdText]}),
- disconnect(Config).
-
-announce_slave(Config) ->
- MyJID = my_jid(Config),
- ServerJID = server_jid(Config),
- MotdDelJID = jid:replace_resource(ServerJID, <<"announce/motd/delete">>),
- MotdText = #text{data = <<"motd">>},
- send(Config, #presence{}),
- ?recv2(#presence{from = MyJID},
- #message{from = ServerJID, body = [MotdText]}),
- %% Delete message of the day
- send(Config, #message{to = MotdDelJID}),
- disconnect(Config).
-
-flex_offline_master(Config) ->
- Peer = ?config(slave, Config),
- LPeer = jid:remove_resource(Peer),
- lists:foreach(
- fun(I) ->
- Body = integer_to_binary(I),
- send(Config, #message{to = LPeer,
- body = [#text{data = Body}],
- subject = [#text{data = <<"subject">>}]})
- end, lists:seq(1, 5)),
- disconnect(Config).
-
-flex_offline_slave(Config) ->
- MyJID = my_jid(Config),
- MyBareJID = jid:remove_resource(MyJID),
- Peer = ?config(master, Config),
- Peer_s = jid:to_string(Peer),
- true = is_feature_advertised(Config, ?NS_FLEX_OFFLINE),
- %% Request disco#info
- #iq{type = result,
- sub_els = [#disco_info{
- node = ?NS_FLEX_OFFLINE,
- identities = Ids,
- features = Fts,
- xdata = [X]}]} =
- send_recv(Config, #iq{type = get,
- sub_els = [#disco_info{
- node = ?NS_FLEX_OFFLINE}]}),
- %% Check if we have correct identities
- true = lists:any(
- fun(#identity{category = <<"automation">>,
- type = <<"message-list">>}) -> true;
- (_) -> false
- end, Ids),
- %% Check if we have needed feature
- true = lists:member(?NS_FLEX_OFFLINE, Fts),
- %% Check xdata, the 'number_of_messages' should be 5
- #xdata{type = result,
- fields = [#xdata_field{type = hidden,
- var = <<"FORM_TYPE">>},
- #xdata_field{var = <<"number_of_messages">>,
- values = [<<"5">>]}]} = X,
- %% Fetch headers,
- #iq{type = result,
- sub_els = [#disco_items{
- node = ?NS_FLEX_OFFLINE,
- items = DiscoItems}]} =
- send_recv(Config, #iq{type = get,
- sub_els = [#disco_items{
- node = ?NS_FLEX_OFFLINE}]}),
- %% Check if headers are correct
- Nodes = lists:sort(
- lists:map(
- fun(#disco_item{jid = J, name = P, node = N})
- when (J == MyBareJID) and (P == Peer_s) ->
- N
- end, DiscoItems)),
- %% Since headers are received we can send initial presence without a risk
- %% of getting offline messages flood
- send(Config, #presence{}),
- ?recv1(#presence{from = MyJID}),
- %% Check full fetch
- I0 = send(Config, #iq{type = get, sub_els = [#offline{fetch = true}]}),
- lists:foreach(
- fun({I, N}) ->
- Text = integer_to_binary(I),
- ?recv1(#message{body = Body, sub_els = SubEls}),
- [#text{data = Text}] = Body,
- #offline{items = [#offline_item{node = N}]} =
- lists:keyfind(offline, 1, SubEls),
- #delay{} = lists:keyfind(delay, 1, SubEls)
- end, lists:zip(lists:seq(1, 5), Nodes)),
- ?recv1(#iq{type = result, id = I0, sub_els = []}),
- %% Fetch 2nd and 4th message
- I1 = send(Config,
- #iq{type = get,
- sub_els = [#offline{
- items = [#offline_item{
- action = view,
- node = lists:nth(2, Nodes)},
- #offline_item{
- action = view,
- node = lists:nth(4, Nodes)}]}]}),
- lists:foreach(
- fun({I, N}) ->
- Text = integer_to_binary(I),
- ?recv1(#message{body = [#text{data = Text}], sub_els = SubEls}),
- #offline{items = [#offline_item{node = N}]} =
- lists:keyfind(offline, 1, SubEls)
- end, lists:zip([2, 4], [lists:nth(2, Nodes), lists:nth(4, Nodes)])),
- ?recv1(#iq{type = result, id = I1, sub_els = []}),
- %% Delete 2nd and 4th message
- #iq{type = result, sub_els = []} =
- send_recv(
- Config,
- #iq{type = set,
- sub_els = [#offline{
- items = [#offline_item{
- action = remove,
- node = lists:nth(2, Nodes)},
- #offline_item{
- action = remove,
- node = lists:nth(4, Nodes)}]}]}),
- %% Check if messages were deleted
- #iq{type = result,
- sub_els = [#disco_items{
- node = ?NS_FLEX_OFFLINE,
- items = RemainedItems}]} =
- send_recv(Config, #iq{type = get,
- sub_els = [#disco_items{
- node = ?NS_FLEX_OFFLINE}]}),
- RemainedNodes = [lists:nth(1, Nodes),
- lists:nth(3, Nodes),
- lists:nth(5, Nodes)],
- RemainedNodes = lists:sort(
- lists:map(
- fun(#disco_item{node = N}) -> N end,
- RemainedItems)),
- %% Purge everything left
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = set, sub_els = [#offline{purge = true}]}),
- %% Check if there is no offline messages
- #iq{type = result,
- sub_els = [#disco_items{node = ?NS_FLEX_OFFLINE, items = []}]} =
- send_recv(Config, #iq{type = get,
- sub_els = [#disco_items{
- node = ?NS_FLEX_OFFLINE}]}),
- disconnect(Config).
-
-offline_master(Config) ->
- Peer = ?config(slave, Config),
- LPeer = jid:remove_resource(Peer),
- send(Config, #message{to = LPeer,
- body = [#text{data = <<"body">>}],
- subject = [#text{data = <<"subject">>}]}),
- disconnect(Config).
-
-offline_slave(Config) ->
- Peer = ?config(master, Config),
- send(Config, #presence{}),
- {_, #message{sub_els = SubEls}} =
- ?recv2(#presence{},
- #message{from = Peer,
- body = [#text{data = <<"body">>}],
- subject = [#text{data = <<"subject">>}]}),
- true = lists:keymember(delay, 1, SubEls),
- disconnect(Config).
-
-carbons_master(Config) ->
- MyJID = my_jid(Config),
- MyBareJID = jid:remove_resource(MyJID),
- Peer = ?config(slave, Config),
- Txt = #text{data = <<"body">>},
- true = is_feature_advertised(Config, ?NS_CARBONS_2),
- send(Config, #presence{priority = 10}),
- ?recv1(#presence{from = MyJID}),
- wait_for_slave(Config),
- ?recv1(#presence{from = Peer}),
- %% Enable carbons
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set,
- sub_els = [#carbons_enable{}]}),
- %% Send a message to bare and full JID
- send(Config, #message{to = MyBareJID, type = chat, body = [Txt]}),
- send(Config, #message{to = MyJID, type = chat, body = [Txt]}),
- send(Config, #message{to = MyBareJID, type = chat, body = [Txt],
- sub_els = [#carbons_private{}]}),
- send(Config, #message{to = MyJID, type = chat, body = [Txt],
- sub_els = [#carbons_private{}]}),
- %% Receive the messages back
- ?recv4(#message{from = MyJID, to = MyBareJID, type = chat,
- body = [Txt], sub_els = []},
- #message{from = MyJID, to = MyJID, type = chat,
- body = [Txt], sub_els = []},
- #message{from = MyJID, to = MyBareJID, type = chat,
- body = [Txt], sub_els = [#carbons_private{}]},
- #message{from = MyJID, to = MyJID, type = chat,
- body = [Txt], sub_els = [#carbons_private{}]}),
- %% Disable carbons
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set,
- sub_els = [#carbons_disable{}]}),
- wait_for_slave(Config),
- %% Repeat the same and leave
- send(Config, #message{to = MyBareJID, type = chat, body = [Txt]}),
- send(Config, #message{to = MyJID, type = chat, body = [Txt]}),
- send(Config, #message{to = MyBareJID, type = chat, body = [Txt],
- sub_els = [#carbons_private{}]}),
- send(Config, #message{to = MyJID, type = chat, body = [Txt],
- sub_els = [#carbons_private{}]}),
- ?recv4(#message{from = MyJID, to = MyBareJID, type = chat,
- body = [Txt], sub_els = []},
- #message{from = MyJID, to = MyJID, type = chat,
- body = [Txt], sub_els = []},
- #message{from = MyJID, to = MyBareJID, type = chat,
- body = [Txt], sub_els = [#carbons_private{}]},
- #message{from = MyJID, to = MyJID, type = chat,
- body = [Txt], sub_els = [#carbons_private{}]}),
- disconnect(Config).
-
-carbons_slave(Config) ->
- MyJID = my_jid(Config),
- MyBareJID = jid:remove_resource(MyJID),
- Peer = ?config(master, Config),
- Txt = #text{data = <<"body">>},
- wait_for_master(Config),
- send(Config, #presence{priority = 5}),
- ?recv2(#presence{from = MyJID}, #presence{from = Peer}),
- %% Enable carbons
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set,
- sub_els = [#carbons_enable{}]}),
- %% Receive messages sent by the peer
- ?recv4(
- #message{from = MyBareJID, to = MyJID, type = chat,
- sub_els =
- [#carbons_sent{
- forwarded = #forwarded{
- sub_els =
- [#message{from = Peer,
- to = MyBareJID,
- type = chat,
- body = [Txt]}]}}]},
- #message{from = MyBareJID, to = MyJID, type = chat,
- sub_els =
- [#carbons_sent{
- forwarded = #forwarded{
- sub_els =
- [#message{from = Peer,
- to = Peer,
- type = chat,
- body = [Txt]}]}}]},
- #message{from = MyBareJID, to = MyJID, type = chat,
- sub_els =
- [#carbons_received{
- forwarded = #forwarded{
- sub_els =
- [#message{from = Peer,
- to = MyBareJID,
- type = chat,
- body = [Txt]}]}}]},
- #message{from = MyBareJID, to = MyJID, type = chat,
- sub_els =
- [#carbons_received{
- forwarded = #forwarded{
- sub_els =
- [#message{from = Peer,
- to = Peer,
- type = chat,
- body = [Txt]}]}}]}),
- %% Disable carbons
- #iq{type = result, sub_els = []} =
- send_recv(Config,
- #iq{type = set,
- sub_els = [#carbons_disable{}]}),
- wait_for_master(Config),
- %% Now we should receive nothing but presence unavailable from the peer
- ?recv1(#presence{from = Peer, type = unavailable}),
- disconnect(Config).
-
-mam_old_master(Config) ->
- mam_master(Config, ?NS_MAM_TMP).
-
-mam_new_master(Config) ->
- mam_master(Config, ?NS_MAM_0).
-
-mam_master(Config, NS) ->
- true = is_feature_advertised(Config, NS),
- MyJID = my_jid(Config),
- BareMyJID = jid:remove_resource(MyJID),
- Peer = ?config(slave, Config),
- send(Config, #presence{}),
- ?recv1(#presence{}),
- wait_for_slave(Config),
- ?recv1(#presence{from = Peer}),
- #iq{type = result, sub_els = [#mam_prefs{xmlns = NS, default = roster}]} =
- send_recv(Config,
- #iq{type = set,
- sub_els = [#mam_prefs{xmlns = NS,
- default = roster,
- never = [MyJID]}]}),
- if NS == ?NS_MAM_TMP ->
- FakeArchived = #mam_archived{id = randoms:get_string(),
- by = server_jid(Config)},
- send(Config, #message{to = MyJID,
- sub_els = [FakeArchived],
- body = [#text{data = <<"a">>}]}),
- send(Config, #message{to = BareMyJID,
- sub_els = [FakeArchived],
- body = [#text{data = <<"b">>}]}),
- %% NOTE: The server should strip fake archived tags,
- %% i.e. the sub_els received should be [].
- ?recv2(#message{body = [#text{data = <<"a">>}], sub_els = []},
- #message{body = [#text{data = <<"b">>}], sub_els = []});
- true ->
- ok
- end,
- wait_for_slave(Config),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- send(Config,
- #message{to = Peer, body = [Text]})
- end, lists:seq(1, 5)),
- ?recv1(#presence{type = unavailable, from = Peer}),
- mam_query_all(Config, NS),
- mam_query_with(Config, Peer, NS),
- %% mam_query_with(Config, jid:remove_resource(Peer)),
- mam_query_rsm(Config, NS),
- #iq{type = result, sub_els = [#mam_prefs{xmlns = NS, default = never}]} =
- send_recv(Config, #iq{type = set,
- sub_els = [#mam_prefs{xmlns = NS,
- default = never}]}),
- disconnect(Config).
-
-mam_old_slave(Config) ->
- mam_slave(Config, ?NS_MAM_TMP).
-
-mam_new_slave(Config) ->
- mam_slave(Config, ?NS_MAM_0).
-
-mam_slave(Config, NS) ->
- Peer = ?config(master, Config),
- ServerJID = server_jid(Config),
- wait_for_master(Config),
- send(Config, #presence{}),
- ?recv2(#presence{}, #presence{from = Peer}),
- #iq{type = result, sub_els = [#mam_prefs{xmlns = NS, default = always}]} =
- send_recv(Config,
- #iq{type = set,
- sub_els = [#mam_prefs{xmlns = NS, default = always}]}),
- wait_for_master(Config),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{from = Peer, body = [Text],
- sub_els = [#mam_archived{by = ServerJID}]})
- end, lists:seq(1, 5)),
- #iq{type = result, sub_els = [#mam_prefs{xmlns = NS, default = never}]} =
- send_recv(Config, #iq{type = set,
- sub_els = [#mam_prefs{xmlns = NS, default = never}]}),
- disconnect(Config).
-
-mam_query_all(Config, NS) ->
- QID = randoms:get_string(),
- MyJID = my_jid(Config),
- Peer = ?config(slave, Config),
- Type = case NS of
- ?NS_MAM_TMP -> get;
- _ -> set
- end,
- I = send(Config, #iq{type = Type, sub_els = [#mam_query{xmlns = NS, id = QID}]}),
- maybe_recv_iq_result(NS, I),
- Iter = if NS == ?NS_MAM_TMP -> lists:seq(1, 5);
- true -> lists:seq(1, 5) ++ lists:seq(1, 5)
- end,
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{to = MyJID,
- sub_els =
- [#mam_result{
- queryid = QID,
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els =
- [#message{
- from = MyJID, to = Peer,
- body = [Text]}]}]}]})
- end, Iter),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I,
- sub_els = [#mam_query{xmlns = NS, id = QID}]});
- true ->
- ?recv1(#message{sub_els = [#mam_fin{complete = true, id = QID}]})
- end.
-
-mam_query_with(Config, JID, NS) ->
- MyJID = my_jid(Config),
- Peer = ?config(slave, Config),
- {Query, Type} = if NS == ?NS_MAM_TMP ->
- {#mam_query{xmlns = NS, with = JID}, get};
- true ->
- Fs = [#xdata_field{var = <<"jid">>,
- values = [jid:to_string(JID)]}],
- {#mam_query{xmlns = NS,
- xdata = #xdata{type = submit, fields = Fs}}, set}
- end,
- I = send(Config, #iq{type = Type, sub_els = [Query]}),
- Iter = if NS == ?NS_MAM_TMP -> lists:seq(1, 5);
- true -> lists:seq(1, 5) ++ lists:seq(1, 5)
- end,
- maybe_recv_iq_result(NS, I),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{to = MyJID,
- sub_els =
- [#mam_result{
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els =
- [#message{
- from = MyJID, to = Peer,
- body = [Text]}]}]}]})
- end, Iter),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I,
- sub_els = [#mam_query{xmlns = NS}]});
- true ->
- ?recv1(#message{sub_els = [#mam_fin{complete = true}]})
- end.
-
-maybe_recv_iq_result(?NS_MAM_0, I1) ->
- ?recv1(#iq{type = result, id = I1});
-maybe_recv_iq_result(_, _) ->
- ok.
-
-mam_query_rsm(Config, NS) ->
- MyJID = my_jid(Config),
- Peer = ?config(slave, Config),
- Type = case NS of
- ?NS_MAM_TMP -> get;
- _ -> set
- end,
- %% Get the first 3 items out of 5
- I1 = send(Config,
- #iq{type = Type,
- sub_els = [#mam_query{xmlns = NS, rsm = #rsm_set{max = 3}}]}),
- maybe_recv_iq_result(NS, I1),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{to = MyJID,
- sub_els =
- [#mam_result{
- xmlns = NS,
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els =
- [#message{
- from = MyJID, to = Peer,
- body = [Text]}]}]}]})
- end, lists:seq(1, 3)),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I1,
- sub_els = [#mam_query{xmlns = NS,
- rsm = #rsm_set{last = Last, count = 5}}]});
- true ->
- ?recv1(#message{sub_els = [#mam_fin{
- complete = false,
- rsm = #rsm_set{last = Last, count = 10}}]})
- end,
- %% Get the next items starting from the `Last`.
- %% Limit the response to 2 items.
- I2 = send(Config,
- #iq{type = Type,
- sub_els = [#mam_query{xmlns = NS,
- rsm = #rsm_set{max = 2,
- 'after' = Last}}]}),
- maybe_recv_iq_result(NS, I2),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{to = MyJID,
- sub_els =
- [#mam_result{
- xmlns = NS,
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els =
- [#message{
- from = MyJID, to = Peer,
- body = [Text]}]}]}]})
- end, lists:seq(4, 5)),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I2,
- sub_els = [#mam_query{
- xmlns = NS,
- rsm = #rsm_set{
- count = 5,
- first = #rsm_first{data = First}}}]});
- true ->
- ?recv1(#message{
- sub_els = [#mam_fin{
- complete = false,
- rsm = #rsm_set{
- count = 10,
- first = #rsm_first{data = First}}}]})
- end,
- %% Paging back. Should receive 3 elements: 1, 2, 3.
- I3 = send(Config,
- #iq{type = Type,
- sub_els = [#mam_query{xmlns = NS,
- rsm = #rsm_set{max = 3,
- before = First}}]}),
- maybe_recv_iq_result(NS, I3),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{to = MyJID,
- sub_els =
- [#mam_result{
- xmlns = NS,
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els =
- [#message{
- from = MyJID, to = Peer,
- body = [Text]}]}]}]})
- end, lists:seq(1, 3)),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I3,
- sub_els = [#mam_query{xmlns = NS, rsm = #rsm_set{count = 5}}]});
- true ->
- ?recv1(#message{
- sub_els = [#mam_fin{complete = true,
- rsm = #rsm_set{count = 10}}]})
- end,
- %% Getting the item count. Should be 5 (or 10).
- I4 = send(Config,
- #iq{type = Type,
- sub_els = [#mam_query{xmlns = NS,
- rsm = #rsm_set{max = 0}}]}),
- maybe_recv_iq_result(NS, I4),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I4,
- sub_els = [#mam_query{
- xmlns = NS,
- rsm = #rsm_set{count = 5,
- first = undefined,
- last = undefined}}]});
- true ->
- ?recv1(#message{
- sub_els = [#mam_fin{
- complete = false,
- rsm = #rsm_set{count = 10,
- first = undefined,
- last = undefined}}]})
- end,
- %% Should receive 2 last messages
- I5 = send(Config,
- #iq{type = Type,
- sub_els = [#mam_query{xmlns = NS,
- rsm = #rsm_set{max = 2,
- before = none}}]}),
- maybe_recv_iq_result(NS, I5),
- lists:foreach(
- fun(N) ->
- Text = #text{data = integer_to_binary(N)},
- ?recv1(#message{to = MyJID,
- sub_els =
- [#mam_result{
- xmlns = NS,
- sub_els =
- [#forwarded{
- delay = #delay{},
- sub_els =
- [#message{
- from = MyJID, to = Peer,
- body = [Text]}]}]}]})
- end, lists:seq(4, 5)),
- if NS == ?NS_MAM_TMP ->
- ?recv1(#iq{type = result, id = I5,
- sub_els = [#mam_query{xmlns = NS, rsm = #rsm_set{count = 5}}]});
- true ->
- ?recv1(#message{
- sub_els = [#mam_fin{complete = false,
- rsm = #rsm_set{count = 10}}]})
- end.
-
-client_state_master(Config) ->
- true = ?config(csi, Config),
- Peer = ?config(slave, Config),
- Presence = #presence{to = Peer},
- ChatState = #message{to = Peer, thread = <<"1">>,
- sub_els = [#chatstate{type = active}]},
- Message = ChatState#message{body = [#text{data = <<"body">>}]},
- PepPayload = xmpp_codec:encode(#presence{}),
- PepOne = #message{
- to = Peer,
- sub_els =
- [#pubsub_event{
- items =
- [#pubsub_event_items{
- node = <<"foo-1">>,
- items =
- [#pubsub_event_item{
- id = <<"pep-1">>,
- xml_els = [PepPayload]}]}]}]},
- PepTwo = #message{
- to = Peer,
- sub_els =
- [#pubsub_event{
- items =
- [#pubsub_event_items{
- node = <<"foo-2">>,
- items =
- [#pubsub_event_item{
- id = <<"pep-2">>,
- xml_els = [PepPayload]}]}]}]},
- %% Wait for the slave to become inactive.
- wait_for_slave(Config),
- %% Should be queued (but see below):
- send(Config, Presence),
- %% Should replace the previous presence in the queue:
- send(Config, Presence#presence{type = unavailable}),
- %% The following two PEP stanzas should be queued (but see below):
- send(Config, PepOne),
- send(Config, PepTwo),
- %% The following two PEP stanzas should replace the previous two:
- send(Config, PepOne),
- send(Config, PepTwo),
- %% Should be queued (but see below):
- send(Config, ChatState),
- %% Should replace the previous chat state in the queue:
- send(Config, ChatState#message{sub_els = [#chatstate{type = composing}]}),
- %% Should be sent immediately, together with the queued stanzas:
- send(Config, Message),
- %% Wait for the slave to become active.
- wait_for_slave(Config),
- %% Should be delivered, as the client is active again:
- send(Config, ChatState),
- disconnect(Config).
-
-client_state_slave(Config) ->
- Peer = ?config(master, Config),
- change_client_state(Config, inactive),
- wait_for_master(Config),
- ?recv1(#presence{from = Peer, type = unavailable,
- sub_els = [#delay{}]}),
- #message{
- from = Peer,
- sub_els =
- [#pubsub_event{
- items =
- [#pubsub_event_items{
- node = <<"foo-1">>,
- items =
- [#pubsub_event_item{
- id = <<"pep-1">>}]}]},
- #delay{}]} = recv(),
- #message{
- from = Peer,
- sub_els =
- [#pubsub_event{
- items =
- [#pubsub_event_items{
- node = <<"foo-2">>,
- items =
- [#pubsub_event_item{
- id = <<"pep-2">>}]}]},
- #delay{}]} = recv(),
- ?recv1(#message{from = Peer, thread = <<"1">>,
- sub_els = [#chatstate{type = composing},
- #delay{}]}),
- ?recv1(#message{from = Peer, thread = <<"1">>,
- body = [#text{data = <<"body">>}],
- sub_els = [#chatstate{type = active}]}),
- change_client_state(Config, active),
- wait_for_master(Config),
- ?recv1(#message{from = Peer, thread = <<"1">>,
- sub_els = [#chatstate{type = active}]}),
- disconnect(Config).
-
%%%===================================================================
%%% Aux functions
%%%===================================================================
-change_client_state(Config, NewState) ->
- send(Config, #csi{type = NewState}),
- send_recv(Config, #iq{type = get, to = server_jid(Config),
- sub_els = [#ping{}]}).
-
bookmark_conference() ->
#bookmark_conference{name = <<"Some name">>,
autojoin = true,
@@ -2377,28 +979,22 @@ bookmark_conference() ->
<<"some.conference.org">>,
<<>>)}.
-socks5_connect(#streamhost{host = Host, port = Port},
- {SID, JID1, JID2}) ->
- Hash = p1_sha:sha([SID, jid:to_string(JID1), jid:to_string(JID2)]),
- {ok, Sock} = gen_tcp:connect(binary_to_list(Host), Port,
- [binary, {active, false}]),
- Init = <<?VERSION_5, 1, ?AUTH_ANONYMOUS>>,
- InitAck = <<?VERSION_5, ?AUTH_ANONYMOUS>>,
- Req = <<?VERSION_5, ?CMD_CONNECT, 0,
- ?ATYP_DOMAINNAME, 40, Hash:40/binary, 0, 0>>,
- Resp = <<?VERSION_5, ?SUCCESS, 0, ?ATYP_DOMAINNAME,
- 40, Hash:40/binary, 0, 0>>,
- gen_tcp:send(Sock, Init),
- {ok, InitAck} = gen_tcp:recv(Sock, size(InitAck)),
- gen_tcp:send(Sock, Req),
- {ok, Resp} = gen_tcp:recv(Sock, size(Resp)),
- Sock.
-
-socks5_send(Sock, Data) ->
- ok = gen_tcp:send(Sock, Data).
-
-socks5_recv(Sock, Data) ->
- {ok, Data} = gen_tcp:recv(Sock, size(Data)).
+'$handle_undefined_function'(F, [Config]) when is_list(Config) ->
+ case re:split(atom_to_list(F), "_", [{return, list}, {parts, 2}]) of
+ [M, T] ->
+ Module = list_to_atom(M ++ "_tests"),
+ Function = list_to_atom(T),
+ case erlang:function_exported(Module, Function, 1) of
+ true ->
+ Module:Function(Config);
+ false ->
+ erlang:error({undef, F})
+ end;
+ _ ->
+ erlang:error({undef, F})
+ end;
+'$handle_undefined_function'(_, _) ->
+ erlang:error(undef).
%%%===================================================================
%%% SQL stuff
@@ -2480,12 +1076,12 @@ split(Data) ->
clear_riak_tables(Config) ->
User = ?config(user, Config),
Server = ?config(server, Config),
- Room = muc_room_jid(Config),
- {URoom, SRoom, _} = jid:tolower(Room),
+ Master = <<"test_master!#$%^*()`~+-;_=[]{}|\\">>,
+ Slave = <<"test_slave!#$%^*()`~+-;_=[]{}|\\">>,
ejabberd_auth:remove_user(User, Server),
- ejabberd_auth:remove_user(<<"test_slave">>, Server),
- ejabberd_auth:remove_user(<<"test_master">>, Server),
- mod_muc:forget_room(Server, URoom, SRoom),
- ejabberd_riak:delete(muc_registered, {{<<"test_slave">>, Server}, SRoom}),
- ejabberd_riak:delete(muc_registered, {{<<"test_master">>, Server}, SRoom}),
+ ejabberd_auth:remove_user(Master, Server),
+ ejabberd_auth:remove_user(Slave, Server),
+ ejabberd_riak:delete(muc_room),
+ ejabberd_riak:delete(muc_registered),
+ timer:sleep(timer:seconds(5)),
Config.
diff --git a/test/ejabberd_SUITE_data/ca.key b/test/ejabberd_SUITE_data/ca.key
new file mode 100644
index 000000000..858100686
--- /dev/null
+++ b/test/ejabberd_SUITE_data/ca.key
@@ -0,0 +1,27 @@
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAxGSSFSDTbBTk2GwkORLCXoBdYq5YxwPfen8bK+8WjxRb9Thp
+FsHYfImtDQV0qvcZyWnjUFxRh7Dyw7A2X690nplCdzZ9Gl+5yzzlRefHborMSnNY
+rnTqx3vs9qiac0A5bzdjMY7XN3VuVwz0XWY6rAiL/7OxunCNUnQz+oswDx7cj1W4
+bb9pFzBvW5TjaAiziyzS3IxvTc7kYQYJEa99vIlDZ+Ov9rHtiF/5CZ8kHc457B3s
+uc9hHxO2t0EzmBiqg7wpksJjoJeXaJvT9sKSgW6LXkjBCm/7jm1ElPq+7FCph0qp
+uIsxMtu15exLKQaSRLcc+tyNkWIZGQ371D2+7wIDAQABAoIBACzcNCozV1fm5ecx
+vIx05oUjmTFDVfAPyGp4wkIk2OhR5Dd9bTPPj53S7P5+coni67cAQvZGQDFYj/t3
+MtRkhaT8qRwGDEmL+CqefFidewabGdMfye//sOlkO1qUZMNStkvbQQM+95Ypcszb
+nq3+/gPx59i+uSg3MXDWLlFand217d8oU4JxmCxHc9ezhkpWsdReiAukWTud+q/5
+DzyPetaP09z8Ua/YNXuI6IdsvObYxOSCI1hPPuMSQGM4hQiqkHPqPNBIJDwfM9wk
+WzGom5M7nGitrKynJHdS2VRzsZwFL3Hg0yBXnSY1o8er5A6i5//dS2ISSEN9xHjz
+9PRRCbECgYEA+yVmv8i5uBLuz/Oeu/iOcX9ZNHfNowuIpInWVyfVhURIc1OwP1Sy
+uj5Qst2IY+Hm4IVq0sNg3cZdEk+K6RMyc/Qgd7GoYeJNKH1v0RbA6E1zEzqm8Xv+
+jA3dd7RLb5NTwFv11Qh0BDZfw2e8pCmN4oDp+n8fo7RE3NQGaLb77QsCgYEAyDBE
+FoYVwXhGaKnhDT1AqM3hkOGBqheJJIxkNUnyMhlU/AxmWtfvTtw7MCP+311bz4Ma
+h6yUfaEiHQJs2wkPyIaZ8CbbVyP7bXWMZzA/Rnk4dQWZ/VjRYvEzIvmz9di3w5j6
+P1fWX0QODqcY2CvHyMmPLIysbC0cjVDA4ZpDvC0CgYEAlqvrpuevtCV3rL7F3pPS
+MXlrdTTi5AyJX91qAEPfr+I1bSsqM/SGfYHhPE34A6SFtPGWEvgwZx0YvWGHPynL
+PRGbYPPuxzrTe5U1vkVeWoAMp96qRXpUToYK9kPudfP3bRI+vB4kLFrKvRrBa+Oa
+QeeBeE1IGBiQr8NsTOpq3d0CgYB9R+d0iRlYaKL3oUjcdjbe7Wl6uAXjorMLEmks
+CEjwHXZX/pKXy4dSPPU1nXFF7DEm3o9d1R1gudSVfw0MztD313TDHC4sjLIuwF/L
+vB/9RKOWaJkEOe9gEj7EZqy+8I+gcz45IglguUBq3xvnPQ7ck3dsk+TcFidGMQFk
+rpwxSQKBgQDbdzOJagPep0HVJPkOmF1X4idb1rnQUuMi59I3k6lFTXAaypy6nU69
+aAUgv7UY4i3XglEhbztk/o51W4/fJ1N8UzbXlBur/pJD8GN2h52ea77CbpOAmDSm
+Bjjoj92wmYGfBRf7DwJQDgqxvpa0s1cwtYjNf0RmbDPzBsfzrKLKbQ==
+-----END RSA PRIVATE KEY-----
diff --git a/test/ejabberd_SUITE_data/ca.pem b/test/ejabberd_SUITE_data/ca.pem
new file mode 100644
index 000000000..3daa7f5d6
--- /dev/null
+++ b/test/ejabberd_SUITE_data/ca.pem
@@ -0,0 +1,22 @@
+-----BEGIN CERTIFICATE-----
+MIIDtTCCAp2gAwIBAgIJAKI8WTrCnPXzMA0GCSqGSIb3DQEBCwUAMEUxCzAJBgNV
+BAYTAkFVMRMwEQYDVQQIEwpTb21lLVN0YXRlMSEwHwYDVQQKExhJbnRlcm5ldCBX
+aWRnaXRzIFB0eSBMdGQwHhcNMTUwNDE1MTQxNTI0WhcNNDIwODMxMTQxNTI0WjBF
+MQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50
+ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIB
+CgKCAQEAxGSSFSDTbBTk2GwkORLCXoBdYq5YxwPfen8bK+8WjxRb9ThpFsHYfImt
+DQV0qvcZyWnjUFxRh7Dyw7A2X690nplCdzZ9Gl+5yzzlRefHborMSnNYrnTqx3vs
+9qiac0A5bzdjMY7XN3VuVwz0XWY6rAiL/7OxunCNUnQz+oswDx7cj1W4bb9pFzBv
+W5TjaAiziyzS3IxvTc7kYQYJEa99vIlDZ+Ov9rHtiF/5CZ8kHc457B3suc9hHxO2
+t0EzmBiqg7wpksJjoJeXaJvT9sKSgW6LXkjBCm/7jm1ElPq+7FCph0qpuIsxMtu1
+5exLKQaSRLcc+tyNkWIZGQ371D2+7wIDAQABo4GnMIGkMB0GA1UdDgQWBBTQ9mbL
+xyIyE3pDyrNMsC36DRHp+TB1BgNVHSMEbjBsgBTQ9mbLxyIyE3pDyrNMsC36DRHp
++aFJpEcwRTELMAkGA1UEBhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNV
+BAoTGEludGVybmV0IFdpZGdpdHMgUHR5IEx0ZIIJAKI8WTrCnPXzMAwGA1UdEwQF
+MAMBAf8wDQYJKoZIhvcNAQELBQADggEBAGyAi//UQaUhy8RLGc33T36Ni6TnRgpz
+1xu2aahMe0YfPUZsZwwCP6dK+6fSw7OsRqyXZNZJntlur30yMMDlvjXmV6UDzeS4
+/HGd/hr0LqruYpmvOKmvT/y8VkmBqsGlcaRNhSJGDzMHAVEQ0hzAJe3Emw5R753p
+iVRbxPqiOVt4U/gjwtrVumSt1v9O4buWo1lTp0jxK1L6K8YWmETLuxyS3IG+i9Ij
+DDNyU/UxyocP/mcscUAoV9MJX56exwPC93rPxOlwJT5e5ZMRGnwwUt017dPUrKbA
+u+24S8uJCKN2w0OzsrqzC6lvxOf0JRfNxxxGr1KZYyEGT7ps1jhTebA=
+-----END CERTIFICATE-----
diff --git a/test/ejabberd_SUITE_data/cert.pem b/test/ejabberd_SUITE_data/cert.pem
index 11e18491f..ee9cf1641 100644
--- a/test/ejabberd_SUITE_data/cert.pem
+++ b/test/ejabberd_SUITE_data/cert.pem
@@ -1,52 +1,54 @@
-----BEGIN CERTIFICATE-----
-MIIGbDCCBVSgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJBVTET
+MIIEmTCCA4GgAwIBAgIBATANBgkqhkiG9w0BAQUFADBFMQswCQYDVQQGEwJBVTET
MBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQ
-dHkgTHRkMB4XDTE2MDUyNDE3NDIyNVoXDTQzMTAxMDE3NDIyNVowVjELMAkGA1UE
+dHkgTHRkMB4XDTE2MDkyMzA3MDMyNFoXDTQ0MDIwOTA3MDMyNFowVjELMAkGA1UE
BhMCQVUxEzARBgNVBAgTClNvbWUtU3RhdGUxITAfBgNVBAoTGEludGVybmV0IFdp
-ZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAxMGYWN0aXZlMIGfMA0GCSqGSIb3DQEBAQUA
-A4GNADCBiQKBgQC+GTA1D1+yiXgLqUhJXkSj3hj5FiqlBAfJT/8OSXYifY4M4HYv
-VQrqER2Fs7jdCaeoGWDvwfK/UOV0b1ROnf+T/2bXFs8EOeqjOz4xG2oexNKVrYj9
-ICYAgmSh6Hf2cZJM/YCAISje93Xl2J2w/N7oFC1ZXasPoBIZv3Fgg7hTtQIDAQAB
-o4ID2DCCA9QwCQYDVR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5l
-cmF0ZWQgQ2VydGlmaWNhdGUwHQYDVR0OBBYEFEynWiCoZK4tLDk3KM1wMsbrz9Ug
-MB8GA1UdIwQYMBaAFND2ZsvHIjITekPKs0ywLfoNEen5MDMGA1UdHwQsMCowKKAm
-oCSGImh0dHA6Ly9sb2NhbGhvc3Q6NTI4MC9kYXRhL2NybC5kZXIwNgYIKwYBBQUH
-AQEEKjAoMCYGCCsGAQUFBzABhhpodHRwOi8vbG9jYWxob3N0OjUyODAvb2NzcDAL
-BgNVHQ8EBAMCBeAwEwYDVR0lBAwwCgYIKwYBBQUHAwkwggLIBgNVHREEggK/MIIC
-u6A4BggrBgEFBQcIBaAsDCp0ZXN0X3NpbmdsZSEjJCVeKigpYH4rLTtfPVtde318
-XEBsb2NhbGhvc3SgPwYIKwYBBQUHCAWgMwwxdGVzdF9zaW5nbGUhIyQlXiooKWB+
-Ky07Xz1bXXt9fFxAbW5lc2lhLmxvY2FsaG9zdKA+BggrBgEFBQcIBaAyDDB0ZXN0
-X3NpbmdsZSEjJCVeKigpYH4rLTtfPVtde318XEBteXNxbC5sb2NhbGhvc3SgPgYI
-KwYBBQUHCAWgMgwwdGVzdF9zaW5nbGUhIyQlXiooKWB+Ky07Xz1bXXt9fFxAcGdz
-cWwubG9jYWxob3N0oD8GCCsGAQUFBwgFoDMMMXRlc3Rfc2luZ2xlISMkJV4qKClg
-fistO189W117fXxcQHNxbGl0ZS5sb2NhbGhvc3SgQAYIKwYBBQUHCAWgNAwydGVz
-dF9zaW5nbGUhIyQlXiooKWB+Ky07Xz1bXXt9fFxAZXh0YXV0aC5sb2NhbGhvc3Sg
-PQYIKwYBBQUHCAWgMQwvdGVzdF9zaW5nbGUhIyQlXiooKWB+Ky07Xz1bXXt9fFxA
-bGRhcC5sb2NhbGhvc3SgPQYIKwYBBQUHCAWgMQwvdGVzdF9zaW5nbGUhIyQlXioo
-KWB+Ky07Xz1bXXt9fFxAcDFkYi5sb2NhbGhvc3SgPQYIKwYBBQUHCAWgMQwvdGVz
-dF9zaW5nbGUhIyQlXiooKWB+Ky07Xz1bXXt9fFxAcmlhay5sb2NhbGhvc3SgPgYI
-KwYBBQUHCAWgMgwwdGVzdF9zaW5nbGUhIyQlXiooKWB+Ky07Xz1bXXt9fFxAcmVk
-aXMubG9jYWxob3N0oD4GCCsGAQUFBwgFoDIMMHRlc3Rfc2luZ2xlISMkJV4qKClg
-fistO189W117fXxcQG1zc3FsLmxvY2FsaG9zdDANBgkqhkiG9w0BAQUFAAOCAQEA
-et4jpmpwlE+2bw+/iqCt7sfU/5nPmQ8YtgMB+32wf7DINNJgkwOdkYJpzhlMXKrh
-/bn8+Ybmq6MbK0r2R91Uu858xQf8VKExQm44qaGSyL5Ug3jsAWb3GLZSaWQo37e9
-QdDeP8XijCEyr3rum19tRIdiImsRAxJqwfaE4pUSgfCEQMkvb+6//8HSf9RRPToD
-o6eAg8QerEtTfxerEdW/0K1ozOrzSrQembWOu+JjvANRl+p59j+1YOWHzS/yQeZl
-K3sjFoCvXPvocRnUznvT+TSdy3ORJSjwfEcP5Crim70amZZ6NeMAxfby9wwmmX0x
-zkwPCSUXliXke6T88Olj7Q==
+ZGdpdHMgUHR5IEx0ZDEPMA0GA1UEAxMGYWN0aXZlMIIBIjANBgkqhkiG9w0BAQEF
+AAOCAQ8AMIIBCgKCAQEAselBnOh089g/VN7gH1m43Vo67kSqh8QRnXZxfjpzt3oP
+Dl5nd04eNey4ezoSBo7o1hKhj/m5KLxmy1kN+xssyutgzto1FZu8GC2jDyLvByNL
+h0Z3XLmzdzBzBjosCtllJtzHlVL08SPuuOId5hToiiT8h3ElgNI4L6w+eLzhZIk5
+Rj1WojGa+pnaTEgoOaZPcNrkOj81o1tgnbLXN7HY3hJKnRp78DmPySq82cRhvfNr
+ePCs6BJr3y7yYJk0nG+EOaj5BK95YSJondZ8fOZuCigJPMogEaSw0SGsSUiQrPsd
++3vZQ+3ctOimnhW7cF3fAM79g+zDdv9N9E3D+inhyQIDAQABo4IBgTCCAX0wCQYD
+VR0TBAIwADAsBglghkgBhvhCAQ0EHxYdT3BlblNTTCBHZW5lcmF0ZWQgQ2VydGlm
+aWNhdGUwHQYDVR0OBBYEFJgip1fThIyZu9J+YNz3XKDkOcMKMB8GA1UdIwQYMBaA
+FND2ZsvHIjITekPKs0ywLfoNEen5MDMGA1UdHwQsMCowKKAmoCSGImh0dHA6Ly9s
+b2NhbGhvc3Q6NTI4MC9kYXRhL2NybC5kZXIwNgYIKwYBBQUHAQEEKjAoMCYGCCsG
+AQUFBzABhhpodHRwOi8vbG9jYWxob3N0OjUyODAvb2NzcDALBgNVHQ8EBAMCBeAw
+JwYDVR0lBCAwHgYIKwYBBQUHAwkGCCsGAQUFBwMBBggrBgEFBQcDAjBfBgNVHREE
+WDBWoBcGCCsGAQUFBwgFoAsMCWxvY2FsaG9zdKAbBggrBgEFBQcIBaAPDA1zMnMu
+bG9jYWxob3N0oB4GCCsGAQUFBwgFoBIMEG1uZXNpYS5sb2NhbGhvc3QwDQYJKoZI
+hvcNAQEFBQADggEBAEwHeECqeEJIz0VFA0OZ0w9+3rfZPX9K59rbJNNnKVATPhk5
+g5NFpXy1mFTV/3MWjDS1QRbgoXzOYR64S87oez4l3jyDz3YxklyjbbiN3QKaUq5h
+284Ze6CiRqxIi6V2bhjjp3voMSP8BQ72bX9uAWjqQl7Z16wYuCzV4QzVZRD5p0c1
+y45WZ6J+sU1GTwEGh0vXZBlDMeTb+53smjEoCxET1ecJmStAvJi+UHiLn63Z3Yzz
+CTfdAZ/mj+ytaNLVsgrULXrmZAeo064HVqeyLWL8ZBoM0zLs6u14OQOeDCCB62cj
+UXb9npKmIdfsWvdii6emCVQqKBQmHnlUMCh56tE=
-----END CERTIFICATE-----
-----BEGIN RSA PRIVATE KEY-----
-MIICXAIBAAKBgQC+GTA1D1+yiXgLqUhJXkSj3hj5FiqlBAfJT/8OSXYifY4M4HYv
-VQrqER2Fs7jdCaeoGWDvwfK/UOV0b1ROnf+T/2bXFs8EOeqjOz4xG2oexNKVrYj9
-ICYAgmSh6Hf2cZJM/YCAISje93Xl2J2w/N7oFC1ZXasPoBIZv3Fgg7hTtQIDAQAB
-AoGALddtJJ58eVVlOYqs/+RXsRyR8R9DUV/TcNx1qUBV2KNmafyHA4sCgsd10xQv
-9D2rzIGyOp8OpswfSSC/t+WqB9+ezSruzMuX6IURdHZbX6aWWX6maICtPKEEkCmI
-gaLxE/ojuOXnTEBTkVuVWtuFL9PsK/WGi/FIDzJbwqTWJ4ECQQDy9DrBAQM96B6u
-G4XpFzBsfgJZoS+NaMdCwK+/jgcEpI6oxobK8tuGB6drp5jNSuQ905W9n8XjA6Xq
-x8/GH9I5AkEAyE5g05HhMlxBWCq+P70pBDIamdHJcPQVL8+6NXkT+mTqqZxxkUy4
-nMfTh5zE6WfmqYNtrmNBDxXUyaoRSBydXQJACnFnCR7DBekxUGiMc/10LmWoMjQU
-eC6Vyg/APiqbsJ5mJ2kJKDYSK4uurZjxn3lloCa1HAZ/GgfxHMtj6e86OQJAetq3
-wIwE12KGIZF1xpo6gfxJHHbzWngaVozN5OYyPq2O0CDH9xpbUK2vK8oXbCDx9J5L
-s13lFV+Kd3X7y4LhcQJBAKSFg7ht33l8Sa0TdUkY6Tl1NBMCCLf+np+HYrAbQZux
-2NtR6nj2YqeOpEe1ibWZm8tj3dzlTm1FCOIpa+pm114=
+MIIEpAIBAAKCAQEAselBnOh089g/VN7gH1m43Vo67kSqh8QRnXZxfjpzt3oPDl5n
+d04eNey4ezoSBo7o1hKhj/m5KLxmy1kN+xssyutgzto1FZu8GC2jDyLvByNLh0Z3
+XLmzdzBzBjosCtllJtzHlVL08SPuuOId5hToiiT8h3ElgNI4L6w+eLzhZIk5Rj1W
+ojGa+pnaTEgoOaZPcNrkOj81o1tgnbLXN7HY3hJKnRp78DmPySq82cRhvfNrePCs
+6BJr3y7yYJk0nG+EOaj5BK95YSJondZ8fOZuCigJPMogEaSw0SGsSUiQrPsd+3vZ
+Q+3ctOimnhW7cF3fAM79g+zDdv9N9E3D+inhyQIDAQABAoIBAQCWIyxVx+36YgGA
+E927VzIkyqJ0tMncbOAYq/228oj4yy6th4l1Kx1fkHdWtnjDxBJFpc9l+u4ArI1r
+Cao8wIAadmxp48dshtJC7TBv86EXuvdgH11XiPcknGRVWv4T4cX099gN8cX3QcWR
+jHCC3B4phnD9s8RcZAs6X/cQWQU0mxiHodYJefSXDyRIx9wimXmmW83ZqcsFftXS
+MI0+jflmRTf07M4gALVL0LlaBkg2FMoNiaKYPTbubcrEMUgTDsoDsjX3Fi43qLdF
+QTq+lF7HrHQ1EQlngCJupka9JxwZc3Fae6XYlDQvSDPcRxzWJoOuVBPtheGeoU3c
+PAry9KihAoGBAN8HCb0k4bMN06WZjSzClKhb6eFw4GMbVpDAOwPDl2N+9+pwrGxE
+ztekrM+VdXVScIj23g6wKd6fPqK6EYuEEu3Hre82e9ApqjJ34p1UcOs9Vs4N3VDy
+HJnWhEytsc9c03O5nhsK1YAXoGHEPmCYGsg2UA171LDcarnO1WDmpKkNAoGBAMw2
+sTCC/LBwgsuPZL5fR10wQ1sr1fIheSL+VK10jSRDwNXT2Y4wdCpQXQ6XNi+n98n5
+VvKaE6PxFqjnKCrUUty8X5+fzVcTKpBYVICceEzpVY9FrKbeY1shMnOBRTCkaQwz
+8CoEbbQz6SH5s4qW7M8iJdUJ0RulaFDfpmangTStAoGBALMkMxVjZ4rsI0GT2grG
+7KNi2LTFducEUX8JeR2n4JUBql78S/LXPhGGa2x9z5ACPPQ23tyLccYowSXyMR+Q
+YafuyO4pJEBrBxNsqnDXH7BEX9I43rkjEAgdf70bk4RNOmdtA+sSw7UUxTVibPwn
+kPOadKiv+4JoOa2vzkL8X+yNAoGAbU85OUZkC+2tlViEDILjqDYVV8/3DUxtkxWg
+LdidVDQQHGTxpvK4u42Ywh6empPGRw54RBPFP5PlFTPmhEZytEUAymi3eUyBFBKz
+6MPYgRLFAZPB/vA7LqRuZPVlG8xljmqeu17zeenveIg4Wo6+44Dbz1UZ4TqAxAlz
+AK/YsWECgYAPuZnIo9fWJtUAIe5IA2LIqcN0rj3PsZ/tL6eaMXqKZgCYwTvVUGbT
+XD4O352t+yLM8v2hJGHrIPuHooN2dCadYuzoBvVFsRTZjGpBlAZ+EJ5WfDYFL0qf
+68O2KZNXaSS8ZARlp9g3C8AFiakm/uWhtSfwx09uSBHJgld1V3GAoA==
-----END RSA PRIVATE KEY-----
diff --git a/test/ejabberd_SUITE_data/ejabberd.yml b/test/ejabberd_SUITE_data/ejabberd.yml
index aca547d99..9448df080 100644
--- a/test/ejabberd_SUITE_data/ejabberd.yml
+++ b/test/ejabberd_SUITE_data/ejabberd.yml
@@ -61,6 +61,7 @@ Welcome to this XMPP server."
mod_version: []
"sqlite.localhost":
sql_type: sqlite
+ sql_pool_size: 1
auth_method: sql
sm_db_type: sql
modules:
@@ -327,7 +328,7 @@ Welcome to this XMPP server."
mod_time: []
mod_version: []
"localhost":
- auth_method: internal
+ auth_method: [internal, anonymous]
"ldap.localhost":
ldap_servers:
- "localhost"
@@ -337,7 +338,8 @@ Welcome to this XMPP server."
ldap_base: "ou=users,dc=localhost"
auth_method: ldap
modules:
- mod_vcard_ldap: []
+ mod_vcard:
+ db_type: ldap
mod_roster: [] # mod_roster is required by mod_shared_roster
mod_shared_roster_ldap:
ldap_auth_check: off
@@ -387,8 +389,7 @@ access:
local:
local: allow
max_user_offline_messages:
- admin: 5000
- all: 100
+ all: infinity
max_user_sessions:
all: 10
muc:
@@ -408,6 +409,7 @@ acl:
user_regexp: ""
define_macro:
CERTFILE: "cert.pem"
+ CAFILE: "ca.pem"
language: "en"
listen:
-
@@ -427,6 +429,11 @@ listen:
port: @@web_port@@
module: ejabberd_http
captcha: true
+ -
+ port: @@component_port@@
+ module: ejabberd_service
+ password: >-
+ @@password@@
loglevel: @@loglevel@@
max_fsm_queue: 1000
modules:
@@ -435,6 +442,8 @@ modules:
mod_disco: []
mod_ping: []
mod_proxy65: []
+ mod_legacy: []
+ mod_muc: []
mod_register:
welcome_message:
subject: "Welcome!"
@@ -444,6 +453,11 @@ Welcome to this XMPP server."
mod_time: []
mod_version: []
registration_timeout: infinity
+route_subdomains: s2s
+domain_certfile: CERTFILE
+s2s_use_starttls: false
+s2s_cafile: CAFILE
+outgoing_s2s_port: @@s2s_port@@
shaper:
fast: 50000
- normal: 1000
+ normal: 10000
diff --git a/test/ejabberd_SUITE_data/extauth.py b/test/ejabberd_SUITE_data/extauth.py
index 84c000144..fa2c9efd0 100755
--- a/test/ejabberd_SUITE_data/extauth.py
+++ b/test/ejabberd_SUITE_data/extauth.py
@@ -7,7 +7,10 @@ def read():
cmd = pkt[0]
args_num = len(pkt) - 1
if cmd == 'auth' and args_num >= 3:
- write(True)
+ if pkt[1] == "wrong":
+ write(False)
+ else:
+ write(True)
elif cmd == 'isuser' and args_num == 2:
write(True)
elif cmd == 'setpass' and args_num >= 3:
diff --git a/test/ejabberd_SUITE_data/gencerts.sh b/test/ejabberd_SUITE_data/gencerts.sh
new file mode 100755
index 000000000..d0acd4b0c
--- /dev/null
+++ b/test/ejabberd_SUITE_data/gencerts.sh
@@ -0,0 +1,18 @@
+#!/bin/sh
+# Update openssl.cnf if needed (in particular section [alt_names])
+
+rm -rf ssl
+mkdir -p ssl/newcerts
+touch ssl/index.txt
+echo 01 > ssl/serial
+echo 1000 > ssl/crlnumber
+openssl genrsa -out ssl/client.key
+openssl req -new -key ssl/client.key -out ssl/client.csr -config openssl.cnf -batch -subj /C=AU/ST=Some-State/O=Internet\ Widgits\ Pty\ Ltd/CN=active
+openssl ca -keyfile ca.key -cert ca.pem -in ssl/client.csr -out ssl/client.crt -config openssl.cnf -days 10000 -batch -notext
+openssl req -new -key ssl/client.key -out ssl/self-signed-client.csr -batch -subj /C=AU/ST=Some-State/O=Internet\ Widgits\ Pty\ Ltd/CN=active
+openssl x509 -req -in ssl/self-signed-client.csr -signkey ssl/client.key -out ssl/self-signed-client.crt -days 10000
+cat ssl/client.crt > cert.pem
+cat ssl/self-signed-client.crt > self-signed-cert.pem
+cat ssl/client.key >> cert.pem
+cat ssl/client.key >> self-signed-cert.pem
+rm -rf ssl
diff --git a/test/ejabberd_SUITE_data/openssl.cnf b/test/ejabberd_SUITE_data/openssl.cnf
new file mode 100644
index 000000000..ff11d1460
--- /dev/null
+++ b/test/ejabberd_SUITE_data/openssl.cnf
@@ -0,0 +1,323 @@
+#
+# OpenSSL example configuration file.
+# This is mostly being used for generation of certificate requests.
+#
+
+# This definition stops the following lines choking if HOME isn't
+# defined.
+HOME = .
+RANDFILE = $ENV::HOME/.rnd
+
+# Extra OBJECT IDENTIFIER info:
+#oid_file = $ENV::HOME/.oid
+oid_section = new_oids
+
+# To use this configuration file with the "-extfile" option of the
+# "openssl x509" utility, name here the section containing the
+# X.509v3 extensions to use:
+extensions = v3_req
+# (Alternatively, use a configuration file that has only
+# X.509v3 extensions in its main [= default] section.)
+
+[ new_oids ]
+# We can add new OIDs in here for use by 'ca' and 'req'.
+# Add a simple OID like this:
+# testoid1=1.2.3.4
+# Or use config file substitution like this:
+# testoid2=${testoid1}.5.6
+
+####################################################################
+[ ca ]
+default_ca = CA_default # The default ca section
+
+####################################################################
+[ CA_default ]
+
+#dir = ./demoCA # Where everything is kept
+dir = ssl
+certs = $dir/certs # Where the issued certs are kept
+crl_dir = $dir/crl # Where the issued crl are kept
+database = $dir/index.txt # database index file.
+#unique_subject = no # Set to 'no' to allow creation of
+ # several ctificates with same subject.
+new_certs_dir = $dir/newcerts # default place for new certs.
+
+certificate = $dir/cacert.pem # The CA certificate
+serial = $dir/serial # The current serial number
+crlnumber = $dir/crlnumber # the current crl number
+ # must be commented out to leave a V1 CRL
+crl = $dir/crl.pem # The current CRL
+private_key = $dir/private/cakey.pem# The private key
+RANDFILE = $dir/private/.rand # private random number file
+
+x509_extensions = usr_cert # The extentions to add to the cert
+
+# Comment out the following two lines for the "traditional"
+# (and highly broken) format.
+name_opt = ca_default # Subject Name options
+cert_opt = ca_default # Certificate field options
+
+# Extension copying option: use with caution.
+copy_extensions = copy
+
+# Extensions to add to a CRL. Note: Netscape communicator chokes on V2 CRLs
+# so this is commented out by default to leave a V1 CRL.
+# crlnumber must also be commented out to leave a V1 CRL.
+# crl_extensions = crl_ext
+
+default_days = 365 # how long to certify for
+default_crl_days= 30 # how long before next CRL
+default_md = sha1 # which md to use.
+preserve = no # keep passed DN ordering
+
+# A few difference way of specifying how similar the request should look
+# For type CA, the listed attributes must be the same, and the optional
+# and supplied fields are just that :-)
+policy = policy_match
+
+# For the CA policy
+[ policy_match ]
+countryName = match
+stateOrProvinceName = match
+organizationName = match
+organizationalUnitName = optional
+commonName = optional
+emailAddress = optional
+
+# For the 'anything' policy
+# At this point in time, you must list all acceptable 'object'
+# types.
+[ policy_anything ]
+countryName = optional
+stateOrProvinceName = optional
+localityName = optional
+organizationName = optional
+organizationalUnitName = optional
+commonName = optional
+emailAddress = optional
+
+####################################################################
+[ req ]
+default_bits = 1024
+default_keyfile = privkey.pem
+distinguished_name = req_distinguished_name
+attributes = req_attributes
+x509_extensions = v3_ca # The extentions to add to the self signed cert
+
+# Passwords for private keys if not present they will be prompted for
+# input_password = secret
+# output_password = secret
+
+# This sets a mask for permitted string types. There are several options.
+# default: PrintableString, T61String, BMPString.
+# pkix : PrintableString, BMPString.
+# utf8only: only UTF8Strings.
+# nombstr : PrintableString, T61String (no BMPStrings or UTF8Strings).
+# MASK:XXXX a literal mask value.
+# WARNING: current versions of Netscape crash on BMPStrings or UTF8Strings
+# so use this option with caution!
+string_mask = nombstr
+
+req_extensions = v3_req # The extensions to add to a certificate request
+
+[ req_distinguished_name ]
+countryName = Country Name (2 letter code)
+countryName_default = AU
+countryName_min = 2
+countryName_max = 2
+
+stateOrProvinceName = State or Province Name (full name)
+stateOrProvinceName_default = Some-State
+
+localityName = Locality Name (eg, city)
+
+0.organizationName = Organization Name (eg, company)
+0.organizationName_default = Internet Widgits Pty Ltd
+
+# we can do this but it is not needed normally :-)
+#1.organizationName = Second Organization Name (eg, company)
+#1.organizationName_default = World Wide Web Pty Ltd
+
+organizationalUnitName = Organizational Unit Name (eg, section)
+#organizationalUnitName_default =
+
+commonName = Common Name (eg, YOUR name)
+commonName_max = 64
+
+emailAddress = Email Address
+emailAddress_max = 64
+
+# SET-ex3 = SET extension number 3
+
+[ req_attributes ]
+challengePassword = A challenge password
+challengePassword_min = 4
+challengePassword_max = 20
+
+unstructuredName = An optional company name
+
+[ usr_cert ]
+
+# These extensions are added when 'ca' signs a request.
+
+# This goes against PKIX guidelines but some CAs do it and some software
+# requires this to avoid interpreting an end user certificate as a CA.
+
+basicConstraints=CA:FALSE
+
+# Here are some examples of the usage of nsCertType. If it is omitted
+# the certificate can be used for anything *except* object signing.
+
+# This is OK for an SSL server.
+# nsCertType = server
+
+# For an object signing certificate this would be used.
+# nsCertType = objsign
+
+# For normal client use this is typical
+# nsCertType = client, email
+
+# and for everything including object signing:
+# nsCertType = client, email, objsign
+
+# This is typical in keyUsage for a client certificate.
+# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+# This will be displayed in Netscape's comment listbox.
+nsComment = "OpenSSL Generated Certificate"
+
+# PKIX recommendations harmless if included in all certificates.
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer
+
+# This stuff is for subjectAltName and issuerAltname.
+# Import the email address.
+# subjectAltName=email:copy
+# An alternative to produce certificates that aren't
+# deprecated according to PKIX.
+# subjectAltName=email:move
+
+# Copy subject details
+# issuerAltName=issuer:copy
+
+#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
+#nsBaseUrl
+#nsRevocationUrl
+#nsRenewalUrl
+#nsCaPolicyUrl
+#nsSslServerName
+
+crlDistributionPoints = URI:http://localhost:5280/data/crl.der
+authorityInfoAccess = OCSP;URI:http://localhost:5280/ocsp
+
+[ v3_req ]
+
+# Extensions to add to a certificate request
+
+basicConstraints = CA:FALSE
+keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+extendedKeyUsage = OCSPSigning,serverAuth,clientAuth
+subjectAltName = @alt_names
+
+[alt_names]
+otherName.1 = 1.3.6.1.5.5.7.8.5;UTF8:"localhost"
+otherName.2 = 1.3.6.1.5.5.7.8.5;UTF8:"s2s.localhost"
+otherName.3 = 1.3.6.1.5.5.7.8.5;UTF8:"mnesia.localhost"
+
+[ v3_ca ]
+crlDistributionPoints = URI:http://localhost:5280/data/crl.der
+
+# Extensions for a typical CA
+
+
+# PKIX recommendation.
+
+subjectKeyIdentifier=hash
+
+authorityKeyIdentifier=keyid:always,issuer:always
+
+# This is what PKIX recommends but some broken software chokes on critical
+# extensions.
+#basicConstraints = critical,CA:true
+# So we do this instead.
+basicConstraints = CA:true
+
+# Key usage: this is typical for a CA certificate. However since it will
+# prevent it being used as an test self-signed certificate it is best
+# left out by default.
+# keyUsage = cRLSign, keyCertSign
+
+# Some might want this also
+# nsCertType = sslCA, emailCA
+
+# Include email address in subject alt name: another PKIX recommendation
+# subjectAltName=email:copy
+# Copy issuer details
+# issuerAltName=issuer:copy
+
+# DER hex encoding of an extension: beware experts only!
+# obj=DER:02:03
+# Where 'obj' is a standard or added object
+# You can even override a supported extension:
+# basicConstraints= critical, DER:30:03:01:01:FF
+
+[ crl_ext ]
+
+# CRL extensions.
+# Only issuerAltName and authorityKeyIdentifier make any sense in a CRL.
+
+# issuerAltName=issuer:copy
+authorityKeyIdentifier=keyid:always,issuer:always
+
+[ proxy_cert_ext ]
+# These extensions should be added when creating a proxy certificate
+
+# This goes against PKIX guidelines but some CAs do it and some software
+# requires this to avoid interpreting an end user certificate as a CA.
+
+basicConstraints=CA:FALSE
+
+# Here are some examples of the usage of nsCertType. If it is omitted
+# the certificate can be used for anything *except* object signing.
+
+# This is OK for an SSL server.
+# nsCertType = server
+
+# For an object signing certificate this would be used.
+# nsCertType = objsign
+
+# For normal client use this is typical
+# nsCertType = client, email
+
+# and for everything including object signing:
+# nsCertType = client, email, objsign
+
+# This is typical in keyUsage for a client certificate.
+# keyUsage = nonRepudiation, digitalSignature, keyEncipherment
+
+# This will be displayed in Netscape's comment listbox.
+nsComment = "OpenSSL Generated Certificate"
+
+# PKIX recommendations harmless if included in all certificates.
+subjectKeyIdentifier=hash
+authorityKeyIdentifier=keyid,issuer:always
+
+# This stuff is for subjectAltName and issuerAltname.
+# Import the email address.
+# subjectAltName=email:copy
+# An alternative to produce certificates that aren't
+# deprecated according to PKIX.
+# subjectAltName=email:move
+
+# Copy subject details
+# issuerAltName=issuer:copy
+
+#nsCaRevocationUrl = http://www.domain.dom/ca-crl.pem
+#nsBaseUrl
+#nsRevocationUrl
+#nsRenewalUrl
+#nsCaPolicyUrl
+#nsSslServerName
+
+# This really needs to be in place for it to be a proxy certificate.
+proxyCertInfo=critical,language:id-ppl-anyLanguage,pathlen:3,policy:foo
diff --git a/test/ejabberd_SUITE_data/self-signed-cert.pem b/test/ejabberd_SUITE_data/self-signed-cert.pem
new file mode 100644
index 000000000..d6b34f50e
--- /dev/null
+++ b/test/ejabberd_SUITE_data/self-signed-cert.pem
@@ -0,0 +1,46 @@
+-----BEGIN CERTIFICATE-----
+MIIDKDCCAhACCQCsLYnJDV1wHDANBgkqhkiG9w0BAQsFADBWMQswCQYDVQQGEwJB
+VTETMBEGA1UECBMKU29tZS1TdGF0ZTEhMB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0
+cyBQdHkgTHRkMQ8wDQYDVQQDEwZhY3RpdmUwHhcNMTYwOTIzMDcwMzI0WhcNNDQw
+MjA5MDcwMzI0WjBWMQswCQYDVQQGEwJBVTETMBEGA1UECBMKU29tZS1TdGF0ZTEh
+MB8GA1UEChMYSW50ZXJuZXQgV2lkZ2l0cyBQdHkgTHRkMQ8wDQYDVQQDEwZhY3Rp
+dmUwggEiMA0GCSqGSIb3DQEBAQUAA4IBDwAwggEKAoIBAQCx6UGc6HTz2D9U3uAf
+WbjdWjruRKqHxBGddnF+OnO3eg8OXmd3Th417Lh7OhIGjujWEqGP+bkovGbLWQ37
+GyzK62DO2jUVm7wYLaMPIu8HI0uHRndcubN3MHMGOiwK2WUm3MeVUvTxI+644h3m
+FOiKJPyHcSWA0jgvrD54vOFkiTlGPVaiMZr6mdpMSCg5pk9w2uQ6PzWjW2Cdstc3
+sdjeEkqdGnvwOY/JKrzZxGG982t48KzoEmvfLvJgmTScb4Q5qPkEr3lhImid1nx8
+5m4KKAk8yiARpLDRIaxJSJCs+x37e9lD7dy06KaeFbtwXd8Azv2D7MN2/030TcP6
+KeHJAgMBAAEwDQYJKoZIhvcNAQELBQADggEBAEAIFwHpNCVUiivAcfkxcUPKp0nn
+mhGqkMDRrPA7fOCm0ir1Puz4GQ/G4i+tWejzzFoS6kKQl+sUZAUYJdziftJFFoZ7
+br3q3Xafc2dWa8SHNcHH6lA1OEk8tXlhkNl+EgSLnRGMhIf0iZL2wGjE8Hlig6cu
+3h+OpbUijXUmq0XdH+ui3wNgXb7+Tosg/Od+lr0fNjkopsk3t1oiVXD4OQBZdUyq
+V5XValiZjMFDUUBdxBA+l6/Qj3bFmluz+FXI8UwfbinukqADTJzkMeUjEkvmKZWO
+tb+EU77NIuvg/k7b1yp4lEmATpdUfcGEuhWNtgeh5AqgMxOhAsJ7zUTA80I=
+-----END CERTIFICATE-----
+-----BEGIN RSA PRIVATE KEY-----
+MIIEpAIBAAKCAQEAselBnOh089g/VN7gH1m43Vo67kSqh8QRnXZxfjpzt3oPDl5n
+d04eNey4ezoSBo7o1hKhj/m5KLxmy1kN+xssyutgzto1FZu8GC2jDyLvByNLh0Z3
+XLmzdzBzBjosCtllJtzHlVL08SPuuOId5hToiiT8h3ElgNI4L6w+eLzhZIk5Rj1W
+ojGa+pnaTEgoOaZPcNrkOj81o1tgnbLXN7HY3hJKnRp78DmPySq82cRhvfNrePCs
+6BJr3y7yYJk0nG+EOaj5BK95YSJondZ8fOZuCigJPMogEaSw0SGsSUiQrPsd+3vZ
+Q+3ctOimnhW7cF3fAM79g+zDdv9N9E3D+inhyQIDAQABAoIBAQCWIyxVx+36YgGA
+E927VzIkyqJ0tMncbOAYq/228oj4yy6th4l1Kx1fkHdWtnjDxBJFpc9l+u4ArI1r
+Cao8wIAadmxp48dshtJC7TBv86EXuvdgH11XiPcknGRVWv4T4cX099gN8cX3QcWR
+jHCC3B4phnD9s8RcZAs6X/cQWQU0mxiHodYJefSXDyRIx9wimXmmW83ZqcsFftXS
+MI0+jflmRTf07M4gALVL0LlaBkg2FMoNiaKYPTbubcrEMUgTDsoDsjX3Fi43qLdF
+QTq+lF7HrHQ1EQlngCJupka9JxwZc3Fae6XYlDQvSDPcRxzWJoOuVBPtheGeoU3c
+PAry9KihAoGBAN8HCb0k4bMN06WZjSzClKhb6eFw4GMbVpDAOwPDl2N+9+pwrGxE
+ztekrM+VdXVScIj23g6wKd6fPqK6EYuEEu3Hre82e9ApqjJ34p1UcOs9Vs4N3VDy
+HJnWhEytsc9c03O5nhsK1YAXoGHEPmCYGsg2UA171LDcarnO1WDmpKkNAoGBAMw2
+sTCC/LBwgsuPZL5fR10wQ1sr1fIheSL+VK10jSRDwNXT2Y4wdCpQXQ6XNi+n98n5
+VvKaE6PxFqjnKCrUUty8X5+fzVcTKpBYVICceEzpVY9FrKbeY1shMnOBRTCkaQwz
+8CoEbbQz6SH5s4qW7M8iJdUJ0RulaFDfpmangTStAoGBALMkMxVjZ4rsI0GT2grG
+7KNi2LTFducEUX8JeR2n4JUBql78S/LXPhGGa2x9z5ACPPQ23tyLccYowSXyMR+Q
+YafuyO4pJEBrBxNsqnDXH7BEX9I43rkjEAgdf70bk4RNOmdtA+sSw7UUxTVibPwn
+kPOadKiv+4JoOa2vzkL8X+yNAoGAbU85OUZkC+2tlViEDILjqDYVV8/3DUxtkxWg
+LdidVDQQHGTxpvK4u42Ywh6empPGRw54RBPFP5PlFTPmhEZytEUAymi3eUyBFBKz
+6MPYgRLFAZPB/vA7LqRuZPVlG8xljmqeu17zeenveIg4Wo6+44Dbz1UZ4TqAxAlz
+AK/YsWECgYAPuZnIo9fWJtUAIe5IA2LIqcN0rj3PsZ/tL6eaMXqKZgCYwTvVUGbT
+XD4O352t+yLM8v2hJGHrIPuHooN2dCadYuzoBvVFsRTZjGpBlAZ+EJ5WfDYFL0qf
+68O2KZNXaSS8ZARlp9g3C8AFiakm/uWhtSfwx09uSBHJgld1V3GAoA==
+-----END RSA PRIVATE KEY-----
diff --git a/test/ejabberd_commands_mock_test.exs b/test/ejabberd_commands_mock_test.exs
index 419a989d6..12444f79a 100644
--- a/test/ejabberd_commands_mock_test.exs
+++ b/test/ejabberd_commands_mock_test.exs
@@ -48,7 +48,7 @@ defmodule EjabberdCommandsMockTest do
_ -> :ok
end
:mnesia.start
- :ok = :jid.start
+ {:ok, _} = :jid.start
:ok = :ejabberd_config.start(["domain1", "domain2"], [])
{:ok, _} = :ejabberd_access_permissions.start_link()
:ok = :acl.start
diff --git a/test/ejabberd_cyrsasl_test.exs b/test/ejabberd_cyrsasl_test.exs
index d9b949294..1b98048c7 100644
--- a/test/ejabberd_cyrsasl_test.exs
+++ b/test/ejabberd_cyrsasl_test.exs
@@ -27,7 +27,7 @@ defmodule EjabberdCyrsaslTest do
:p1_sha.load_nif()
:mnesia.start
:ok = start_module(:stringprep)
- :ok = start_module(:jid)
+ {:ok, _} = start_module(:jid)
:ok = :ejabberd_config.start(["domain1"], [])
:ok = :cyrsasl.start
cyrstate = :cyrsasl.server_new("domain1", "domain1", "domain1", :ok, &get_password/1,
@@ -44,12 +44,12 @@ defmodule EjabberdCyrsaslTest do
test "Plain text (correct user wrong pass)", context do
step1 = :cyrsasl.server_start(context[:cyrstate], "PLAIN", <<0,"user1",0,"badpass">>)
- assert step1 == {:error, "not-authorized", "user1"}, "got error response"
+ assert step1 == {:error, :"not-authorized", "user1"}
end
test "Plain text (wrong user wrong pass)", context do
step1 = :cyrsasl.server_start(context[:cyrstate], "PLAIN", <<0,"nouser1",0,"badpass">>)
- assert step1 == {:error, "not-authorized", "nouser1"}, "got error response"
+ assert step1 == {:error, :"not-authorized", "nouser1"}
end
test "Anonymous", context do
diff --git a/test/example_tests.erl b/test/example_tests.erl
new file mode 100644
index 000000000..d7965376e
--- /dev/null
+++ b/test/example_tests.erl
@@ -0,0 +1,52 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(example_tests).
+
+%% API
+-compile(export_all).
+-import(suite, []).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {example_single, [sequence],
+ [single_test(foo)]}.
+
+foo(Config) ->
+ Config.
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {example_master_slave, [sequence],
+ [master_slave_test(foo)]}.
+
+foo_master(Config) ->
+ Config.
+
+foo_slave(Config) ->
+ Config.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("example_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("example_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("example_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("example_" ++ atom_to_list(T) ++ "_slave")]}.
diff --git a/test/jid_test.exs b/test/jid_test.exs
index b75a3603a..7d063b707 100644
--- a/test/jid_test.exs
+++ b/test/jid_test.exs
@@ -29,6 +29,7 @@ defmodule JidTest do
setup_all do
:stringprep.start
:jid.start
+ :ok
end
test "create a jid from a binary" do
diff --git a/test/mam_tests.erl b/test/mam_tests.erl
new file mode 100644
index 000000000..d628ddd2a
--- /dev/null
+++ b/test/mam_tests.erl
@@ -0,0 +1,538 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 14 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(mam_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [get_features/1, disconnect/1, my_jid/1, send_recv/2,
+ wait_for_slave/1, server_jid/1, send/2, get_features/2,
+ wait_for_master/1, recv_message/1, recv_iq/1, muc_room_jid/1,
+ muc_jid/1, is_feature_advertised/3, get_event/1, put_event/2]).
+
+-include("suite.hrl").
+-define(VERSIONS, [?NS_MAM_TMP, ?NS_MAM_0, ?NS_MAM_1]).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {mam_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(get_set_prefs),
+ single_test(get_form),
+ single_test(fake_by)]}.
+
+feature_enabled(Config) ->
+ BareMyJID = jid:remove_resource(my_jid(Config)),
+ RequiredFeatures = sets:from_list(?VERSIONS),
+ ServerFeatures = sets:from_list(get_features(Config)),
+ UserFeatures = sets:from_list(get_features(Config, BareMyJID)),
+ MUCFeatures = get_features(Config, muc_jid(Config)),
+ ct:comment("Checking if all MAM server features are enabled"),
+ true = sets:is_subset(RequiredFeatures, ServerFeatures),
+ ct:comment("Checking if all MAM user features are enabled"),
+ true = sets:is_subset(RequiredFeatures, UserFeatures),
+ ct:comment("Checking if all MAM conference service features are enabled"),
+ true = lists:member(?NS_MAM_1, MUCFeatures),
+ clean(disconnect(Config)).
+
+fake_by(Config) ->
+ BareServerJID = server_jid(Config),
+ FullServerJID = jid:replace_resource(BareServerJID, randoms:get_string()),
+ FullMyJID = my_jid(Config),
+ BareMyJID = jid:remove_resource(FullMyJID),
+ Fakes = lists:flatmap(
+ fun(JID) ->
+ [#mam_archived{id = randoms:get_string(), by = JID},
+ #stanza_id{id = randoms:get_string(), by = JID}]
+ end, [BareServerJID, FullServerJID, BareMyJID, FullMyJID]),
+ Body = xmpp:mk_text(<<"body">>),
+ ForeignJID = jid:make(randoms:get_string()),
+ Archived = #mam_archived{id = randoms:get_string(), by = ForeignJID},
+ StanzaID = #stanza_id{id = randoms:get_string(), by = ForeignJID},
+ #message{body = Body, sub_els = SubEls} =
+ send_recv(Config, #message{to = FullMyJID,
+ body = Body,
+ sub_els = [Archived, StanzaID|Fakes]}),
+ ct:comment("Checking if only foreign tags present"),
+ [ForeignJID, ForeignJID] = lists:flatmap(
+ fun(#mam_archived{by = By}) -> [By];
+ (#stanza_id{by = By}) -> [By];
+ (_) -> []
+ end, SubEls),
+ clean(disconnect(Config)).
+
+get_set_prefs(Config) ->
+ Range = [{JID, #mam_prefs{xmlns = NS,
+ default = Default,
+ always = Always,
+ never = Never}} ||
+ JID <- [undefined, server_jid(Config)],
+ NS <- ?VERSIONS,
+ Default <- [always, never, roster],
+ Always <- [[], [jid:from_string(<<"foo@bar.baz">>)]],
+ Never <- [[], [jid:from_string(<<"baz@bar.foo">>)]]],
+ lists:foreach(
+ fun({To, Prefs}) ->
+ NS = Prefs#mam_prefs.xmlns,
+ #iq{type = result, sub_els = [Prefs]} =
+ send_recv(Config, #iq{type = set, to = To,
+ sub_els = [Prefs]}),
+ #iq{type = result, sub_els = [Prefs]} =
+ send_recv(Config, #iq{type = get, to = To,
+ sub_els = [#mam_prefs{xmlns = NS}]})
+ end, Range),
+ clean(disconnect(Config)).
+
+get_form(Config) ->
+ ServerJID = server_jid(Config),
+ Range = [{JID, NS} || JID <- [undefined, ServerJID],
+ NS <- ?VERSIONS -- [?NS_MAM_TMP]],
+ lists:foreach(
+ fun({To, NS}) ->
+ #iq{type = result,
+ sub_els = [#mam_query{xmlns = NS,
+ xdata = #xdata{} = X}]} =
+ send_recv(Config, #iq{type = get, to = To,
+ sub_els = [#mam_query{xmlns = NS}]}),
+ [NS] = xmpp_util:get_xdata_values(<<"FORM_TYPE">>, X),
+ true = xmpp_util:has_xdata_var(<<"with">>, X),
+ true = xmpp_util:has_xdata_var(<<"start">>, X),
+ true = xmpp_util:has_xdata_var(<<"end">>, X)
+ end, Range),
+ clean(disconnect(Config)).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {mam_master_slave, [sequence],
+ [master_slave_test(archived_and_stanza_id),
+ master_slave_test(query_all),
+ master_slave_test(query_with),
+ master_slave_test(query_rsm_max),
+ master_slave_test(query_rsm_after),
+ master_slave_test(query_rsm_before),
+ master_slave_test(muc)]}.
+
+archived_and_stanza_id_master(Config) ->
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send_messages(Config, lists:seq(1, 5)),
+ clean(disconnect(Config)).
+
+archived_and_stanza_id_slave(Config) ->
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ recv_messages(Config, lists:seq(1, 5)),
+ clean(disconnect(Config)).
+
+query_all_master(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send_messages(Config, lists:seq(1, 5)),
+ query_all(Config, MyJID, Peer),
+ clean(disconnect(Config)).
+
+query_all_slave(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ recv_messages(Config, lists:seq(1, 5)),
+ query_all(Config, Peer, MyJID),
+ clean(disconnect(Config)).
+
+query_with_master(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send_messages(Config, lists:seq(1, 5)),
+ query_with(Config, MyJID, Peer),
+ clean(disconnect(Config)).
+
+query_with_slave(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ recv_messages(Config, lists:seq(1, 5)),
+ query_with(Config, Peer, MyJID),
+ clean(disconnect(Config)).
+
+query_rsm_max_master(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send_messages(Config, lists:seq(1, 5)),
+ query_rsm_max(Config, MyJID, Peer),
+ clean(disconnect(Config)).
+
+query_rsm_max_slave(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ recv_messages(Config, lists:seq(1, 5)),
+ query_rsm_max(Config, Peer, MyJID),
+ clean(disconnect(Config)).
+
+query_rsm_after_master(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send_messages(Config, lists:seq(1, 5)),
+ query_rsm_after(Config, MyJID, Peer),
+ clean(disconnect(Config)).
+
+query_rsm_after_slave(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ recv_messages(Config, lists:seq(1, 5)),
+ query_rsm_after(Config, Peer, MyJID),
+ clean(disconnect(Config)).
+
+query_rsm_before_master(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_slave(Config),
+ send_messages(Config, lists:seq(1, 5)),
+ query_rsm_before(Config, MyJID, Peer),
+ clean(disconnect(Config)).
+
+query_rsm_before_slave(Config) ->
+ Peer = ?config(peer, Config),
+ MyJID = my_jid(Config),
+ ok = set_default(Config, always),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ recv_messages(Config, lists:seq(1, 5)),
+ query_rsm_before(Config, Peer, MyJID),
+ clean(disconnect(Config)).
+
+muc_master(Config) ->
+ Room = muc_room_jid(Config),
+ %% Joining
+ ok = muc_tests:join_new(Config),
+ %% MAM feature should not be advertised at this point,
+ %% because MAM is not enabled yet
+ false = is_feature_advertised(Config, ?NS_MAM_1, Room),
+ %% Fill in some history
+ send_messages_to_room(Config, lists:seq(1, 21)),
+ %% We should now be able to retrieve those via MAM, even though
+ %% MAM is disabled. However, only the last 20 messages should be received.
+ recv_messages_from_room(Config, lists:seq(2, 21)),
+ %% Now enable MAM for the conference
+ %% Retrieve config first
+ #iq{type = result, sub_els = [#muc_owner{config = #xdata{} = RoomCfg}]} =
+ send_recv(Config, #iq{type = get, sub_els = [#muc_owner{}],
+ to = Room}),
+ %% Find the MAM field in the config and enable it
+ NewFields = lists:flatmap(
+ fun(#xdata_field{var = <<"mam">> = Var}) ->
+ [#xdata_field{var = Var, values = [<<"1">>]}];
+ (_) ->
+ []
+ end, RoomCfg#xdata.fields),
+ NewRoomCfg = #xdata{type = submit, fields = NewFields},
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set, to = Room,
+ sub_els = [#muc_owner{config = NewRoomCfg}]}),
+ #message{from = Room, type = groupchat,
+ sub_els = [#muc_user{status_codes = [104]}]} = recv_message(Config),
+ %% Check if MAM has been enabled
+ true = is_feature_advertised(Config, ?NS_MAM_1, Room),
+ %% We now send some messages again
+ send_messages_to_room(Config, lists:seq(1, 5)),
+ %% And retrieve them via MAM again.
+ recv_messages_from_room(Config, lists:seq(1, 5)),
+ put_event(Config, disconnect),
+ clean(disconnect(Config)).
+
+muc_slave(Config) ->
+ disconnect = get_event(Config),
+ clean(disconnect(Config)).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("mam_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("mam_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("mam_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("mam_" ++ atom_to_list(T) ++ "_slave")]}.
+
+clean(Config) ->
+ {U, S, _} = jid:tolower(my_jid(Config)),
+ mod_mam:remove_user(U, S),
+ Config.
+
+set_default(Config, Default) ->
+ lists:foreach(
+ fun(NS) ->
+ ct:comment("Setting default preferences of '~s' to '~s'",
+ [NS, Default]),
+ #iq{type = result,
+ sub_els = [#mam_prefs{xmlns = NS, default = Default}]} =
+ send_recv(Config, #iq{type = set,
+ sub_els = [#mam_prefs{xmlns = NS,
+ default = Default}]})
+ end, ?VERSIONS).
+
+send_messages(Config, Range) ->
+ Peer = ?config(peer, Config),
+ lists:foreach(
+ fun(N) ->
+ Body = xmpp:mk_text(integer_to_binary(N)),
+ send(Config, #message{to = Peer, body = Body})
+ end, Range).
+
+recv_messages(Config, Range) ->
+ Peer = ?config(peer, Config),
+ lists:foreach(
+ fun(N) ->
+ Body = xmpp:mk_text(integer_to_binary(N)),
+ #message{from = Peer, body = Body} = Msg =
+ recv_message(Config),
+ #mam_archived{by = BareMyJID} =
+ xmpp:get_subtag(Msg, #mam_archived{}),
+ #stanza_id{by = BareMyJID} =
+ xmpp:get_subtag(Msg, #stanza_id{})
+ end, Range).
+
+recv_archived_messages(Config, From, To, QID, Range) ->
+ MyJID = my_jid(Config),
+ lists:foreach(
+ fun(N) ->
+ ct:comment("Retreiving ~pth message in range ~p",
+ [N, Range]),
+ Body = xmpp:mk_text(integer_to_binary(N)),
+ #message{to = MyJID,
+ sub_els =
+ [#mam_result{
+ queryid = QID,
+ sub_els =
+ [#forwarded{
+ delay = #delay{},
+ xml_els = [El]}]}]} = recv_message(Config),
+ #message{from = From, to = To,
+ body = Body} = xmpp:decode(El)
+ end, Range).
+
+maybe_recv_iq_result(Config, ?NS_MAM_0, I) ->
+ #iq{type = result, id = I} = recv_iq(Config);
+maybe_recv_iq_result(_, _, _) ->
+ ok.
+
+query_iq_type(?NS_MAM_TMP) -> get;
+query_iq_type(_) -> set.
+
+send_query(Config, #mam_query{xmlns = NS} = Query) ->
+ Type = query_iq_type(NS),
+ I = send(Config, #iq{type = Type, sub_els = [Query]}),
+ maybe_recv_iq_result(Config, NS, I),
+ I.
+
+recv_fin(Config, I, QueryID, ?NS_MAM_1 = NS, IsComplete) ->
+ ct:comment("Receiving fin iq for namespace '~s'", [NS]),
+ #iq{type = result, id = I,
+ sub_els = [#mam_fin{xmlns = NS,
+ id = QueryID,
+ complete = Complete,
+ rsm = RSM}]} = recv_iq(Config),
+ ct:comment("Checking if complete is ~s", [IsComplete]),
+ Complete = IsComplete,
+ RSM;
+recv_fin(Config, I, QueryID, ?NS_MAM_TMP = NS, _IsComplete) ->
+ ct:comment("Receiving fin iq for namespace '~s'", [NS]),
+ #iq{type = result, id = I,
+ sub_els = [#mam_query{xmlns = NS,
+ rsm = RSM,
+ id = QueryID}]} = recv_iq(Config),
+ RSM;
+recv_fin(Config, _, QueryID, ?NS_MAM_0 = NS, IsComplete) ->
+ ct:comment("Receiving fin message for namespace '~s'", [NS]),
+ #message{} = FinMsg = recv_message(Config),
+ #mam_fin{xmlns = NS,
+ id = QueryID,
+ complete = Complete,
+ rsm = RSM} = xmpp:get_subtag(FinMsg, #mam_fin{xmlns = NS}),
+ ct:comment("Checking if complete is ~s", [IsComplete]),
+ Complete = IsComplete,
+ RSM.
+
+send_messages_to_room(Config, Range) ->
+ MyNick = ?config(master_nick, Config),
+ Room = muc_room_jid(Config),
+ MyNickJID = jid:replace_resource(Room, MyNick),
+ lists:foreach(
+ fun(N) ->
+ Body = xmpp:mk_text(integer_to_binary(N)),
+ #message{from = MyNickJID,
+ type = groupchat,
+ body = Body} =
+ send_recv(Config, #message{to = Room, body = Body,
+ type = groupchat})
+ end, Range).
+
+recv_messages_from_room(Config, Range) ->
+ MyNick = ?config(master_nick, Config),
+ Room = muc_room_jid(Config),
+ MyNickJID = jid:replace_resource(Room, MyNick),
+ MyJID = my_jid(Config),
+ QID = randoms:get_string(),
+ Count = length(Range),
+ I = send(Config, #iq{type = set, to = Room,
+ sub_els = [#mam_query{xmlns = ?NS_MAM_1, id = QID}]}),
+ lists:foreach(
+ fun(N) ->
+ Body = xmpp:mk_text(integer_to_binary(N)),
+ #message{
+ to = MyJID, from = Room,
+ sub_els =
+ [#mam_result{
+ xmlns = ?NS_MAM_1,
+ queryid = QID,
+ sub_els =
+ [#forwarded{
+ delay = #delay{},
+ xml_els = [El]}]}]} = recv_message(Config),
+ #message{from = MyNickJID,
+ type = groupchat,
+ body = Body} = xmpp:decode(El)
+ end, Range),
+ #iq{from = Room, id = I, type = result,
+ sub_els = [#mam_fin{xmlns = ?NS_MAM_1,
+ id = QID,
+ rsm = #rsm_set{count = Count},
+ complete = true}]} = recv_iq(Config).
+
+query_all(Config, From, To) ->
+ lists:foreach(
+ fun(NS) ->
+ query_all(Config, From, To, NS)
+ end, ?VERSIONS).
+
+query_all(Config, From, To, NS) ->
+ QID = randoms:get_string(),
+ Range = lists:seq(1, 5),
+ ID = send_query(Config, #mam_query{xmlns = NS, id = QID}),
+ recv_archived_messages(Config, From, To, QID, Range),
+ #rsm_set{count = 5} = recv_fin(Config, ID, QID, NS, _Complete = true).
+
+query_with(Config, From, To) ->
+ lists:foreach(
+ fun(NS) ->
+ query_with(Config, From, To, NS)
+ end, ?VERSIONS).
+
+query_with(Config, From, To, NS) ->
+ Peer = ?config(peer, Config),
+ BarePeer = jid:remove_resource(Peer),
+ QID = randoms:get_string(),
+ Range = lists:seq(1, 5),
+ lists:foreach(
+ fun(JID) ->
+ ct:comment("Sending query with jid ~s", [jid:to_string(JID)]),
+ Query = if NS == ?NS_MAM_TMP ->
+ #mam_query{xmlns = NS, with = JID, id = QID};
+ true ->
+ Fs = mam_query:encode([{with, JID}]),
+ #mam_query{xmlns = NS, id = QID,
+ xdata = #xdata{type = submit,
+ fields = Fs}}
+ end,
+ ID = send_query(Config, Query),
+ recv_archived_messages(Config, From, To, QID, Range),
+ #rsm_set{count = 5} = recv_fin(Config, ID, QID, NS, true)
+ end, [Peer, BarePeer]).
+
+query_rsm_max(Config, From, To) ->
+ lists:foreach(
+ fun(NS) ->
+ query_rsm_max(Config, From, To, NS)
+ end, ?VERSIONS).
+
+query_rsm_max(Config, From, To, NS) ->
+ lists:foreach(
+ fun(Max) ->
+ QID = randoms:get_string(),
+ Range = lists:sublist(lists:seq(1, Max), 5),
+ Query = #mam_query{xmlns = NS, id = QID, rsm = #rsm_set{max = Max}},
+ ID = send_query(Config, Query),
+ recv_archived_messages(Config, From, To, QID, Range),
+ IsComplete = Max >= 5,
+ #rsm_set{count = 5} = recv_fin(Config, ID, QID, NS, IsComplete)
+ end, lists:seq(0, 6)).
+
+query_rsm_after(Config, From, To) ->
+ lists:foreach(
+ fun(NS) ->
+ query_rsm_after(Config, From, To, NS)
+ end, ?VERSIONS).
+
+query_rsm_after(Config, From, To, NS) ->
+ lists:foldl(
+ fun(Range, #rsm_first{data = After}) ->
+ ct:comment("Retrieving ~p messages after '~s'",
+ [length(Range), After]),
+ QID = randoms:get_string(),
+ Query = #mam_query{xmlns = NS, id = QID,
+ rsm = #rsm_set{'after' = After}},
+ ID = send_query(Config, Query),
+ recv_archived_messages(Config, From, To, QID, Range),
+ #rsm_set{count = 5, first = First} =
+ recv_fin(Config, ID, QID, NS, true),
+ First
+ end, #rsm_first{data = undefined},
+ [lists:seq(N, 5) || N <- lists:seq(1, 6)]).
+
+query_rsm_before(Config, From, To) ->
+ lists:foreach(
+ fun(NS) ->
+ query_rsm_before(Config, From, To, NS)
+ end, ?VERSIONS).
+
+query_rsm_before(Config, From, To, NS) ->
+ lists:foldl(
+ fun(Range, Before) ->
+ ct:comment("Retrieving ~p messages before '~s'",
+ [length(Range), Before]),
+ QID = randoms:get_string(),
+ Query = #mam_query{xmlns = NS, id = QID,
+ rsm = #rsm_set{before = Before}},
+ ID = send_query(Config, Query),
+ recv_archived_messages(Config, From, To, QID, Range),
+ #rsm_set{count = 5, last = Last} =
+ recv_fin(Config, ID, QID, NS, true),
+ Last
+ end, <<"">>, lists:reverse([lists:seq(1, N) || N <- lists:seq(0, 5)])).
diff --git a/test/mix_tests.erl b/test/mix_tests.erl
new file mode 100644
index 000000000..56b1b35d7
--- /dev/null
+++ b/test/mix_tests.erl
@@ -0,0 +1,139 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(mix_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [mix_jid/1, mix_room_jid/1, my_jid/1, is_feature_advertised/3,
+ disconnect/1, send_recv/2, recv_message/1, send/2,
+ put_event/2, get_event/1]).
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {mix_single, [sequence],
+ [single_test(feature_enabled)]}.
+
+feature_enabled(Config) ->
+ MIX = mix_jid(Config),
+ ct:comment("Checking if ~s is set", [?NS_MIX_0]),
+ true = is_feature_advertised(Config, ?NS_MIX_0, MIX),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {mix_master_slave, [sequence],
+ [master_slave_test(all)]}.
+
+all_master(Config) ->
+ MIX = mix_jid(Config),
+ Room = mix_room_jid(Config),
+ MyJID = my_jid(Config),
+ MyBareJID = jid:remove_resource(MyJID),
+ #iq{type = result,
+ sub_els =
+ [#disco_info{
+ identities = [#identity{category = <<"conference">>,
+ type = <<"text">>}],
+ xdata = [#xdata{type = result, fields = XFields}]}]} =
+ send_recv(Config, #iq{type = get, to = MIX, sub_els = [#disco_info{}]}),
+ true = lists:any(
+ fun(#xdata_field{var = <<"FORM_TYPE">>,
+ values = [?NS_MIX_SERVICEINFO_0]}) -> true;
+ (_) -> false
+ end, XFields),
+ %% Joining
+ Nodes = [?NS_MIX_NODES_MESSAGES, ?NS_MIX_NODES_PRESENCE,
+ ?NS_MIX_NODES_PARTICIPANTS, ?NS_MIX_NODES_SUBJECT,
+ ?NS_MIX_NODES_CONFIG],
+ #iq{type = result,
+ sub_els = [#mix_join{subscribe = Nodes, jid = MyBareJID}]} =
+ send_recv(Config, #iq{type = set, to = Room,
+ sub_els = [#mix_join{subscribe = Nodes}]}),
+ #message{from = Room,
+ sub_els =
+ [#ps_event{
+ items = #ps_items{
+ node = ?NS_MIX_NODES_PARTICIPANTS,
+ items = [#ps_item{
+ id = ParticipantID,
+ xml_els = [PXML]}]}}]} =
+ recv_message(Config),
+ #mix_participant{jid = MyBareJID} = xmpp:decode(PXML),
+ %% Coming online
+ PresenceID = randoms:get_string(),
+ Presence = xmpp:encode(#presence{}),
+ #iq{type = result,
+ sub_els =
+ [#pubsub{
+ publish = #ps_publish{
+ node = ?NS_MIX_NODES_PRESENCE,
+ items = [#ps_item{id = PresenceID}]}}]} =
+ send_recv(
+ Config,
+ #iq{type = set, to = Room,
+ sub_els =
+ [#pubsub{
+ publish = #ps_publish{
+ node = ?NS_MIX_NODES_PRESENCE,
+ items = [#ps_item{
+ id = PresenceID,
+ xml_els = [Presence]}]}}]}),
+ #message{from = Room,
+ sub_els =
+ [#ps_event{
+ items = #ps_items{
+ node = ?NS_MIX_NODES_PRESENCE,
+ items = [#ps_item{
+ id = PresenceID,
+ xml_els = [Presence]}]}}]} =
+ recv_message(Config),
+ %% Coming offline
+ send(Config, #presence{type = unavailable, to = Room}),
+ %% Receiving presence retract event
+ #message{from = Room,
+ sub_els = [#ps_event{
+ items = #ps_items{
+ node = ?NS_MIX_NODES_PRESENCE,
+ retract = PresenceID}}]} =
+ recv_message(Config),
+ %% Leaving
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set, to = Room, sub_els = [#mix_leave{}]}),
+ #message{from = Room,
+ sub_els =
+ [#ps_event{
+ items = #ps_items{
+ node = ?NS_MIX_NODES_PARTICIPANTS,
+ retract = ParticipantID}}]} =
+ recv_message(Config),
+ put_event(Config, disconnect),
+ disconnect(Config).
+
+all_slave(Config) ->
+ disconnect = get_event(Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("mix_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("mix_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("mix_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("mix_" ++ atom_to_list(T) ++ "_slave")]}.
diff --git a/test/mod_admin_extra_test.exs b/test/mod_admin_extra_test.exs
index fde66f03f..3baf4922f 100644
--- a/test/mod_admin_extra_test.exs
+++ b/test/mod_admin_extra_test.exs
@@ -38,7 +38,7 @@ defmodule EjabberdModAdminExtraTest do
setup_all do
try do
- :jid.start
+ :jid.start
:stringprep.start
:mnesia.start
:p1_sha.load_nif
diff --git a/test/mod_legacy.erl b/test/mod_legacy.erl
new file mode 100644
index 000000000..dba977554
--- /dev/null
+++ b/test/mod_legacy.erl
@@ -0,0 +1,38 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 25 Sep 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(mod_legacy).
+-behaviour(gen_mod).
+
+%% API
+-export([start/2, stop/1, mod_opt_type/1, depends/2, process_iq/3]).
+-include("jlib.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+start(Host, Opts) ->
+ IQDisc = gen_mod:get_opt(iqdisc, Opts, fun gen_iq_handler:check_type/1,
+ one_queue),
+ gen_iq_handler:add_iq_handler(ejabberd_local, Host, ?NS_EVENT,
+ ?MODULE, process_iq, IQDisc).
+
+stop(Host) ->
+ gen_iq_handler:remove_iq_handler(ejabberd_local, Host, ?MODULE).
+
+mod_opt_type(_) ->
+ [].
+
+depends(_, _) ->
+ [].
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+process_iq(_From, _To, IQ) ->
+ IQ#iq{type = result, sub_el = []}.
diff --git a/test/muc_tests.erl b/test/muc_tests.erl
new file mode 100644
index 000000000..d8e6dd8fb
--- /dev/null
+++ b/test/muc_tests.erl
@@ -0,0 +1,1885 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 15 Oct 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(muc_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [recv_presence/1, send_recv/2, my_jid/1, muc_room_jid/1,
+ send/2, recv_message/1, recv_iq/1, muc_jid/1,
+ alt_room_jid/1, wait_for_slave/1, wait_for_master/1,
+ disconnect/1, put_event/2, get_event/1, peer_muc_jid/1,
+ my_muc_jid/1, get_features/2, set_opt/3]).
+-include("suite.hrl").
+-include("jid.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single tests
+%%%===================================================================
+single_cases() ->
+ {muc_single, [sequence],
+ [single_test(service_presence_error),
+ single_test(service_message_error),
+ single_test(service_unknown_ns_iq_error),
+ single_test(service_iq_set_error),
+ single_test(service_improper_iq_error),
+ single_test(service_features),
+ single_test(service_disco_info_node_error),
+ single_test(service_disco_items),
+ single_test(service_unique),
+ single_test(service_vcard),
+ single_test(configure_non_existent),
+ single_test(cancel_configure_non_existent),
+ single_test(service_subscriptions)]}.
+
+service_presence_error(Config) ->
+ Service = muc_jid(Config),
+ ServiceResource = jid:replace_resource(Service, randoms:get_string()),
+ lists:foreach(
+ fun(To) ->
+ send(Config, #presence{type = error, to = To}),
+ lists:foreach(
+ fun(Type) ->
+ #presence{type = error} = Err =
+ send_recv(Config, #presence{type = Type, to = To}),
+ #stanza_error{reason = 'service-unavailable'} =
+ xmpp:get_error(Err)
+ end, [available, unavailable])
+ end, [Service, ServiceResource]),
+ disconnect(Config).
+
+service_message_error(Config) ->
+ Service = muc_jid(Config),
+ send(Config, #message{type = error, to = Service}),
+ lists:foreach(
+ fun(Type) ->
+ #message{type = error} = Err1 =
+ send_recv(Config, #message{type = Type, to = Service}),
+ #stanza_error{reason = 'forbidden'} = xmpp:get_error(Err1)
+ end, [chat, normal, headline, groupchat]),
+ ServiceResource = jid:replace_resource(Service, randoms:get_string()),
+ send(Config, #message{type = error, to = ServiceResource}),
+ lists:foreach(
+ fun(Type) ->
+ #message{type = error} = Err2 =
+ send_recv(Config, #message{type = Type, to = ServiceResource}),
+ #stanza_error{reason = 'service-unavailable'} = xmpp:get_error(Err2)
+ end, [chat, normal, headline, groupchat]),
+ disconnect(Config).
+
+service_unknown_ns_iq_error(Config) ->
+ Service = muc_jid(Config),
+ ServiceResource = jid:replace_resource(Service, randoms:get_string()),
+ lists:foreach(
+ fun(To) ->
+ send(Config, #iq{type = result, to = To}),
+ send(Config, #iq{type = error, to = To}),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} = Err1 =
+ send_recv(Config, #iq{type = Type, to = To,
+ sub_els = [#presence{}]}),
+ #stanza_error{reason = 'service-unavailable'} =
+ xmpp:get_error(Err1)
+ end, [set, get])
+ end, [Service, ServiceResource]),
+ disconnect(Config).
+
+service_iq_set_error(Config) ->
+ Service = muc_jid(Config),
+ lists:foreach(
+ fun(SubEl) ->
+ send(Config, #iq{type = result, to = Service,
+ sub_els = [SubEl]}),
+ #iq{type = error} = Err2 =
+ send_recv(Config, #iq{type = set, to = Service,
+ sub_els = [SubEl]}),
+ #stanza_error{reason = 'not-allowed'} =
+ xmpp:get_error(Err2)
+ end, [#disco_items{}, #disco_info{}, #vcard_temp{},
+ #muc_unique{}, #muc_subscriptions{}]),
+ disconnect(Config).
+
+service_improper_iq_error(Config) ->
+ Service = muc_jid(Config),
+ lists:foreach(
+ fun(SubEl) ->
+ send(Config, #iq{type = result, to = Service,
+ sub_els = [SubEl]}),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} = Err3 =
+ send_recv(Config, #iq{type = Type, to = Service,
+ sub_els = [SubEl]}),
+ #stanza_error{reason = Reason} = xmpp:get_error(Err3),
+ true = Reason /= 'internal-server-error'
+ end, [set, get])
+ end, [#disco_item{jid = Service},
+ #identity{category = <<"category">>, type = <<"type">>},
+ #vcard_email{}, #muc_subscribe{nick = ?config(nick, Config)}]),
+ disconnect(Config).
+
+service_features(Config) ->
+ ServerHost = ?config(server_host, Config),
+ MUC = muc_jid(Config),
+ Features = sets:from_list(get_features(Config, MUC)),
+ MAMFeatures = case gen_mod:is_loaded(ServerHost, mod_mam) of
+ true -> [?NS_MAM_TMP, ?NS_MAM_0, ?NS_MAM_1];
+ false -> []
+ end,
+ RequiredFeatures = sets:from_list(
+ [?NS_DISCO_INFO, ?NS_DISCO_ITEMS,
+ ?NS_REGISTER, ?NS_MUC, ?NS_RSM,
+ ?NS_VCARD, ?NS_MUCSUB, ?NS_MUC_UNIQUE
+ | MAMFeatures]),
+ ct:comment("Checking if all needed disco features are set"),
+ true = sets:is_subset(RequiredFeatures, Features),
+ disconnect(Config).
+
+service_disco_info_node_error(Config) ->
+ MUC = muc_jid(Config),
+ Node = randoms:get_string(),
+ #iq{type = error} = Err =
+ send_recv(Config, #iq{type = get, to = MUC,
+ sub_els = [#disco_info{node = Node}]}),
+ #stanza_error{reason = 'item-not-found'} = xmpp:get_error(Err),
+ disconnect(Config).
+
+service_disco_items(Config) ->
+ #jid{server = Service} = muc_jid(Config),
+ Rooms = lists:sort(
+ lists:map(
+ fun(I) ->
+ RoomName = integer_to_binary(I),
+ jid:make(RoomName, Service)
+ end, lists:seq(1, 5))),
+ lists:foreach(
+ fun(Room) ->
+ ok = join_new(Config, Room)
+ end, Rooms),
+ Items = disco_items(Config),
+ Rooms = [J || #disco_item{jid = J} <- Items],
+ lists:foreach(
+ fun(Room) ->
+ ok = leave(Config, Room)
+ end, Rooms),
+ [] = disco_items(Config),
+ disconnect(Config).
+
+service_vcard(Config) ->
+ MUC = muc_jid(Config),
+ ct:comment("Retreiving vCard from ~s", [jid:to_string(MUC)]),
+ #iq{type = result, sub_els = [#vcard_temp{}]} =
+ send_recv(Config, #iq{type = get, to = MUC, sub_els = [#vcard_temp{}]}),
+ disconnect(Config).
+
+service_unique(Config) ->
+ MUC = muc_jid(Config),
+ ct:comment("Requesting muc unique from ~s", [jid:to_string(MUC)]),
+ #iq{type = result, sub_els = [#muc_unique{name = Name}]} =
+ send_recv(Config, #iq{type = get, to = MUC, sub_els = [#muc_unique{}]}),
+ ct:comment("Checking if unique name is set in the response"),
+ <<_, _/binary>> = Name,
+ disconnect(Config).
+
+configure_non_existent(Config) ->
+ [_|_] = get_config(Config),
+ disconnect(Config).
+
+cancel_configure_non_existent(Config) ->
+ Room = muc_room_jid(Config),
+ #iq{type = result, sub_els = []} =
+ send_recv(Config,
+ #iq{to = Room, type = set,
+ sub_els = [#muc_owner{config = #xdata{type = cancel}}]}),
+ disconnect(Config).
+
+service_subscriptions(Config) ->
+ MUC = #jid{server = Service} = muc_jid(Config),
+ Rooms = lists:sort(
+ lists:map(
+ fun(I) ->
+ RoomName = integer_to_binary(I),
+ jid:make(RoomName, Service)
+ end, lists:seq(1, 5))),
+ lists:foreach(
+ fun(Room) ->
+ ok = join_new(Config, Room),
+ [104] = set_config(Config, [{allow_subscription, true}], Room),
+ [] = subscribe(Config, [], Room)
+ end, Rooms),
+ #iq{type = result, sub_els = [#muc_subscriptions{list = JIDs}]} =
+ send_recv(Config, #iq{type = get, to = MUC,
+ sub_els = [#muc_subscriptions{}]}),
+ Rooms = lists:sort(JIDs),
+ lists:foreach(
+ fun(Room) ->
+ ok = unsubscribe(Config, Room),
+ ok = leave(Config, Room)
+ end, Rooms),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+%% Common Test group definition listing every two-party (master/slave)
+%% MUC testcase; run in sequence because cases synchronize via events.
+master_slave_cases() ->
+    {muc_master_slave, [sequence],
+     [master_slave_test(register),
+      master_slave_test(groupchat_msg),
+      master_slave_test(private_msg),
+      master_slave_test(set_subject),
+      master_slave_test(history),
+      master_slave_test(invite),
+      master_slave_test(invite_members_only),
+      master_slave_test(invite_password_protected),
+      master_slave_test(voice_request),
+      master_slave_test(change_role),
+      master_slave_test(kick),
+      master_slave_test(change_affiliation),
+      master_slave_test(destroy),
+      master_slave_test(vcard),
+      master_slave_test(nick_change),
+      master_slave_test(config_title_desc),
+      master_slave_test(config_public_list),
+      master_slave_test(config_password),
+      master_slave_test(config_whois),
+      master_slave_test(config_members_only),
+      master_slave_test(config_moderated),
+      master_slave_test(config_private_messages),
+      master_slave_test(config_query),
+      master_slave_test(config_allow_invites),
+      master_slave_test(config_visitor_status),
+      master_slave_test(config_allow_voice_requests),
+      master_slave_test(config_voice_request_interval),
+      master_slave_test(config_visitor_nickchange),
+      master_slave_test(join_conflict)]}.
+
+%% Master side of join_conflict: occupies the room, tells the slave to
+%% try joining with the same nick, then leaves on the slave's signal.
+join_conflict_master(Config) ->
+    ok = join_new(Config),
+    put_event(Config, join),
+    ct:comment("Waiting for 'leave' command from the slave"),
+    leave = get_event(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of join_conflict: joins using the master's nick and
+%% expects a 'conflict' stanza error, then releases the master.
+join_conflict_slave(Config) ->
+    NewConfig = set_opt(nick, ?config(peer_nick, Config), Config),
+    ct:comment("Waiting for 'join' command from the master"),
+    join = get_event(Config),
+    ct:comment("Fail trying to join the room with conflicting nick"),
+    #stanza_error{reason = 'conflict'} = join(NewConfig),
+    put_event(Config, leave),
+    disconnect(NewConfig).
+
+%% Master side of groupchat_msg: sends five groupchat messages and
+%% checks each is reflected back from its own occupant JID, then waits
+%% for the slave's unavailable presence before leaving.
+groupchat_msg_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    ok = master_join(Config),
+    lists:foreach(
+      fun(I) ->
+              Body = xmpp:mk_text(integer_to_binary(I)),
+              send(Config, #message{type = groupchat, to = Room,
+                                    body = Body}),
+              %% Room must echo our message from our occupant JID.
+              #message{type = groupchat, from = MyNickJID,
+                       body = Body} = recv_message(Config)
+      end, lists:seq(1, 5)),
+    #muc_user{items = [#muc_item{jid = PeerJID,
+                                 role = none,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of groupchat_msg: receives the master's five groupchat
+%% messages in order, then leaves.
+groupchat_msg_slave(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(master_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    {[], _, _} = slave_join(Config),
+    lists:foreach(
+      fun(I) ->
+              Body = xmpp:mk_text(integer_to_binary(I)),
+              #message{type = groupchat, from = PeerNickJID,
+                       body = Body} = recv_message(Config)
+      end, lists:seq(1, 5)),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of private_msg: sends five chat messages to the slave's
+%% occupant JID, waits for the slave to leave, then checks a private
+%% message to the now-gone occupant bounces with 'item-not-found'.
+private_msg_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = master_join(Config),
+    lists:foreach(
+      fun(I) ->
+              Body = xmpp:mk_text(integer_to_binary(I)),
+              send(Config, #message{type = chat, to = PeerNickJID,
+                                    body = Body})
+      end, lists:seq(1, 5)),
+    #muc_user{items = [#muc_item{jid = PeerJID,
+                                 role = none,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    ct:comment("Fail trying to send a private message to non-existing occupant"),
+    send(Config, #message{type = chat, to = PeerNickJID}),
+    #message{from = PeerNickJID, type = error} = ErrMsg = recv_message(Config),
+    #stanza_error{reason = 'item-not-found'} = xmpp:get_error(ErrMsg),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of private_msg: receives the master's five private chat
+%% messages (addressed via the room) in order, then leaves.
+private_msg_slave(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(master_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    {[], _, _} = slave_join(Config),
+    lists:foreach(
+      fun(I) ->
+              Body = xmpp:mk_text(integer_to_binary(I)),
+              #message{type = chat, from = PeerNickJID,
+                       body = Body} = recv_message(Config)
+      end, lists:seq(1, 5)),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of set_subject: sets two subjects (each echoed back from
+%% our own occupant JID), has the slave re-join and restore the first
+%% subject, then disallows subject changes via room config (code 104)
+%% and waits for the slave to leave.
+%% Fix: MyNickJID was used in the echo matches below but never bound
+%% (unbound-variable compile error); bind it from our own nick exactly
+%% as groupchat_msg_master/1 does.
+set_subject_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    Subject1 = xmpp:mk_text(?config(room_subject, Config)),
+    Subject2 = xmpp:mk_text(<<"new-", (?config(room_subject, Config))/binary>>),
+    ok = master_join(Config),
+    ct:comment("Setting 1st subject"),
+    send(Config, #message{type = groupchat, to = Room,
+                          subject = Subject1}),
+    %% The room reflects the subject change from our occupant JID.
+    #message{type = groupchat, from = MyNickJID,
+             subject = Subject1} = recv_message(Config),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ct:comment("Setting 2nd subject"),
+    send(Config, #message{type = groupchat, to = Room,
+                          subject = Subject2}),
+    #message{type = groupchat, from = MyNickJID,
+             subject = Subject2} = recv_message(Config),
+    ct:comment("Asking the slave to join"),
+    put_event(Config, join),
+    recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Receiving 1st subject set by the slave"),
+    #message{type = groupchat, from = PeerNickJID,
+             subject = Subject1} = recv_message(Config),
+    ct:comment("Disallow subject change"),
+    %% 104: room configuration changed (XEP-0045 status code).
+    [104] = set_config(Config, [{changesubject, false}]),
+    ct:comment("Waiting for the slave to leave"),
+    #muc_user{items = [#muc_item{jid = PeerJID,
+                                 role = none,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of set_subject: sees the master's first subject, re-joins
+%% to observe the second one delivered on entry, restores the first
+%% subject, then checks a subject change is 'forbidden' once the master
+%% disallows it via room config.
+set_subject_slave(Config) ->
+    Room = muc_room_jid(Config),
+    MyNickJID = my_muc_jid(Config),
+    PeerNick = ?config(master_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    Subject1 = xmpp:mk_text(?config(room_subject, Config)),
+    Subject2 = xmpp:mk_text(<<"new-", (?config(room_subject, Config))/binary>>),
+    {[], _, _} = slave_join(Config),
+    ct:comment("Receiving 1st subject set by the master"),
+    #message{type = groupchat, from = PeerNickJID,
+             subject = Subject1} = recv_message(Config),
+    ok = leave(Config),
+    ct:comment("Waiting for 'join' command from the master"),
+    join = get_event(Config),
+    {[], SubjMsg2, _} = join(Config),
+    ct:comment("Checking if the master has set 2nd subject during our absence"),
+    #message{type = groupchat, from = PeerNickJID,
+             subject = Subject2} = SubjMsg2,
+    ct:comment("Setting 1st subject"),
+    send(Config, #message{to = Room, type = groupchat, subject = Subject1}),
+    #message{type = groupchat, from = MyNickJID,
+             subject = Subject1} = recv_message(Config),
+    ct:comment("Waiting for the master to disallow subject change"),
+    %% 104: room configuration changed (XEP-0045 status code).
+    [104] = recv_config_change_message(Config),
+    ct:comment("Fail trying to change the subject"),
+    send(Config, #message{to = Room, type = groupchat, subject = Subject2}),
+    #message{from = Room, type = error} = ErrMsg = recv_message(Config),
+    #stanza_error{reason = 'forbidden'} = xmpp:get_error(ErrMsg),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of history: fills the room history with history_size+1
+%% messages (the room stores only history_size of them), then lets the
+%% slave join/leave four times to exercise history retrieval variants.
+history_master(Config) ->
+    Room = muc_room_jid(Config),
+    ServerHost = ?config(server_host, Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    PeerNickJID = peer_muc_jid(Config),
+    %% Same history_size the server uses; defaults to 20.
+    Size = gen_mod:get_module_opt(ServerHost, mod_muc, history_size,
+                                  fun(I) when is_integer(I), I>=0 -> I end,
+                                  20),
+    ok = join_new(Config),
+    ct:comment("Putting ~p+1 messages in the history", [Size]),
+    %% Only Size messages will be stored
+    lists:foreach(
+      fun(I) ->
+              Body = xmpp:mk_text(integer_to_binary(I)),
+              send(Config, #message{to = Room, type = groupchat,
+                                    body = Body}),
+              #message{type = groupchat, from = MyNickJID,
+                       body = Body} = recv_message(Config)
+      end, lists:seq(0, Size)),
+    put_event(Config, join),
+    %% Four join/leave cycles by the slave (one per history variant).
+    lists:foreach(
+      fun(Type) ->
+              recv_muc_presence(Config, PeerNickJID, Type)
+      end, [available, unavailable,
+            available, unavailable,
+            available, unavailable,
+            available, unavailable]),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of history: joins four times checking (1) full ordered
+%% history, (2) maxchars=0 yields no history, (3) maxstanzas=10 yields
+%% the last 10 messages, (4) 'since' the first stanza's delay timestamp
+%% yields everything but the first message.
+history_slave(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(peer_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ServerHost = ?config(server_host, Config),
+    Size = gen_mod:get_module_opt(ServerHost, mod_muc, history_size,
+                                  fun(I) when is_integer(I), I>=0 -> I end,
+                                  20),
+    ct:comment("Waiting for 'join' command from the master"),
+    join = get_event(Config),
+    {History, _, _} = join(Config),
+    ct:comment("Checking ordering of history events"),
+    %% Master sent bodies 0..Size; only the last Size (1..Size) survive.
+    BodyList = [binary_to_integer(xmpp:get_text(Body))
+                || #message{type = groupchat, from = From,
+                            body = Body} <- History,
+                   From == PeerNickJID],
+    BodyList = lists:seq(1, Size),
+    ok = leave(Config),
+    %% If the client wishes to receive no history, it MUST set the 'maxchars'
+    %% attribute to a value of "0" (zero)
+    %% (http://xmpp.org/extensions/xep-0045.html#enter-managehistory)
+    ct:comment("Checking if maxchars=0 yields to no history"),
+    {[], _, _} = join(Config, #muc{history = #muc_history{maxchars = 0}}),
+    ok = leave(Config),
+    ct:comment("Receiving only 10 last stanzas"),
+    {History10, _, _} = join(Config,
+                             #muc{history = #muc_history{maxstanzas = 10}}),
+    BodyList10 = [binary_to_integer(xmpp:get_text(Body))
+                  || #message{type = groupchat, from = From,
+                              body = Body} <- History10,
+                     From == PeerNickJID],
+    BodyList10 = lists:nthtail(Size-10, lists:seq(1, Size)),
+    ok = leave(Config),
+    %% Timestamp of the oldest stored history stanza.
+    #delay{stamp = TS} = xmpp:get_subtag(hd(History), #delay{}),
+    ct:comment("Receiving all history without the very first element"),
+    {HistoryWithoutFirst, _, _} = join(Config,
+                                       #muc{history = #muc_history{since = TS}}),
+    BodyListWithoutFirst = [binary_to_integer(xmpp:get_text(Body))
+                            || #message{type = groupchat, from = From,
+                                        body = Body} <- HistoryWithoutFirst,
+                               From == PeerNickJID],
+    BodyListWithoutFirst = lists:nthtail(1, lists:seq(1, Size)),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of invite: sends a mediated invitation through the room
+%% and expects the slave's decline to be relayed back.
+invite_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(peer, Config),
+    ok = join_new(Config),
+    wait_for_slave(Config),
+    %% Inviting the peer
+    send(Config, #message{to = Room, type = normal,
+                          sub_els =
+                              [#muc_user{
+                                  invites =
+                                      [#muc_invite{to = PeerJID}]}]}),
+    #message{from = Room} = DeclineMsg = recv_message(Config),
+    #muc_user{decline = #muc_decline{from = PeerJID}} =
+        xmpp:get_subtag(DeclineMsg, #muc_user{}),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of invite: receives the mediated invitation from the
+%% master (relayed by the room) and declines it through the room.
+invite_slave(Config) ->
+    Room = muc_room_jid(Config),
+    wait_for_master(Config),
+    PeerJID = ?config(master, Config),
+    #message{from = Room, type = normal} = Msg = recv_message(Config),
+    #muc_user{invites = [#muc_invite{from = PeerJID}]} =
+        xmpp:get_subtag(Msg, #muc_user{}),
+    %% Decline invitation
+    send(Config,
+         #message{to = Room,
+                  sub_els = [#muc_user{
+                                decline = #muc_decline{to = PeerJID}}]}),
+    disconnect(Config).
+
+%% Master side of invite_members_only: in a members-only room an
+%% invitation must automatically grant the invitee member affiliation;
+%% we expect the room to report the peer's new affiliation.
+invite_members_only_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    ok = join_new(Config),
+    %% Setting the room to members-only
+    [_|_] = set_config(Config, [{membersonly, true}]),
+    wait_for_slave(Config),
+    %% Inviting the peer
+    send(Config, #message{to = Room, type = normal,
+                          sub_els =
+                              [#muc_user{
+                                  invites =
+                                      [#muc_invite{to = PeerJID}]}]}),
+    #message{from = Room, type = normal} = AffMsg = recv_message(Config),
+    #muc_user{items = [#muc_item{jid = PeerJID, affiliation = member}]} =
+        xmpp:get_subtag(AffMsg, #muc_user{}),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of invite_members_only: merely receives the invitation
+%% message relayed by the room.
+invite_members_only_slave(Config) ->
+    Room = muc_room_jid(Config),
+    wait_for_master(Config),
+    %% Receiving invitation
+    #message{from = Room, type = normal} = recv_message(Config),
+    disconnect(Config).
+
+%% Master side of invite_password_protected: protects the room with a
+%% random password, shares it with the slave out of band, then invites
+%% the slave (the room should include the password in the invitation).
+invite_password_protected_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    Password = randoms:get_string(),
+    ok = join_new(Config),
+    %% 104: room configuration changed (XEP-0045 status code).
+    [104] = set_config(Config, [{passwordprotectedroom, true},
+                                {roomsecret, Password}]),
+    put_event(Config, Password),
+    %% Inviting the peer
+    send(Config, #message{to = Room, type = normal,
+                          sub_els =
+                              [#muc_user{
+                                  invites =
+                                      [#muc_invite{to = PeerJID}]}]}),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of invite_password_protected: the relayed invitation
+%% must carry the room password the master set.
+invite_password_protected_slave(Config) ->
+    Room = muc_room_jid(Config),
+    Password = get_event(Config),
+    %% Receiving invitation
+    #message{from = Room, type = normal} = Msg = recv_message(Config),
+    #muc_user{password = Password} = xmpp:get_subtag(Msg, #muc_user{}),
+    disconnect(Config).
+
+%% Master side of voice_request: makes new occupants visitors, receives
+%% the slave's voice-request form, approves it, and checks the slave is
+%% promoted to participant.
+voice_request_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = join_new(Config),
+    [104] = set_config(Config, [{members_by_default, false}]),
+    wait_for_slave(Config),
+    #muc_user{
+       items = [#muc_item{role = visitor,
+                          jid = PeerJID,
+                          affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Receiving voice request"),
+    #message{from = Room, type = normal} = VoiceReq = recv_message(Config),
+    #xdata{type = form, fields = Fs} = xmpp:get_subtag(VoiceReq, #xdata{}),
+    %% Sorted by key atom: jid < request_allow < role < roomnick.
+    [{jid, PeerJID},
+     {request_allow, false},
+     {role, participant},
+     {roomnick, PeerNick}] = lists:sort(muc_request:decode(Fs)),
+    ct:comment("Approving voice request"),
+    %% NOTE(review): the decoded form used key 'roomnick' but the
+    %% approval is encoded with 'nick' -- confirm muc_request:encode/1
+    %% accepts 'nick', otherwise this should likely be 'roomnick'.
+    ApprovalFs = muc_request:encode([{jid, PeerJID}, {role, participant},
+                                     {nick, PeerNick}, {request_allow, true}]),
+    send(Config, #message{to = Room, sub_els = [#xdata{type = submit,
+                                                       fields = ApprovalFs}]}),
+    #muc_user{
+       items = [#muc_item{role = participant,
+                          jid = PeerJID,
+                          affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of voice_request: joins as visitor, submits a voice
+%% request form, and waits for its own presence showing the promotion
+%% to participant role.
+voice_request_slave(Config) ->
+    Room = muc_room_jid(Config),
+    MyJID = my_jid(Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    wait_for_master(Config),
+    {[], _, _} = join(Config, visitor),
+    ct:comment("Requesting voice"),
+    Fs = muc_request:encode([{role, participant}]),
+    X = #xdata{type = submit, fields = Fs},
+    send(Config, #message{to = Room, sub_els = [X]}),
+    ct:comment("Waiting to become a participant"),
+    #muc_user{
+       items = [#muc_item{role = participant,
+                          jid = MyJID,
+                          affiliation = none}]} =
+        recv_muc_presence(Config, MyNickJID, available),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of change_role: cycles the slave through visitor,
+%% participant and moderator roles, verifying each change in both the
+%% presence broadcast and the role list returned by the room.
+change_role_master(Config) ->
+    Room = muc_room_jid(Config),
+    MyJID = my_jid(Config),
+    MyNick = ?config(nick, Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = join_new(Config),
+    ct:comment("Waiting for the slave to join"),
+    wait_for_slave(Config),
+    #muc_user{items = [#muc_item{role = participant,
+                                 jid = PeerJID,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    lists:foreach(
+      fun(Role) ->
+              ct:comment("Checking if the slave is not in the roles list"),
+              %% For 'moderator' the list already contains ourselves
+              %% (room owner); otherwise it must be empty.
+              case get_role(Config, Role) of
+                  [#muc_item{jid = MyJID, affiliation = owner,
+                             role = moderator, nick = MyNick}] when Role == moderator ->
+                      ok;
+                  [] ->
+                      ok
+              end,
+              Reason = randoms:get_string(),
+              put_event(Config, {Role, Reason}),
+              ok = set_role(Config, Role, Reason),
+              ct:comment("Receiving role change to ~s", [Role]),
+              #muc_user{
+                 items = [#muc_item{role = Role,
+                                    affiliation = none,
+                                    reason = Reason}]} =
+                  recv_muc_presence(Config, PeerNickJID, available),
+              [#muc_item{role = Role, affiliation = none,
+                         nick = PeerNick}|_] = get_role(Config, Role)
+      end, [visitor, participant, moderator]),
+    put_event(Config, disconnect),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of change_role: joins and then processes role-change
+%% events from the master via change_role_slave/2.
+change_role_slave(Config) ->
+    wait_for_master(Config),
+    {[], _, _} = join(Config),
+    change_role_slave(Config, get_event(Config)).
+
+%% Event loop for the slave: for each {Role, Reason} announced by the
+%% master, expects a self-presence (status code 110) carrying the new
+%% role and reason; the 'disconnect' event ends the loop.
+change_role_slave(Config, {Role, Reason}) ->
+    Room = muc_room_jid(Config),
+    MyNick = ?config(slave_nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    ct:comment("Receiving role change to ~s", [Role]),
+    #muc_user{status_codes = Codes,
+              items = [#muc_item{role = Role,
+                                 affiliation = none,
+                                 reason = Reason}]} =
+        recv_muc_presence(Config, MyNickJID, available),
+    %% 110 marks the presence as referring to ourselves.
+    true = lists:member(110, Codes),
+    change_role_slave(Config, get_event(Config));
+change_role_slave(Config, disconnect) ->
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of change_affiliation: cycles the slave through member,
+%% none, admin, owner and finally outcast affiliations, verifying the
+%% broadcast presence (role/affiliation/actor/reason) and the room's
+%% affiliation list after each change.
+change_affiliation_master(Config) ->
+    Room = muc_room_jid(Config),
+    MyJID = my_jid(Config),
+    MyBareJID = jid:remove_resource(MyJID),
+    MyNick = ?config(nick, Config),
+    PeerJID = ?config(slave, Config),
+    PeerBareJID = jid:remove_resource(PeerJID),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = join_new(Config),
+    ct:comment("Waiting for the slave to join"),
+    wait_for_slave(Config),
+    #muc_user{items = [#muc_item{role = participant,
+                                 jid = PeerJID,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    lists:foreach(
+      fun({Aff, Role, Status}) ->
+              ct:comment("Checking if slave is not in affiliation list"),
+              %% For 'owner' the list already contains ourselves
+              %% (room creator); otherwise it must be empty.
+              case get_affiliation(Config, Aff) of
+                  [#muc_item{jid = MyBareJID,
+                             affiliation = owner}] when Aff == owner ->
+                      ok;
+                  [] ->
+                      ok
+              end,
+              Reason = randoms:get_string(),
+              put_event(Config, {Aff, Role, Status, Reason}),
+              ok = set_affiliation(Config, Aff, Reason),
+              ct:comment("Receiving affiliation change to ~s", [Aff]),
+              #muc_user{
+                 items = [#muc_item{role = Role,
+                                    affiliation = Aff,
+                                    actor = Actor,
+                                    reason = Reason}]} =
+                  recv_muc_presence(Config, PeerNickJID, Status),
+              if Aff == outcast ->
+                      ct:comment("Checking if actor is set"),
+                      #muc_actor{nick = MyNick} = Actor;
+                 true ->
+                      ok
+              end,
+              Affs = get_affiliation(Config, Aff),
+              ct:comment("Checking if the affiliation was correctly set"),
+              case lists:keyfind(PeerBareJID, #muc_item.jid, Affs) of
+                  false when Aff == none ->
+                      ok;
+                  #muc_item{affiliation = Aff} ->
+                      ok
+              end
+      end, [{member, participant, available}, {none, participant, available},
+            {admin, moderator, available}, {owner, moderator, available},
+            {outcast, none, unavailable}]),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of change_affiliation: joins and then processes the
+%% master's affiliation events via change_affiliation_slave/2.
+change_affiliation_slave(Config) ->
+    wait_for_master(Config),
+    {[], _, _} = join(Config),
+    change_affiliation_slave(Config, get_event(Config)).
+
+%% Event loop for the slave: for each announced affiliation change,
+%% expects a self-presence (code 110) with the new role/affiliation;
+%% on 'outcast' also requires code 301 (banned) and the actor's nick,
+%% after which the connection ends (we were removed from the room).
+change_affiliation_slave(Config, {Aff, Role, Status, Reason}) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(master_nick, Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    ct:comment("Receiving affiliation change to ~s", [Aff]),
+    #muc_user{status_codes = Codes,
+              items = [#muc_item{role = Role,
+                                 actor = Actor,
+                                 affiliation = Aff,
+                                 reason = Reason}]} =
+        recv_muc_presence(Config, MyNickJID, Status),
+    true = lists:member(110, Codes),
+    if Aff == outcast ->
+            ct:comment("Checking for status code '301' (banned)"),
+            true = lists:member(301, Codes),
+            ct:comment("Checking if actor is set"),
+            #muc_actor{nick = PeerNick} = Actor,
+            disconnect(Config);
+       true ->
+            change_affiliation_slave(Config, get_event(Config))
+    end.
+
+%% Master side of kick: sets the slave's role to 'none' and verifies
+%% the kick presence carries code 307, our nick as actor and the
+%% reason, and that the participant list becomes empty.
+kick_master(Config) ->
+    Room = muc_room_jid(Config),
+    MyNick = ?config(nick, Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    Reason = <<"Testing">>,
+    ok = join_new(Config),
+    ct:comment("Waiting for the slave to join"),
+    wait_for_slave(Config),
+    #muc_user{items = [#muc_item{role = participant,
+                                 jid = PeerJID,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    [#muc_item{role = participant, affiliation = none,
+               nick = PeerNick}|_] = get_role(Config, participant),
+    ct:comment("Kicking slave"),
+    ok = set_role(Config, none, Reason),
+    ct:comment("Receiving role change to 'none'"),
+    #muc_user{
+       status_codes = Codes,
+       items = [#muc_item{role = none,
+                          affiliation = none,
+                          actor = #muc_actor{nick = MyNick},
+                          reason = Reason}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    [] = get_role(Config, participant),
+    ct:comment("Checking if the code is '307' (kicked)"),
+    true = lists:member(307, Codes),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of kick: joins and waits to be kicked, checking the
+%% self-presence carries codes 110 and 307 plus actor and reason.
+kick_slave(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(master_nick, Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    Reason = <<"Testing">>,
+    wait_for_master(Config),
+    {[], _, _} = join(Config),
+    ct:comment("Receiving role change to 'none'"),
+    #muc_user{status_codes = Codes,
+              items = [#muc_item{role = none,
+                                 affiliation = none,
+                                 actor = #muc_actor{nick = PeerNick},
+                                 reason = Reason}]} =
+        recv_muc_presence(Config, MyNickJID, unavailable),
+    ct:comment("Checking if codes '110' (self-presence) "
+               "and '307' (kicked) are present"),
+    true = lists:member(110, Codes),
+    true = lists:member(307, Codes),
+    disconnect(Config).
+
+%% Master side of destroy: destroys the room (with an alternative room
+%% JID and a reason) and checks the destruction presence echoes both.
+destroy_master(Config) ->
+    Reason = <<"Testing">>,
+    Room = muc_room_jid(Config),
+    AltRoom = alt_room_jid(Config),
+    PeerJID = ?config(peer, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    ok = join_new(Config),
+    ct:comment("Waiting for slave to join"),
+    wait_for_slave(Config),
+    #muc_user{items = [#muc_item{role = participant,
+                                 jid = PeerJID,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    %% Second sync point: lets the slave's forbidden destroy attempt
+    %% happen before we actually destroy the room.
+    wait_for_slave(Config),
+    ok = destroy(Config, Reason),
+    ct:comment("Receiving destruction presence"),
+    #muc_user{items = [#muc_item{role = none,
+                                 affiliation = none}],
+              destroy = #muc_destroy{jid = AltRoom,
+                                     reason = Reason}} =
+        recv_muc_presence(Config, MyNickJID, unavailable),
+    disconnect(Config).
+
+%% Slave side of destroy: verifies a non-owner cannot destroy the room
+%% ('forbidden'), then receives the destruction presence with the
+%% alternative room JID and reason set by the master.
+destroy_slave(Config) ->
+    Reason = <<"Testing">>,
+    Room = muc_room_jid(Config),
+    AltRoom = alt_room_jid(Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    wait_for_master(Config),
+    {[], _, _} = join(Config),
+    #stanza_error{reason = 'forbidden'} = destroy(Config, Reason),
+    wait_for_master(Config),
+    ct:comment("Receiving destruction presence"),
+    #muc_user{items = [#muc_item{role = none,
+                                 affiliation = none}],
+              destroy = #muc_destroy{jid = AltRoom,
+                                     reason = Reason}} =
+        recv_muc_presence(Config, MyNickJID, unavailable),
+    disconnect(Config).
+
+%% Master side of vcard: a fresh room has no vCard ('item-not-found');
+%% the owner sets one, re-reads it, and shares it with the slave.
+vcard_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    FN = randoms:get_string(),
+    VCard = #vcard_temp{fn = FN},
+    ok = join_new(Config),
+    ct:comment("Waiting for slave to join"),
+    wait_for_slave(Config),
+    #muc_user{items = [#muc_item{role = participant,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    #stanza_error{reason = 'item-not-found'} = get_vcard(Config),
+    ok = set_vcard(Config, VCard),
+    VCard = get_vcard(Config),
+    put_event(Config, VCard),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    leave = get_event(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of vcard: can read the room vCard (even after leaving)
+%% but setting it as a non-owner is 'forbidden'.
+vcard_slave(Config) ->
+    wait_for_master(Config),
+    {[], _, _} = join(Config),
+    VCard = get_event(Config),
+    VCard = get_vcard(Config),
+    #stanza_error{reason = 'forbidden'} = set_vcard(Config, VCard),
+    ok = leave(Config),
+    %% vCard retrieval must also work for non-occupants.
+    VCard = get_vcard(Config),
+    put_event(Config, leave),
+    disconnect(Config).
+
+%% Master side of nick_change: tells the slave a new nick, then expects
+%% the unavailable presence with code 303 (nick change) naming the new
+%% nick, followed by an available presence from the new occupant JID.
+nick_change_master(Config) ->
+    NewNick = randoms:get_string(),
+    PeerJID = ?config(peer, Config),
+    PeerNickJID = peer_muc_jid(Config),
+    ok = master_join(Config),
+    put_event(Config, {new_nick, NewNick}),
+    ct:comment("Waiting for nickchange presence from the slave"),
+    #muc_user{status_codes = Codes,
+              items = [#muc_item{jid = PeerJID,
+                                 nick = NewNick}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    ct:comment("Checking if code '303' (nick change) is set"),
+    true = lists:member(303, Codes),
+    ct:comment("Waiting for updated presence from the slave"),
+    PeerNewNickJID = jid:replace_resource(PeerNickJID, NewNick),
+    recv_muc_presence(Config, PeerNewNickJID, available),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNewNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of nick_change: announces presence under the new nick,
+%% expects the nickchange self-presence (codes 110 and 303) from the
+%% old occupant JID and a self-presence update (code 110) from the new
+%% one, then leaves under the new nick.
+%% Fix: the three lists:member/2 results were discarded, so the status
+%% code checks never asserted anything; match them against 'true' as
+%% the sibling slave cases (e.g. kick_slave/1) do.
+nick_change_slave(Config) ->
+    MyJID = my_jid(Config),
+    MyNickJID = my_muc_jid(Config),
+    {[], _, _} = slave_join(Config),
+    {new_nick, NewNick} = get_event(Config),
+    MyNewNickJID = jid:replace_resource(MyNickJID, NewNick),
+    ct:comment("Sending new presence"),
+    send(Config, #presence{to = MyNewNickJID}),
+    ct:comment("Receiving nickchange self-presence"),
+    #muc_user{status_codes = Codes1,
+              items = [#muc_item{role = participant,
+                                 jid = MyJID,
+                                 nick = NewNick}]} =
+        recv_muc_presence(Config, MyNickJID, unavailable),
+    ct:comment("Checking if codes '110' (self-presence) and "
+               "'303' (nickchange) are present"),
+    true = lists:member(110, Codes1),
+    true = lists:member(303, Codes1),
+    ct:comment("Receiving self-presence update"),
+    #muc_user{status_codes = Codes2,
+              items = [#muc_item{jid = MyJID,
+                                 role = participant}]} =
+        recv_muc_presence(Config, MyNewNickJID, available),
+    ct:comment("Checking if code '110' (self-presence) is set"),
+    true = lists:member(110, Codes2),
+    %% Continue under the new nick so leave/1 addresses the right JID.
+    NewConfig = set_opt(nick, NewNick, Config),
+    ok = leave(NewConfig),
+    disconnect(NewConfig).
+
+%% Master side of config_title_desc: sets room name and description
+%% via the config form (code 104) and re-reads them from the form.
+config_title_desc_master(Config) ->
+    Title = randoms:get_string(),
+    Desc = randoms:get_string(),
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = master_join(Config),
+    [104] = set_config(Config, [{roomname, Title}, {roomdesc, Desc}]),
+    RoomCfg = get_config(Config),
+    Title = proplists:get_value(roomname, RoomCfg),
+    Desc = proplists:get_value(roomdesc, RoomCfg),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of config_title_desc: just observes the config-change
+%% notification (code 104) and leaves.
+config_title_desc_slave(Config) ->
+    {[], _, _} = slave_join(Config),
+    [104] = recv_config_change_message(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of config_public_list: room starts public, then is made
+%% hidden with a non-public occupant list; feature list must change
+%% from 'muc_public' to 'muc_hidden'.
+%% Fix: the lists:member/2 results were discarded, so the feature
+%% checks never asserted anything; match them against 'true'.
+config_public_list_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = join_new(Config),
+    wait_for_slave(Config),
+    recv_muc_presence(Config, PeerNickJID, available),
+    true = lists:member(<<"muc_public">>, get_features(Config, Room)),
+    [104] = set_config(Config, [{public_list, false},
+                                {publicroom, false}]),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    true = lists:member(<<"muc_hidden">>, get_features(Config, Room)),
+    wait_for_slave(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of config_public_list: while the room is public the room
+%% and its occupants are discoverable; after the master hides it both
+%% disco#items queries must come back empty.
+config_public_list_slave(Config) ->
+    Room = muc_room_jid(Config),
+    wait_for_master(Config),
+    PeerNick = ?config(peer_nick, Config),
+    PeerNickJID = peer_muc_jid(Config),
+    [#disco_item{jid = Room}] = disco_items(Config),
+    [#disco_item{jid = PeerNickJID,
+                 name = PeerNick}] = disco_room_items(Config),
+    {[], _, _} = join(Config),
+    [104] = recv_config_change_message(Config),
+    ok = leave(Config),
+    [] = disco_items(Config),
+    [] = disco_room_items(Config),
+    wait_for_master(Config),
+    disconnect(Config).
+
+%% Master side of config_password: room starts unsecured, then gets a
+%% password; feature list must change from 'muc_unsecured' to
+%% 'muc_passwordprotected', and the password is shared with the slave.
+%% Fix: the lists:member/2 results were discarded, so the feature
+%% checks never asserted anything; match them against 'true'.
+config_password_master(Config) ->
+    Password = randoms:get_string(),
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = join_new(Config),
+    true = lists:member(<<"muc_unsecured">>, get_features(Config, Room)),
+    [104] = set_config(Config, [{passwordprotectedroom, true},
+                                {roomsecret, Password}]),
+    true = lists:member(<<"muc_passwordprotected">>, get_features(Config, Room)),
+    put_event(Config, Password),
+    recv_muc_presence(Config, PeerNickJID, available),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of config_password: joining without a password or with a
+%% wrong one fails with 'not-authorized'; the correct password works.
+config_password_slave(Config) ->
+    Password = get_event(Config),
+    #stanza_error{reason = 'not-authorized'} = join(Config),
+    #stanza_error{reason = 'not-authorized'} =
+        join(Config, #muc{password = randoms:get_string()}),
+    {[], _, _} = join(Config, #muc{password = Password}),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of config_whois: toggles the room between
+%% semi-anonymous and non-anonymous (codes 172/173) and checks the
+%% advertised features track the change.
+%% Fix: the lists:member/2 results were discarded, so the feature
+%% checks never asserted anything; match them against 'true'.
+config_whois_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNickJID = peer_muc_jid(Config),
+    MyNickJID = my_muc_jid(Config),
+    ok = master_join(Config),
+    true = lists:member(<<"muc_semianonymous">>, get_features(Config, Room)),
+    %% 172: room is now non-anonymous.
+    [172] = set_config(Config, [{whois, anyone}]),
+    true = lists:member(<<"muc_nonanonymous">>, get_features(Config, Room)),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    recv_muc_presence(Config, PeerNickJID, available),
+    send(Config, #presence{to = Room}),
+    recv_muc_presence(Config, MyNickJID, available),
+    %% 173: room is now semi-anonymous again.
+    [173] = set_config(Config, [{whois, moderators}]),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of config_whois: observes code 172 (non-anonymous),
+%% re-joins to see code 100 in the join status codes, receives the
+%% peer's presence with the real JID exposed, then observes code 173
+%% (back to semi-anonymous).
+config_whois_slave(Config) ->
+    PeerJID = ?config(peer, Config),
+    PeerNickJID = peer_muc_jid(Config),
+    {[], _, _} = slave_join(Config),
+    ct:comment("Checking if the room becomes non-anonymous (code '172')"),
+    [172] = recv_config_change_message(Config),
+    ct:comment("Re-joining in order to check status codes"),
+    ok = leave(Config),
+    {[], _, Codes} = join(Config),
+    ct:comment("Checking if code '100' (non-anonymous) present"),
+    true = lists:member(100, Codes),
+    ct:comment("Receiving presence from peer with JID exposed"),
+    #muc_user{items = [#muc_item{jid = PeerJID}]} =
+        recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Waiting for the room to become anonymous again (code '173')"),
+    [173] = recv_config_change_message(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master side of config_members_only: switching the room to
+%% members-only kicks the non-member slave (code 322); granting member
+%% affiliation lets the slave back in, and revoking it kicks the slave
+%% again with code 321.
+%% Fix: the lists:member/2 results were discarded, so the feature
+%% checks never asserted anything; match them against 'true'.
+config_members_only_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(peer, Config),
+    PeerBareJID = jid:remove_resource(PeerJID),
+    PeerNickJID = peer_muc_jid(Config),
+    ok = master_join(Config),
+    true = lists:member(<<"muc_open">>, get_features(Config, Room)),
+    [104] = set_config(Config, [{membersonly, true}]),
+    #muc_user{status_codes = Codes,
+              items = [#muc_item{jid = PeerJID,
+                                 affiliation = none,
+                                 role = none}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    ct:comment("Checking if code '322' (non-member) is set"),
+    true = lists:member(322, Codes),
+    true = lists:member(<<"muc_membersonly">>, get_features(Config, Room)),
+    ct:comment("Waiting for slave to fail joining the room"),
+    set_member = get_event(Config),
+    ok = set_affiliation(Config, member, randoms:get_string()),
+    %% Room broadcasts the new member affiliation to us (the owner).
+    #message{from = Room, type = normal} = Msg = recv_message(Config),
+    #muc_user{items = [#muc_item{jid = PeerBareJID,
+                                 affiliation = member}]} =
+        xmpp:get_subtag(Msg, #muc_user{}),
+    ct:comment("Asking peer to join"),
+    put_event(Config, join),
+    ct:comment("Waiting for peer to join"),
+    recv_muc_presence(Config, PeerNickJID, available),
+    ok = set_affiliation(Config, none, randoms:get_string()),
+    ct:comment("Waiting for peer to be kicked"),
+    #muc_user{status_codes = NewCodes,
+              items = [#muc_item{affiliation = none,
+                                 role = none}]} =
+        recv_muc_presence(Config, PeerNickJID, unavailable),
+    ct:comment("Checking if code '321' (became non-member in "
+               "members-only room) is set"),
+    true = lists:member(321, NewCodes),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave side of config_members_only: gets kicked when the room turns
+%% members-only (codes 110+322), cannot re-join
+%% ('registration-required'), asks the master for member affiliation,
+%% joins as member, and is finally kicked again (codes 110+321) when
+%% the affiliation is revoked.
+config_members_only_slave(Config) ->
+    MyJID = my_jid(Config),
+    MyNickJID = my_muc_jid(Config),
+    {[], _, _} = slave_join(Config),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Getting kicked because the room has become members-only"),
+    #muc_user{status_codes = Codes,
+              items = [#muc_item{jid = MyJID,
+                                 role = none,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, MyNickJID, unavailable),
+    ct:comment("Checking if the code '110' (self-presence) "
+               "and '322' (non-member) is set"),
+    true = lists:member(110, Codes),
+    true = lists:member(322, Codes),
+    ct:comment("Fail trying to join members-only room"),
+    #stanza_error{reason = 'registration-required'} = join(Config),
+    ct:comment("Asking the peer to set us member"),
+    put_event(Config, set_member),
+    ct:comment("Waiting for the peer to ask for join"),
+    join = get_event(Config),
+    {[], _, _} = join(Config, participant, member),
+    #muc_user{status_codes = NewCodes,
+              items = [#muc_item{jid = MyJID,
+                                 role = none,
+                                 affiliation = none}]} =
+        recv_muc_presence(Config, MyNickJID, unavailable),
+    ct:comment("Checking if the code '110' (self-presence) "
+               "and '321' (became non-member in members-only room) is set"),
+    true = lists:member(110, NewCodes),
+    true = lists:member(321, NewCodes),
+    disconnect(Config).
+
+%% Master half of the moderated-room testcase: makes the peer a visitor,
+%% waits for the slave's 'set_unmoderated' event, flips 'moderatedroom' off
+%% (config-change code 104) and then receives the slave's groupchat message.
+%% Fix: the two lists:member/2 feature checks previously discarded their
+%% boolean result, so they asserted nothing; match them against 'true' so a
+%% missing disco feature actually fails the testcase.
+config_moderated_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerNickJID = peer_muc_jid(Config),
+    ok = master_join(Config),
+    %% The room must advertise the 'muc_moderated' disco feature.
+    true = lists:member(<<"muc_moderated">>, get_features(Config, Room)),
+    ok = set_role(Config, visitor, randoms:get_string()),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, PeerNickJID, available),
+    set_unmoderated = get_event(Config),
+    [104] = set_config(Config, [{moderatedroom, false}]),
+    #message{from = PeerNickJID, type = groupchat} = recv_message(Config),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    %% After reconfiguration the feature must flip to 'muc_unmoderated'.
+    true = lists:member(<<"muc_unmoderated">>, get_features(Config, Room)),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: as a visitor, sending a groupchat message is rejected with
+%% 'forbidden'; after asking the master to unmoderate the room (and
+%% receiving code 104) the same message is reflected back from the room.
+config_moderated_slave(Config) ->
+    Room = muc_room_jid(Config),
+    MyNickJID = my_muc_jid(Config),
+    {[], _, _} = slave_join(Config),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, MyNickJID, available),
+    send(Config, #message{to = Room, type = groupchat}),
+    ErrMsg = #message{from = Room, type = error} = recv_message(Config),
+    #stanza_error{reason = 'forbidden'} = xmpp:get_error(ErrMsg),
+    put_event(Config, set_unmoderated),
+    [104] = recv_config_change_message(Config),
+    send(Config, #message{to = Room, type = groupchat}),
+    #message{from = MyNickJID, type = groupchat} = recv_message(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the private-messages testcase. Exercises the
+%% 'allow_private_messages_from_visitors' (moderators/nobody/anyone) and
+%% 'allow_private_messages' room options: receives the slave's private chats
+%% while they are permitted, then — after disabling private messages — gets
+%% a 'forbidden' error when sending one itself.
+config_private_messages_master(Config) ->
+    PeerNickJID = peer_muc_jid(Config),
+    ok = master_join(Config),
+    ct:comment("Waiting for a private message from the slave"),
+    #message{from = PeerNickJID, type = chat} = recv_message(Config),
+    ok = set_role(Config, visitor, <<>>),
+    ct:comment("Waiting for the peer to become a visitor"),
+    recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Waiting for a private message from the slave"),
+    #message{from = PeerNickJID, type = chat} = recv_message(Config),
+    [104] = set_config(Config, [{allow_private_messages_from_visitors, moderators}]),
+    ct:comment("Waiting for a private message from the slave"),
+    #message{from = PeerNickJID, type = chat} = recv_message(Config),
+    [104] = set_config(Config, [{allow_private_messages_from_visitors, nobody}]),
+    wait_for_slave(Config),
+    [104] = set_config(Config, [{allow_private_messages_from_visitors, anyone},
+				{allow_private_messages, false}]),
+    ct:comment("Fail trying to send a private message"),
+    send(Config, #message{to = PeerNickJID, type = chat}),
+    #message{from = PeerNickJID, type = error} = ErrMsg = recv_message(Config),
+    #stanza_error{reason = 'forbidden'} = xmpp:get_error(ErrMsg),
+    ok = set_role(Config, participant, <<>>),
+    ct:comment("Waiting for the peer to become a participant"),
+    recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Waiting for the peer to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: sends a private chat at each stage, observing the two config
+%% changes (code 104) and collecting 'forbidden' errors once visitors (and
+%% later everyone) are barred from private messaging.
+config_private_messages_slave(Config) ->
+    MyNickJID = my_muc_jid(Config),
+    PeerNickJID = peer_muc_jid(Config),
+    {[], _, _} = slave_join(Config),
+    ct:comment("Sending a private message"),
+    send(Config, #message{to = PeerNickJID, type = chat}),
+    ct:comment("Waiting to become a visitor"),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, MyNickJID, available),
+    ct:comment("Sending a private message"),
+    send(Config, #message{to = PeerNickJID, type = chat}),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Sending a private message"),
+    send(Config, #message{to = PeerNickJID, type = chat}),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Fail trying to send a private message"),
+    send(Config, #message{to = PeerNickJID, type = chat}),
+    #message{from = PeerNickJID, type = error} = ErrMsg1 = recv_message(Config),
+    #stanza_error{reason = 'forbidden'} = xmpp:get_error(ErrMsg1),
+    wait_for_master(Config),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Waiting to become a participant again"),
+    #muc_user{items = [#muc_item{role = participant}]} =
+	recv_muc_presence(Config, MyNickJID, available),
+    ct:comment("Fail trying to send a private message"),
+    send(Config, #message{to = PeerNickJID, type = chat}),
+    #message{from = PeerNickJID, type = error} = ErrMsg2 = recv_message(Config),
+    #stanza_error{reason = 'forbidden'} = xmpp:get_error(ErrMsg2),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the 'allow_query_users' testcase: answers the slave's
+%% occupant-to-occupant ping, then disables queries (code 104) and verifies
+%% its own ping is rejected with 'not-allowed'.
+config_query_master(Config) ->
+    PeerNickJID = peer_muc_jid(Config),
+    ok = join_new(Config),
+    wait_for_slave(Config),
+    recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Receiving IQ query from the slave"),
+    #iq{type = get, from = PeerNickJID, id = I,
+	sub_els = [#ping{}]} = recv_iq(Config),
+    send(Config, #iq{type = result, to = PeerNickJID, id = I}),
+    [104] = set_config(Config, [{allow_query_users, false}]),
+    ct:comment("Fail trying to send IQ"),
+    #iq{type = error, from = PeerNickJID} = Err =
+	send_recv(Config, #iq{type = get, to = PeerNickJID,
+			      sub_els = [#ping{}]}),
+    #stanza_error{reason = 'not-allowed'} = xmpp:get_error(Err),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: a ping sent before joining is bounced with 'not-acceptable';
+%% after joining the ping succeeds, and after the config change (code 104)
+%% it is bounced with 'not-allowed'.
+config_query_slave(Config) ->
+    PeerNickJID = peer_muc_jid(Config),
+    wait_for_master(Config),
+    ct:comment("Checking if IQ queries are denied from non-occupants"),
+    #iq{type = error, from = PeerNickJID} = Err1 =
+	send_recv(Config, #iq{type = get, to = PeerNickJID,
+			      sub_els = [#ping{}]}),
+    #stanza_error{reason = 'not-acceptable'} = xmpp:get_error(Err1),
+    {[], _, _} = join(Config),
+    ct:comment("Sending IQ to the master"),
+    #iq{type = result, from = PeerNickJID, sub_els = []} =
+	send_recv(Config, #iq{to = PeerNickJID, type = get, sub_els = [#ping{}]}),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Fail trying to send IQ"),
+    #iq{type = error, from = PeerNickJID} = Err2 =
+	send_recv(Config, #iq{type = get, to = PeerNickJID,
+			      sub_els = [#ping{}]}),
+    #stanza_error{reason = 'not-allowed'} = xmpp:get_error(Err2),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the 'allowinvites' testcase: enables invitations
+%% (code 104), receives the slave's mediated invitation from the room,
+%% disables invitations again, then — on the slave's 'send_invitation'
+%% event — proves the owner can still invite.
+config_allow_invites_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(peer, Config),
+    PeerNickJID = peer_muc_jid(Config),
+    ok = master_join(Config),
+    [104] = set_config(Config, [{allowinvites, true}]),
+    ct:comment("Receiving an invitation from the slave"),
+    #message{from = Room, type = normal} = recv_message(Config),
+    [104] = set_config(Config, [{allowinvites, false}]),
+    send_invitation = get_event(Config),
+    ct:comment("Sending an invitation"),
+    send(Config, #message{to = Room, type = normal,
+			  sub_els =
+			      [#muc_user{
+				  invites =
+				      [#muc_invite{to = PeerJID}]}]}),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: sends a mediated invitation while allowed, then gets a
+%% 'not-allowed' error once the option is disabled, and finally receives
+%% the master's invitation (owners may always invite).
+config_allow_invites_slave(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(peer, Config),
+    InviteMsg = #message{to = Room, type = normal,
+			 sub_els =
+			     [#muc_user{
+				 invites =
+				     [#muc_invite{to = PeerJID}]}]},
+    {[], _, _} = slave_join(Config),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Sending an invitation"),
+    send(Config, InviteMsg),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Fail sending an invitation"),
+    send(Config, InviteMsg),
+    #message{from = Room, type = error} = Err = recv_message(Config),
+    #stanza_error{reason = 'not-allowed'} = xmpp:get_error(Err),
+    ct:comment("Checking if the master is still able to send invitations"),
+    put_event(Config, send_invitation),
+    #message{from = Room, type = normal} = recv_message(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the 'allow_visitor_status' testcase: has the slave join
+%% as a visitor with a random status text, sees the status broadcast, then
+%% disables visitor status and sees the next update with <status/> stripped.
+config_visitor_status_master(Config) ->
+    PeerNickJID = peer_muc_jid(Config),
+    Status = xmpp:mk_text(randoms:get_string()),
+    ok = join_new(Config),
+    [104] = set_config(Config, [{members_by_default, false}]),
+    ct:comment("Asking the slave to join as a visitor"),
+    put_event(Config, {join, Status}),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, PeerNickJID, available),
+    ct:comment("Receiving status change from the visitor"),
+    #presence{from = PeerNickJID, status = Status} = recv_presence(Config),
+    [104] = set_config(Config, [{allow_visitor_status, false}]),
+    ct:comment("Receiving status change with <status/> stripped"),
+    #presence{from = PeerNickJID, status = []} = recv_presence(Config),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: joins as a visitor with the status text received from the
+%% master, sees its own status echoed, then — after the config change
+%% (code 104) — sees the echo come back with an empty status list.
+config_visitor_status_slave(Config) ->
+    Room = muc_room_jid(Config),
+    MyNickJID = my_muc_jid(Config),
+    ct:comment("Waiting for 'join' command from the master"),
+    {join, Status} = get_event(Config),
+    {[], _, _} = join(Config, visitor, none),
+    ct:comment("Sending status change"),
+    send(Config, #presence{to = Room, status = Status}),
+    #presence{from = MyNickJID, status = Status} = recv_presence(Config),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Sending status change again"),
+    send(Config, #presence{to = Room, status = Status}),
+    #presence{from = MyNickJID, status = []} = recv_presence(Config),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the 'allow_voice_requests' testcase: admits the slave as
+%% a visitor, then disables voice requests (code 104) and waits for the
+%% slave to fail and leave.
+config_allow_voice_requests_master(Config) ->
+    PeerNickJID = peer_muc_jid(Config),
+    ok = join_new(Config),
+    [104] = set_config(Config, [{members_by_default, false}]),
+    ct:comment("Asking the slave to join as a visitor"),
+    put_event(Config, join),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, PeerNickJID, available),
+    [104] = set_config(Config, [{allow_voice_requests, false}]),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: joins as a visitor, observes the config change (code 104),
+%% then submits a muc#request voice-request form and expects a 'forbidden'
+%% error from the room.
+config_allow_voice_requests_slave(Config) ->
+    Room = muc_room_jid(Config),
+    ct:comment("Waiting for 'join' command from the master"),
+    join = get_event(Config),
+    {[], _, _} = join(Config, visitor),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Fail sending voice request"),
+    Fs = muc_request:encode([{role, participant}]),
+    X = #xdata{type = submit, fields = Fs},
+    send(Config, #message{to = Room, sub_els = [X]}),
+    #message{from = Room, type = error} = Err = recv_message(Config),
+    #stanza_error{reason = 'forbidden'} = xmpp:get_error(Err),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the 'voice_request_min_interval' testcase: admits the
+%% slave as a visitor, sets a 5 second minimum interval between voice
+%% requests (code 104), denies the first request via a muc#request approval
+%% form with {request_allow, false}, and waits for the repeated request
+%% that the slave sends after sleeping past the interval.
+config_voice_request_interval_master(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(peer, Config),
+    PeerNick = ?config(peer_nick, Config),
+    PeerNickJID = peer_muc_jid(Config),
+    ok = join_new(Config),
+    [104] = set_config(Config, [{members_by_default, false}]),
+    ct:comment("Asking the slave to join as a visitor"),
+    put_event(Config, join),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, PeerNickJID, available),
+    [104] = set_config(Config, [{voice_request_min_interval, 5}]),
+    ct:comment("Receiving a voice request from slave"),
+    #message{from = Room, type = normal} = recv_message(Config),
+    ct:comment("Deny voice request at first"),
+    Fs = muc_request:encode([{jid, PeerJID}, {role, participant},
+			     {nick, PeerNick}, {request_allow, false}]),
+    send(Config, #message{to = Room, sub_els = [#xdata{type = submit,
+						       fields = Fs}]}),
+    put_event(Config, denied),
+    ct:comment("Waiting for repeated voice request from the slave"),
+    #message{from = Room, type = normal} = recv_message(Config),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: sends a voice request, waits for the master's denial,
+%% immediately retries and expects 'resource-constraint' (rate-limited),
+%% then sleeps for the 5 second interval and retries successfully.
+%% Fix: corrected the garbled ct log message ("Receving ... to fast").
+config_voice_request_interval_slave(Config) ->
+    Room = muc_room_jid(Config),
+    Fs = muc_request:encode([{role, participant}]),
+    X = #xdata{type = submit, fields = Fs},
+    ct:comment("Waiting for 'join' command from the master"),
+    join = get_event(Config),
+    {[], _, _} = join(Config, visitor),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Sending voice request"),
+    send(Config, #message{to = Room, sub_els = [X]}),
+    ct:comment("Waiting for the master to deny our voice request"),
+    denied = get_event(Config),
+    ct:comment("Requesting voice again"),
+    send(Config, #message{to = Room, sub_els = [X]}),
+    ct:comment("Receiving voice request error because we're sending too fast"),
+    #message{from = Room, type = error} = Err = recv_message(Config),
+    #stanza_error{reason = 'resource-constraint'} = xmpp:get_error(Err),
+    ct:comment("Waiting for 5 seconds"),
+    timer:sleep(timer:seconds(5)),
+    ct:comment("Repeating again"),
+    send(Config, #message{to = Room, sub_els = [X]}),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the 'allow_visitor_nickchange' testcase: admits the slave
+%% as a visitor, disables visitor nick changes (code 104) and waits for the
+%% slave to fail and leave.
+config_visitor_nickchange_master(Config) ->
+    PeerNickJID = peer_muc_jid(Config),
+    ok = join_new(Config),
+    [104] = set_config(Config, [{members_by_default, false}]),
+    ct:comment("Asking the slave to join as a visitor"),
+    put_event(Config, join),
+    ct:comment("Waiting for the slave to join"),
+    #muc_user{items = [#muc_item{role = visitor}]} =
+	recv_muc_presence(Config, PeerNickJID, available),
+    [104] = set_config(Config, [{allow_visitor_nickchange, false}]),
+    ct:comment("Waiting for the slave to leave"),
+    recv_muc_presence(Config, PeerNickJID, unavailable),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Slave half: joins as a visitor, observes the config change (code 104),
+%% then tries to switch to a random nick and expects a 'not-allowed'
+%% presence error addressed from the new nick JID.
+config_visitor_nickchange_slave(Config) ->
+    NewNick = randoms:get_string(),
+    MyNickJID = my_muc_jid(Config),
+    MyNewNickJID = jid:replace_resource(MyNickJID, NewNick),
+    ct:comment("Waiting for 'join' command from the master"),
+    join = get_event(Config),
+    {[], _, _} = join(Config, visitor),
+    [104] = recv_config_change_message(Config),
+    ct:comment("Fail trying to change nickname"),
+    send(Config, #presence{to = MyNewNickJID}),
+    #presence{from = MyNewNickJID, type = error} = Err = recv_presence(Config),
+    #stanza_error{reason = 'not-allowed'} = xmpp:get_error(Err),
+    ok = leave(Config),
+    disconnect(Config).
+
+%% Master half of the nick-registration testcase: registers "master1",
+%% unregisters it via jabber:register <remove/>, registers "master2",
+%% re-registers as "master", lets the slave fail to take the occupied nick,
+%% and finally unregisters by submitting an empty nick.
+register_master(Config) ->
+    MUC = muc_jid(Config),
+    %% Register nick "master1"
+    register_nick(Config, MUC, <<"">>, <<"master1">>),
+    %% Unregister nick "master1" via jabber:register
+    #iq{type = result, sub_els = []} =
+	send_recv(Config, #iq{type = set, to = MUC,
+			      sub_els = [#register{remove = true}]}),
+    %% Register nick "master2"
+    register_nick(Config, MUC, <<"">>, <<"master2">>),
+    %% Now register nick "master"
+    register_nick(Config, MUC, <<"master2">>, <<"master">>),
+    %% Wait for slave to fail trying to register nick "master"
+    wait_for_slave(Config),
+    wait_for_slave(Config),
+    %% Now register empty ("") nick, which means we're unregistering
+    register_nick(Config, MUC, <<"master">>, <<"">>),
+    disconnect(Config).
+
+%% Slave half: attempts to register the nick "master" (already taken by the
+%% master) and expects the registration IQ to fail with an error.
+register_slave(Config) ->
+    MUC = muc_jid(Config),
+    wait_for_master(Config),
+    %% Trying to register occupied nick "master"
+    Fs = muc_register:encode([{roomnick, <<"master">>}]),
+    X = #xdata{type = submit, fields = Fs},
+    #iq{type = error} =
+	send_recv(Config, #iq{type = set, to = MUC,
+			      sub_els = [#register{xdata = X}]}),
+    wait_for_master(Config),
+    disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+%% Build the testcase atom for a single (non-parallel) test: 'muc_' ++ T.
+single_test(T) ->
+    list_to_atom("muc_" ++ atom_to_list(T)).
+
+%% Build a Common Test parallel group {GroupName, [parallel], [Master, Slave]}
+%% from a base name T, producing muc_T, muc_T_master and muc_T_slave atoms.
+master_slave_test(T) ->
+    {list_to_atom("muc_" ++ atom_to_list(T)), [parallel],
+     [list_to_atom("muc_" ++ atom_to_list(T) ++ "_master"),
+      list_to_atom("muc_" ++ atom_to_list(T) ++ "_slave")]}.
+
+%% Receive one presence, assert its 'from' and 'type' match, and return the
+%% decoded #muc_user{} subtag (or 'false' if xmpp:get_subtag finds none).
+recv_muc_presence(Config, From, Type) ->
+    Pres = #presence{from = From, type = Type} = recv_presence(Config),
+    xmpp:get_subtag(Pres, #muc_user{}).
+
+%% Join (and thereby create) the default room for this testcase.
+join_new(Config) ->
+    join_new(Config, muc_room_jid(Config)).
+
+%% Join a brand-new room 'Room': expects owner/moderator self-presence with
+%% status codes 110 (self-presence) and 201 (new room), followed by an empty
+%% room subject. If the testcase is configured with 'persistent_room', the
+%% room is additionally made persistent (config-change code 104).
+join_new(Config, Room) ->
+    MyJID = my_jid(Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    ct:comment("Joining new room"),
+    send(Config, #presence{to = MyNickJID, sub_els = [#muc{}]}),
+    %% As per XEP-0045 we MUST receive stanzas in the following order:
+    %% 1. In-room presence from other occupants
+    %% 2. In-room presence from the joining entity itself (so-called "self-presence")
+    %% 3. Room history (if any)
+    %% 4. The room subject
+    %% 5. Live messages, presence updates, new user joins, etc.
+    %% As this is the newly created room, we receive only the 2nd and 4th stanza.
+    #muc_user{
+       status_codes = Codes,
+       items = [#muc_item{role = moderator,
+			  jid = MyJID,
+			  affiliation = owner}]} =
+	recv_muc_presence(Config, MyNickJID, available),
+    ct:comment("Checking if codes '110' (self-presence) and "
+	       "'201' (new room) is set"),
+    true = lists:member(110, Codes),
+    true = lists:member(201, Codes),
+    ct:comment("Receiving empty room subject"),
+    #message{from = Room, type = groupchat, body = [],
+	     subject = [#text{data = <<>>}]} = recv_message(Config),
+    case ?config(persistent_room, Config) of
+	true ->
+	    [104] = set_config(Config, [{persistentroom, true}], Room),
+	    ok;
+	false ->
+	    ok
+    end.
+
+%% Collect the room history (delayed messages) and the terminating subject
+%% message; returns {History, SubjectMsg}.
+recv_history_and_subject(Config) ->
+    ct:comment("Receiving room history and/or subject"),
+    recv_history_and_subject(Config, []).
+
+%% Accumulator loop: messages carrying a #delay{} from the room are history;
+%% the first message with a subject, no body and no thread ends the loop.
+%% History is returned in arrival order (accumulator is reversed).
+recv_history_and_subject(Config, History) ->
+    Room = muc_room_jid(Config),
+    #message{type = groupchat, subject = Subj,
+	     body = Body, thread = Thread} = Msg = recv_message(Config),
+    case xmpp:get_subtag(Msg, #delay{}) of
+	#delay{from = Room} ->
+	    recv_history_and_subject(Config, [Msg|History]);
+	false when Subj /= [], Body == [], Thread == undefined ->
+	    {lists:reverse(History), Msg}
+    end.
+
+%% Join the room as participant/none with a plain #muc{} element.
+join(Config) ->
+    join(Config, participant, none, #muc{}).
+
+%% Join with either an expected role or a custom #muc{} sub-element.
+join(Config, Role) when is_atom(Role) ->
+    join(Config, Role, none, #muc{});
+join(Config, #muc{} = SubEl) ->
+    join(Config, participant, none, SubEl).
+
+%% Join with an expected role and either an expected affiliation or a
+%% custom #muc{} sub-element.
+join(Config, Role, Aff) when is_atom(Role), is_atom(Aff) ->
+    join(Config, Role, Aff, #muc{});
+join(Config, Role, #muc{} = SubEl) when is_atom(Role) ->
+    join(Config, Role, none, SubEl).
+
+%% Join an existing room expecting role 'Role' and affiliation 'Aff'.
+%% Returns:
+%%   - the decoded #stanza_error{} if the join is bounced;
+%%   - {History, Subject, Codes} when the peer (owner/moderator) presence
+%%     arrives first, i.e. the peer is already in the room;
+%%   - {empty, History, Subject, Codes} when our self-presence arrives
+%%     first, i.e. the room is otherwise empty.
+%% In both success cases code 110 (self-presence) is asserted.
+join(Config, Role, Aff, SubEl) ->
+    ct:comment("Joining existing room as ~s/~s", [Aff, Role]),
+    MyJID = my_jid(Config),
+    Room = muc_room_jid(Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    PeerNick = ?config(peer_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    send(Config, #presence{to = MyNickJID, sub_els = [SubEl]}),
+    case recv_presence(Config) of
+	#presence{type = error, from = MyNickJID} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{});
+	#presence{type = available, from = PeerNickJID} = Pres ->
+	    #muc_user{items = [#muc_item{role = moderator,
+					 affiliation = owner}]} =
+		xmpp:get_subtag(Pres, #muc_user{}),
+	    ct:comment("Receiving initial self-presence"),
+	    #muc_user{status_codes = Codes,
+		      items = [#muc_item{role = Role,
+					 jid = MyJID,
+					 affiliation = Aff}]} =
+		recv_muc_presence(Config, MyNickJID, available),
+	    ct:comment("Checking if code '110' (self-presence) is set"),
+	    true = lists:member(110, Codes),
+	    {History, Subj} = recv_history_and_subject(Config),
+	    {History, Subj, Codes};
+	#presence{type = available, from = MyNickJID} = Pres ->
+	    #muc_user{status_codes = Codes,
+		      items = [#muc_item{role = Role,
+					 jid = MyJID,
+					 affiliation = Aff}]} =
+		xmpp:get_subtag(Pres, #muc_user{}),
+	    ct:comment("Checking if code '110' (self-presence) is set"),
+	    true = lists:member(110, Codes),
+	    {History, Subj} = recv_history_and_subject(Config),
+	    {empty, History, Subj, Codes}
+    end.
+
+%% Leave the default room for this testcase.
+leave(Config) ->
+    leave(Config, muc_room_jid(Config)).
+
+%% Leave 'Room': a non-slave session first un-persists a persistent room
+%% (config-change code 104), then sends unavailable presence and asserts the
+%% self-presence echo (role none, code 110). Returns ok.
+leave(Config, Room) ->
+    MyJID = my_jid(Config),
+    MyNick = ?config(nick, Config),
+    MyNickJID = jid:replace_resource(Room, MyNick),
+    Mode = ?config(mode, Config),
+    IsPersistent = ?config(persistent_room, Config),
+    if Mode /= slave, IsPersistent ->
+	    [104] = set_config(Config, [{persistentroom, false}], Room);
+       true ->
+	    ok
+    end,
+    ct:comment("Leaving the room"),
+    send(Config, #presence{to = MyNickJID, type = unavailable}),
+    #muc_user{
+       status_codes = Codes,
+       items = [#muc_item{role = none, jid = MyJID}]} =
+	recv_muc_presence(Config, MyNickJID, unavailable),
+    ct:comment("Checking if code '110' (self-presence) is set"),
+    true = lists:member(110, Codes),
+    ok.
+
+%% Fetch the room configuration form via muc#owner; returns the decoded
+%% muc_roomconfig proplist on success, or the #stanza_error{} on failure.
+get_config(Config) ->
+    ct:comment("Get room config"),
+    Room = muc_room_jid(Config),
+    case send_recv(Config,
+		   #iq{type = get, to = Room,
+		       sub_els = [#muc_owner{}]}) of
+	#iq{type = result,
+	    sub_els = [#muc_owner{config = #xdata{type = form} = X}]} ->
+	    muc_roomconfig:decode(X#xdata.fields);
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Submit a room configuration change to the default room.
+set_config(Config, RoomConfig) ->
+    set_config(Config, RoomConfig, muc_room_jid(Config)).
+
+%% Submit 'RoomConfig' (a muc_roomconfig proplist; [] submits an empty form)
+%% to 'Room'. On success, waits for the groupchat config-change notification
+%% and returns its sorted status codes (e.g. [104]); on failure returns the
+%% #stanza_error{}.
+set_config(Config, RoomConfig, Room) ->
+    ct:comment("Set room config: ~p", [RoomConfig]),
+    Fs = case RoomConfig of
+	     [] -> [];
+	     _ -> muc_roomconfig:encode(RoomConfig)
+	 end,
+    case send_recv(Config,
+		   #iq{type = set, to = Room,
+		       sub_els = [#muc_owner{config = #xdata{type = submit,
+							     fields = Fs}}]}) of
+	#iq{type = result, sub_els = []} ->
+	    #message{from = Room, type = groupchat} = Msg = recv_message(Config),
+	    #muc_user{status_codes = Codes} = xmpp:get_subtag(Msg, #muc_user{}),
+	    lists:sort(Codes);
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Make the current room persistent: asserts the config form is non-empty,
+%% then submits {persistentroom, true} expecting no status codes back.
+%% Fix: the third argument of set_config/3 is the room JID used as the IQ
+%% destination; the atom 'false' was passed here, which is not a JID.
+%% Delegate to set_config/2, which supplies muc_room_jid(Config) itself.
+create_persistent(Config) ->
+    [_|_] = get_config(Config),
+    [] = set_config(Config, [{persistentroom, true}]),
+    ok.
+
+%% Destroy the current room with an empty reason.
+destroy(Config) ->
+    destroy(Config, <<>>).
+
+%% Destroy the current room via muc#owner, advertising the alternate room
+%% JID and 'Reason'. Returns ok on success or the #stanza_error{} on error.
+destroy(Config, Reason) ->
+    Room = muc_room_jid(Config),
+    AltRoom = alt_room_jid(Config),
+    ct:comment("Destroying a room"),
+    case send_recv(Config,
+		   #iq{type = set, to = Room,
+		       sub_els = [#muc_owner{destroy = #muc_destroy{
+						  reason = Reason,
+						  jid = AltRoom}}]}) of
+	#iq{type = result, sub_els = []} ->
+	    ok;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% disco#items on the MUC service itself; returns the items sorted by JID.
+disco_items(Config) ->
+    MUC = muc_jid(Config),
+    ct:comment("Performing disco#items request to ~s", [jid:to_string(MUC)]),
+    #iq{type = result, from = MUC, sub_els = [DiscoItems]} =
+	send_recv(Config, #iq{type = get, to = MUC,
+			      sub_els = [#disco_items{}]}),
+    lists:keysort(#disco_item.jid, DiscoItems#disco_items.items).
+
+%% disco#items on the current room; returns the item list as received.
+disco_room_items(Config) ->
+    Room = muc_room_jid(Config),
+    #iq{type = result, from = Room, sub_els = [DiscoItems]} =
+	send_recv(Config, #iq{type = get, to = Room,
+			      sub_els = [#disco_items{}]}),
+    DiscoItems#disco_items.items.
+
+%% Request the muc#admin item list for affiliation 'Aff'; returns the items
+%% unsorted, or the #stanza_error{} on failure.
+%% NOTE(review): near-duplicate of get_affiliation/2 below, which sorts
+%% nothing either — presumably kept separate on purpose; confirm.
+get_affiliations(Config, Aff) ->
+    Room = muc_room_jid(Config),
+    case send_recv(Config,
+		   #iq{type = get, to = Room,
+		       sub_els = [#muc_admin{items = [#muc_item{affiliation = Aff}]}]}) of
+	#iq{type = result, sub_els = [#muc_admin{items = Items}]} ->
+	    Items;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Master-side join helper: create the room, sync with the slave, then
+%% assert the slave joins as participant/none. Returns ok.
+master_join(Config) ->
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    PeerNick = ?config(slave_nick, Config),
+    PeerNickJID = jid:replace_resource(Room, PeerNick),
+    ok = join_new(Config),
+    wait_for_slave(Config),
+    #muc_user{items = [#muc_item{jid = PeerJID,
+				 role = participant,
+				 affiliation = none}]} =
+	recv_muc_presence(Config, PeerNickJID, available),
+    ok.
+
+%% Slave-side join helper: sync with the master, then join the room.
+slave_join(Config) ->
+    wait_for_master(Config),
+    join(Config).
+
+%% Change the slave occupant's role (addressed by nick) via muc#admin.
+%% Returns ok on success or the #stanza_error{} on failure.
+set_role(Config, Role, Reason) ->
+    ct:comment("Changing role to ~s", [Role]),
+    Room = muc_room_jid(Config),
+    PeerNick = ?config(slave_nick, Config),
+    case send_recv(
+	   Config,
+	   #iq{type = set, to = Room,
+	       sub_els =
+		   [#muc_admin{
+		       items = [#muc_item{role = Role,
+					  reason = Reason,
+					  nick = PeerNick}]}]}) of
+	#iq{type = result, sub_els = []} ->
+	    ok;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Request the muc#admin item list for 'Role'; returns the items sorted by
+%% affiliation, or the #stanza_error{} on failure.
+get_role(Config, Role) ->
+    ct:comment("Requesting list for role '~s'", [Role]),
+    Room = muc_room_jid(Config),
+    case send_recv(
+	   Config,
+	   #iq{type = get, to = Room,
+	       sub_els = [#muc_admin{
+			     items = [#muc_item{role = Role}]}]}) of
+	#iq{type = result, sub_els = [#muc_admin{items = Items}]} ->
+	    lists:keysort(#muc_item.affiliation, Items);
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Change the slave's affiliation (addressed by bare JID) via muc#admin.
+%% Returns ok on success or the #stanza_error{} on failure.
+set_affiliation(Config, Aff, Reason) ->
+    ct:comment("Changing affiliation to ~s", [Aff]),
+    Room = muc_room_jid(Config),
+    PeerJID = ?config(slave, Config),
+    PeerBareJID = jid:remove_resource(PeerJID),
+    case send_recv(
+	   Config,
+	   #iq{type = set, to = Room,
+	       sub_els =
+		   [#muc_admin{
+		       items = [#muc_item{affiliation = Aff,
+					  reason = Reason,
+					  jid = PeerBareJID}]}]}) of
+	#iq{type = result, sub_els = []} ->
+	    ok;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Request the muc#admin item list for affiliation 'Aff'; returns the items
+%% unsorted, or the #stanza_error{} on failure.
+get_affiliation(Config, Aff) ->
+    ct:comment("Requesting list for affiliation '~s'", [Aff]),
+    Room = muc_room_jid(Config),
+    case send_recv(
+	   Config,
+	   #iq{type = get, to = Room,
+	       sub_els = [#muc_admin{
+			     items = [#muc_item{affiliation = Aff}]}]}) of
+	#iq{type = result, sub_els = [#muc_admin{items = Items}]} ->
+	    Items;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Set the room's vCard; returns ok on success or the #stanza_error{}.
+set_vcard(Config, VCard) ->
+    Room = muc_room_jid(Config),
+    ct:comment("Setting vCard for ~s", [jid:to_string(Room)]),
+    case send_recv(Config, #iq{type = set, to = Room,
+			       sub_els = [VCard]}) of
+	#iq{type = result, sub_els = []} ->
+	    ok;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Fetch the room's vcard-temp; returns the vCard sub-element on success or
+%% the #stanza_error{} on failure.
+%% Fix: corrected the misspelled log message ("Retreiving").
+get_vcard(Config) ->
+    Room = muc_room_jid(Config),
+    ct:comment("Retrieving vCard from ~s", [jid:to_string(Room)]),
+    case send_recv(Config, #iq{type = get, to = Room,
+			       sub_els = [#vcard_temp{}]}) of
+	#iq{type = result, sub_els = [VCard]} ->
+	    VCard;
+	#iq{type = error} = Err ->
+	    xmpp:get_subtag(Err, #stanza_error{})
+    end.
+
+%% Receive the room's groupchat configuration-change notification and
+%% return its status codes, sorted (typically [104]).
+recv_config_change_message(Config) ->
+    ct:comment("Receiving configuration change notification message"),
+    Room = muc_room_jid(Config),
+    #message{type = groupchat, from = Room} = Msg = recv_message(Config),
+    #muc_user{status_codes = Codes} = xmpp:get_subtag(Msg, #muc_user{}),
+    lists:sort(Codes).
+
+%% Register 'Nick' with the MUC service via jabber:register, asserting the
+%% transition from 'PrevNick'. An empty binary means "not registered": the
+%% 'registered' flag before/after is derived from PrevNick/Nick being
+%% non-empty, and submitting an empty nick unregisters. The function
+%% fetches the form, checks the currently registered nick, submits the new
+%% one, then re-fetches to confirm it stuck.
+register_nick(Config, MUC, PrevNick, Nick) ->
+    PrevRegistered = if PrevNick /= <<"">> -> true;
+			true -> false
+		     end,
+    NewRegistered = if Nick /= <<"">> -> true;
+		       true -> false
+		    end,
+    ct:comment("Requesting registration form"),
+    #iq{type = result,
+	sub_els = [#register{registered = PrevRegistered,
+			     xdata = #xdata{type = form,
+					    fields = FsWithoutNick}}]} =
+	send_recv(Config, #iq{type = get, to = MUC,
+			      sub_els = [#register{}]}),
+    ct:comment("Checking if previous nick is registered"),
+    PrevNick = proplists:get_value(
+		 roomnick, muc_register:decode(FsWithoutNick)),
+    X = #xdata{type = submit, fields = muc_register:encode([{roomnick, Nick}])},
+    ct:comment("Submitting registration form"),
+    #iq{type = result, sub_els = []} =
+	send_recv(Config, #iq{type = set, to = MUC,
+			      sub_els = [#register{xdata = X}]}),
+    ct:comment("Checking if new nick was registered"),
+    #iq{type = result,
+	sub_els = [#register{registered = NewRegistered,
+			     xdata = #xdata{type = form,
+					    fields = FsWithNick}}]} =
+	send_recv(Config, #iq{type = get, to = MUC,
+			      sub_els = [#register{}]}),
+    Nick = proplists:get_value(
+	     roomnick, muc_register:decode(FsWithNick)).
+
+%% Subscribe this session's nick to 'Events' on 'Room' (MucSub). Returns
+%% the sorted list of accepted events, or the error returned by
+%% xmpp:get_error/1 on failure.
+subscribe(Config, Events, Room) ->
+    MyNick = ?config(nick, Config),
+    case send_recv(Config,
+		   #iq{type = set, to = Room,
+		       sub_els = [#muc_subscribe{nick = MyNick,
+						 events = Events}]}) of
+	#iq{type = result, sub_els = [#muc_subscribe{events = ResEvents}]} ->
+	    lists:sort(ResEvents);
+	#iq{type = error} = Err ->
+	    xmpp:get_error(Err)
+    end.
+
+%% Cancel this session's MucSub subscription on 'Room'; returns ok or the
+%% error returned by xmpp:get_error/1.
+unsubscribe(Config, Room) ->
+    case send_recv(Config, #iq{type = set, to = Room,
+			       sub_els = [#muc_unsubscribe{}]}) of
+	#iq{type = result, sub_els = []} ->
+	    ok;
+	#iq{type = error} = Err ->
+	    xmpp:get_error(Err)
+    end.
diff --git a/test/offline_tests.erl b/test/offline_tests.erl
new file mode 100644
index 000000000..ea34544e3
--- /dev/null
+++ b/test/offline_tests.erl
@@ -0,0 +1,406 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 7 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(offline_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [send/2, disconnect/1, my_jid/1, send_recv/2, recv_message/1,
+ get_features/1, recv/1, get_event/1, server_jid/1,
+ wait_for_master/1, wait_for_slave/1]).
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+single_cases() ->
+ {offline_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(check_identity),
+ single_test(send_non_existent),
+ single_test(view_non_existent),
+ single_test(remove_non_existent),
+ single_test(view_non_integer),
+ single_test(remove_non_integer),
+ single_test(malformed_iq),
+ single_test(wrong_user),
+ single_test(unsupported_iq)]}.
+
+feature_enabled(Config) ->
+ Features = get_features(Config),
+ ct:comment("Checking if offline features are set"),
+ true = lists:member(?NS_FEATURE_MSGOFFLINE, Features),
+ true = lists:member(?NS_FLEX_OFFLINE, Features),
+ disconnect(Config).
+
+check_identity(Config) ->
+ #iq{type = result,
+ sub_els = [#disco_info{
+ node = ?NS_FLEX_OFFLINE,
+ identities = Ids}]} =
+ send_recv(Config, #iq{type = get,
+ sub_els = [#disco_info{
+ node = ?NS_FLEX_OFFLINE}]}),
+ true = lists:any(
+ fun(#identity{category = <<"automation">>,
+ type = <<"message-list">>}) -> true;
+ (_) -> false
+ end, Ids),
+ disconnect(Config).
+
+send_non_existent(Config) ->
+ Server = ?config(server, Config),
+ To = jid:make(<<"non-existent">>, Server),
+ #message{type = error} = Err = send_recv(Config, #message{to = To}),
+ #stanza_error{reason = 'service-unavailable'} = xmpp:get_error(Err),
+ disconnect(Config).
+
+view_non_existent(Config) ->
+ #stanza_error{reason = 'item-not-found'} = view(Config, [randoms:get_string()], false),
+ disconnect(Config).
+
+remove_non_existent(Config) ->
+ ok = remove(Config, [randoms:get_string()]),
+ disconnect(Config).
+
+view_non_integer(Config) ->
+ #stanza_error{reason = 'item-not-found'} = view(Config, [<<"foo">>], false),
+ disconnect(Config).
+
+remove_non_integer(Config) ->
+ #stanza_error{reason = 'item-not-found'} = remove(Config, [<<"foo">>]),
+ disconnect(Config).
+
+%% Send a range of malformed flexible-offline (XEP-0013) IQs --
+%% invalid combinations of items, fetch and purge for both get and
+%% set -- and assert each is rejected with <bad-request/>.
+malformed_iq(Config) ->
+    Item = #offline_item{node = randoms:get_string()},
+    %% '_ = false' / '_ = true' fill all remaining record fields at once
+    Range = [{Type, SubEl} || Type <- [set, get],
+			      SubEl <- [#offline{items = [], _ = false},
+					#offline{items = [Item], _ = true}]]
+	++ [{set, #offline{items = [], fetch = true, purge = false}},
+	    {set, #offline{items = [Item], fetch = true, purge = false}},
+	    {get, #offline{items = [], fetch = false, purge = true}},
+	    {get, #offline{items = [Item], fetch = false, purge = true}}],
+    lists:foreach(
+      fun({Type, SubEl}) ->
+	      #iq{type = error} = Err =
+		  send_recv(Config, #iq{type = Type, sub_els = [SubEl]}),
+	      #stanza_error{reason = 'bad-request'} = xmpp:get_error(Err)
+      end, Range),
+    disconnect(Config).
+
+wrong_user(Config) ->
+ Server = ?config(server, Config),
+ To = jid:make(<<"foo">>, Server),
+ Item = #offline_item{node = randoms:get_string()},
+ Range = [{Type, Items, Purge, Fetch} ||
+ Type <- [set, get],
+ Items <- [[], [Item]],
+ Purge <- [false, true],
+ Fetch <- [false, true]],
+ lists:foreach(
+ fun({Type, Items, Purge, Fetch}) ->
+ #iq{type = error} = Err =
+ send_recv(Config, #iq{type = Type, to = To,
+ sub_els = [#offline{items = Items,
+ purge = Purge,
+ fetch = Fetch}]}),
+ #stanza_error{reason = 'forbidden'} = xmpp:get_error(Err)
+ end, Range),
+ disconnect(Config).
+
+unsupported_iq(Config) ->
+ Item = #offline_item{node = randoms:get_string()},
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} = Err =
+ send_recv(Config, #iq{type = Type, sub_els = [Item]}),
+ #stanza_error{reason = 'service-unavailable'} = xmpp:get_error(Err)
+ end, [set, get]),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {offline_master_slave, [sequence],
+ [master_slave_test(flex),
+ master_slave_test(send_all)]}.
+
+flex_master(Config) ->
+ send_messages(Config, 5),
+ disconnect(Config).
+
+flex_slave(Config) ->
+ wait_for_master(Config),
+ peer_down = get_event(Config),
+ 5 = get_number(Config),
+ Nodes = get_nodes(Config),
+    %% Since the headers have been received, we can send initial presence
+    %% without the risk of being flooded with offline messages
+ #presence{} = send_recv(Config, #presence{}),
+ ct:comment("Checking fetch"),
+ Nodes = fetch(Config, lists:seq(1, 5)),
+ ct:comment("Fetching 2nd and 4th message"),
+ [2, 4] = view(Config, [lists:nth(2, Nodes), lists:nth(4, Nodes)]),
+ ct:comment("Deleting 2nd and 4th message"),
+ ok = remove(Config, [lists:nth(2, Nodes), lists:nth(4, Nodes)]),
+ ct:comment("Checking if messages were deleted"),
+ [1, 3, 5] = view(Config, [lists:nth(1, Nodes),
+ lists:nth(3, Nodes),
+ lists:nth(5, Nodes)]),
+ ct:comment("Purging everything left"),
+ ok = purge(Config),
+ ct:comment("Checking if there are no offline messages"),
+ 0 = get_number(Config),
+ clean(disconnect(Config)).
+
+send_all_master(Config) ->
+ wait_for_slave(Config),
+ Peer = ?config(peer, Config),
+ BarePeer = jid:remove_resource(Peer),
+ {Deliver, Errors} = message_iterator(Config),
+ N = lists:foldl(
+ fun(#message{type = error} = Msg, Acc) ->
+ send(Config, Msg#message{to = BarePeer}),
+ Acc;
+ (Msg, Acc) ->
+ I = send(Config, Msg#message{to = BarePeer}),
+ case xmpp:get_subtag(Msg, #xevent{}) of
+ #xevent{offline = true, id = undefined} ->
+ ct:comment("Receiving event-reply for:~n~s",
+ [xmpp:pp(Msg)]),
+ #message{} = Reply = recv_message(Config),
+ #xevent{id = I} = xmpp:get_subtag(Reply, #xevent{});
+ _ ->
+ ok
+ end,
+ Acc + 1
+ end, 0, Deliver),
+ lists:foreach(
+ fun(Msg) ->
+ #message{type = error} = Err =
+ send_recv(Config, Msg#message{to = BarePeer}),
+ #stanza_error{reason = 'service-unavailable'} = xmpp:get_error(Err)
+ end, Errors),
+ ok = wait_for_complete(Config, N),
+ disconnect(Config).
+
+send_all_slave(Config) ->
+ ServerJID = server_jid(Config),
+ Peer = ?config(peer, Config),
+ wait_for_master(Config),
+ peer_down = get_event(Config),
+ #presence{} = send_recv(Config, #presence{}),
+ {Deliver, _Errors} = message_iterator(Config),
+ lists:foreach(
+ fun(#message{type = error}) ->
+ ok;
+ (#message{type = Type, body = Body, subject = Subject} = Msg) ->
+ ct:comment("Receiving message:~n~s", [xmpp:pp(Msg)]),
+ #message{from = Peer,
+ type = Type,
+ body = Body,
+ subject = Subject} = RecvMsg = recv_message(Config),
+ ct:comment("Checking if delay tag is correctly set"),
+ #delay{from = ServerJID} = xmpp:get_subtag(RecvMsg, #delay{})
+ end, Deliver),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("offline_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("offline_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("offline_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("offline_" ++ atom_to_list(T) ++ "_slave")]}.
+
+clean(Config) ->
+ {U, S, _} = jid:tolower(my_jid(Config)),
+ mod_offline:remove_user(U, S),
+ Config.
+
+send_messages(Config, Num) ->
+ send_messages(Config, Num, normal, []).
+
+send_messages(Config, Num, Type, SubEls) ->
+ wait_for_slave(Config),
+ Peer = ?config(peer, Config),
+ BarePeer = jid:remove_resource(Peer),
+ lists:foreach(
+ fun(I) ->
+ Body = integer_to_binary(I),
+ send(Config,
+ #message{to = BarePeer,
+ type = Type,
+ body = [#text{data = Body}],
+ subject = [#text{data = <<"subject">>}],
+ sub_els = SubEls})
+ end, lists:seq(1, Num)),
+ ct:comment("Waiting for all messages to be delivered to offline spool"),
+ ok = wait_for_complete(Config, Num).
+
+recv_messages(Config, Num) ->
+ wait_for_master(Config),
+ peer_down = get_event(Config),
+ Peer = ?config(peer, Config),
+ #presence{} = send_recv(Config, #presence{}),
+ lists:foreach(
+ fun(I) ->
+ Text = integer_to_binary(I),
+ #message{sub_els = SubEls,
+ from = Peer,
+ body = [#text{data = Text}],
+ subject = [#text{data = <<"subject">>}]} =
+ recv_message(Config),
+ true = lists:keymember(delay, 1, SubEls)
+ end, lists:seq(1, Num)),
+ clean(disconnect(Config)).
+
+%% Query how many messages are in our offline spool via a disco#info
+%% request on the flexible-offline node (XEP-0013) and decode the
+%% number_of_messages field from the attached data form.
+get_number(Config) ->
+    ct:comment("Getting offline message number"),
+    #iq{type = result,
+	sub_els = [#disco_info{
+		      node = ?NS_FLEX_OFFLINE,
+		      xdata = [X]}]} =
+	send_recv(Config, #iq{type = get,
+			      sub_els = [#disco_info{
+					    node = ?NS_FLEX_OFFLINE}]}),
+    Form = flex_offline:decode(X#xdata.fields),
+    proplists:get_value(number_of_messages, Form).
+
+get_nodes(Config) ->
+ MyJID = my_jid(Config),
+ MyBareJID = jid:remove_resource(MyJID),
+ Peer = ?config(peer, Config),
+ Peer_s = jid:to_string(Peer),
+ ct:comment("Getting headers"),
+ #iq{type = result,
+ sub_els = [#disco_items{
+ node = ?NS_FLEX_OFFLINE,
+ items = DiscoItems}]} =
+ send_recv(Config, #iq{type = get,
+ sub_els = [#disco_items{
+ node = ?NS_FLEX_OFFLINE}]}),
+ ct:comment("Checking if headers are correct"),
+ lists:sort(
+ lists:map(
+ fun(#disco_item{jid = J, name = P, node = N})
+ when (J == MyBareJID) and (P == Peer_s) ->
+ N
+ end, DiscoItems)).
+
+%% Retrieve the whole offline spool (XEP-0013 <offline><fetch/>).
+%% Range lists the integers expected as message bodies, in spool
+%% order. Each delivered message must carry an <offline/> item and a
+%% <delay/> tag; returns the list of per-message node identifiers.
+fetch(Config, Range) ->
+    ID = send(Config, #iq{type = get, sub_els = [#offline{fetch = true}]}),
+    Nodes = lists:map(
+	      fun(I) ->
+		      Text = integer_to_binary(I),
+		      #message{body = Body, sub_els = SubEls} = recv(Config),
+		      [#text{data = Text}] = Body,
+		      #offline{items = [#offline_item{node = Node}]} =
+			  lists:keyfind(offline, 1, SubEls),
+		      #delay{} = lists:keyfind(delay, 1, SubEls),
+		      Node
+	      end, Range),
+    %% The IQ result terminates the fetch and must arrive after all
+    %% spooled messages; matching on ID ties it to our request
+    #iq{id = ID, type = result, sub_els = []} = recv(Config),
+    Nodes.
+
+view(Config, Nodes) ->
+ view(Config, Nodes, true).
+
+view(Config, Nodes, NeedReceive) ->
+ Items = lists:map(
+ fun(Node) ->
+ #offline_item{action = view, node = Node}
+ end, Nodes),
+ I = send(Config,
+ #iq{type = get, sub_els = [#offline{items = Items}]}),
+ Range = if NeedReceive ->
+ lists:map(
+ fun(Node) ->
+ #message{body = [#text{data = Text}],
+ sub_els = SubEls} = recv(Config),
+ #offline{items = [#offline_item{node = Node}]} =
+ lists:keyfind(offline, 1, SubEls),
+ binary_to_integer(Text)
+ end, Nodes);
+ true ->
+ []
+ end,
+ case recv(Config) of
+ #iq{id = I, type = result, sub_els = []} -> Range;
+ #iq{id = I, type = error} = Err -> xmpp:get_error(Err)
+ end.
+
+remove(Config, Nodes) ->
+ Items = lists:map(
+ fun(Node) ->
+ #offline_item{action = remove, node = Node}
+ end, Nodes),
+ case send_recv(Config, #iq{type = set,
+ sub_els = [#offline{items = Items}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+%% Purge the entire offline spool (XEP-0013 <offline><purge/>).
+%% Returns ok, or the decoded #stanza_error{} on rejection.
+purge(Config) ->
+    PurgeIQ = #iq{type = set, sub_els = [#offline{purge = true}]},
+    case send_recv(Config, PurgeIQ) of
+	#iq{type = error} = ErrIQ ->
+	    xmpp:get_error(ErrIQ);
+	#iq{type = result, sub_els = []} ->
+	    ok
+    end.
+
+%% Poll mod_offline until exactly N messages have landed in the
+%% peer's spool. The foldl acts as a retry loop with increasing
+%% sleeps (about 17s worst case); once 'ok' is reached the remaining
+%% iterations are no-ops. Returns ok, or error if the count never
+%% becomes N.
+wait_for_complete(_Config, 0) ->
+    ok;
+wait_for_complete(Config, N) ->
+    {U, S, _} = jid:tolower(?config(peer, Config)),
+    lists:foldl(
+      fun(_Time, ok) ->
+	      ok;
+	 (Time, Acc) ->
+	      timer:sleep(Time),
+	      case mod_offline:count_offline_messages(U, S) of
+		  N -> ok;
+		  _ -> Acc
+	      end
+      end, error, [0, 100, 200, 2000, 5000, 10000]).
+
+%% Build the cross-product of message types, bodies, subjects and
+%% relevant sub-elements (chat states, hints, XEP-0022 events, delay,
+%% offline) and partition it into {Deliver, Errors}:
+%%   Deliver - messages the server accepts without an error reply
+%%             (error-type messages among them produce no reply at
+%%             all; the rest are expected to reach the offline spool),
+%%   Errors  - messages expected to bounce with an error
+%%             (<service-unavailable/> per send_all_master).
+%% NOTE(review): the partition fun matches on exact sub_els positions,
+%% so it silently depends on the concatenation order used to build
+%% AllEls -- keep the two in sync when editing.
+message_iterator(Config) ->
+    ServerJID = server_jid(Config),
+    ChatStates = [[#chatstate{type = composing}]],
+    Offline = [[#offline{}]],
+    Hints = [[#hint{type = T}] || T <- [store, 'no-store']],
+    XEvent = [[#xevent{id = ID, offline = OfflineFlag}]
+	      || ID <- [undefined, randoms:get_string()],
+		 OfflineFlag <- [false, true]],
+    Delay = [[#delay{stamp = p1_time_compat:timestamp(), from = ServerJID}]],
+    AllEls = [Els1 ++ Els2 || Els1 <- [[]] ++ ChatStates ++ Delay ++ Hints ++ Offline,
+			      Els2 <- [[]] ++ XEvent],
+    All = [#message{type = Type, body = Body, subject = Subject, sub_els = Els}
+	   || %%Type <- [chat],
+	      Type <- [error, chat, normal, groupchat, headline],
+	      Body <- [[], xmpp:mk_text(<<"body">>)],
+	      Subject <- [[], xmpp:mk_text(<<"subject">>)],
+	      Els <- AllEls],
+    lists:partition(
+      fun(#message{type = error}) -> true;
+	 (#message{sub_els = [#offline{}|_]}) -> false;
+	 (#message{sub_els = [_, #xevent{id = I}]}) when I /= undefined -> false;
+	 (#message{sub_els = [#xevent{id = I}]}) when I /= undefined -> false;
+	 (#message{sub_els = [#hint{type = store}|_]}) -> true;
+	 (#message{sub_els = [#hint{type = 'no-store'}|_]}) -> false;
+	 (#message{body = [], subject = []}) -> false;
+	 (#message{type = Type}) -> (Type == chat) or (Type == normal);
+	 (_) -> false
+      end, All).
diff --git a/test/privacy_tests.erl b/test/privacy_tests.erl
new file mode 100644
index 000000000..640f53d48
--- /dev/null
+++ b/test/privacy_tests.erl
@@ -0,0 +1,822 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 18 Oct 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(privacy_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [disconnect/1, send_recv/2, get_event/1, put_event/2,
+ recv_iq/1, recv_presence/1, recv_message/1, recv/1,
+ send/2, my_jid/1, server_jid/1, get_features/1,
+ set_roster/3, del_roster/1, get_roster/1]).
+-include("suite.hrl").
+-include("mod_roster.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single cases
+%%%===================================================================
+single_cases() ->
+ {privacy_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(set_get_list),
+ single_test(get_list_non_existent),
+ single_test(set_default),
+ single_test(del_default),
+ single_test(set_default_non_existent),
+ single_test(set_active),
+ single_test(del_active),
+ single_test(set_active_non_existent),
+ single_test(remove_list),
+ single_test(remove_default_list),
+ single_test(remove_active_list),
+ %% TODO: this should be fixed
+ %% single_test(remove_list_non_existent),
+ single_test(allow_local_server),
+ single_test(malformed_iq_query),
+ single_test(malformed_get),
+ single_test(malformed_set),
+ single_test(malformed_type_value),
+ single_test(set_get_block)]}.
+
+feature_enabled(Config) ->
+ Features = get_features(Config),
+ true = lists:member(?NS_PRIVACY, Features),
+ true = lists:member(?NS_BLOCKING, Features),
+ disconnect(Config).
+
+set_get_list(Config) ->
+ ListName = <<"set-get-list">>,
+ Items = [#privacy_item{order = 0, action = deny,
+ type = jid, value = <<"user@jabber.org">>,
+ iq = true},
+ #privacy_item{order = 1, action = allow,
+ type = group, value = <<"group">>,
+ message = true},
+ #privacy_item{order = 2, action = allow,
+ type = subscription, value = <<"both">>,
+ presence_in = true},
+ #privacy_item{order = 3, action = deny,
+ type = subscription, value = <<"from">>,
+ presence_out = true},
+ #privacy_item{order = 4, action = deny,
+ type = subscription, value = <<"to">>,
+ iq = true, message = true},
+ #privacy_item{order = 5, action = deny,
+ type = subscription, value = <<"none">>,
+ _ = true},
+ #privacy_item{order = 6, action = deny}],
+ ok = set_items(Config, ListName, Items),
+ #privacy_list{name = ListName, items = Items1} = get_list(Config, ListName),
+ Items = lists:keysort(#privacy_item.order, Items1),
+ del_privacy(disconnect(Config)).
+
+get_list_non_existent(Config) ->
+ ListName = <<"get-list-non-existent">>,
+ #stanza_error{reason = 'item-not-found'} = get_list(Config, ListName),
+ disconnect(Config).
+
+set_default(Config) ->
+ ListName = <<"set-default">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_default(Config, ListName),
+ #privacy_query{default = ListName} = get_lists(Config),
+ del_privacy(disconnect(Config)).
+
+del_default(Config) ->
+ ListName = <<"del-default">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_default(Config, ListName),
+ #privacy_query{default = ListName} = get_lists(Config),
+ ok = set_default(Config, none),
+ #privacy_query{default = none} = get_lists(Config),
+ del_privacy(disconnect(Config)).
+
+set_default_non_existent(Config) ->
+ ListName = <<"set-default-non-existent">>,
+ #stanza_error{reason = 'item-not-found'} = set_default(Config, ListName),
+ disconnect(Config).
+
+set_active(Config) ->
+ ListName = <<"set-active">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_active(Config, ListName),
+ #privacy_query{active = ListName} = get_lists(Config),
+ del_privacy(disconnect(Config)).
+
+del_active(Config) ->
+ ListName = <<"del-active">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_active(Config, ListName),
+ #privacy_query{active = ListName} = get_lists(Config),
+ ok = set_active(Config, none),
+ #privacy_query{active = none} = get_lists(Config),
+ del_privacy(disconnect(Config)).
+
+set_active_non_existent(Config) ->
+ ListName = <<"set-active-non-existent">>,
+ #stanza_error{reason = 'item-not-found'} = set_active(Config, ListName),
+ disconnect(Config).
+
+remove_list(Config) ->
+ ListName = <<"remove-list">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = del_list(Config, ListName),
+ #privacy_query{lists = []} = get_lists(Config),
+ del_privacy(disconnect(Config)).
+
+remove_active_list(Config) ->
+ ListName = <<"remove-active-list">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_active(Config, ListName),
+ #stanza_error{reason = 'conflict'} = del_list(Config, ListName),
+ del_privacy(disconnect(Config)).
+
+remove_default_list(Config) ->
+ ListName = <<"remove-default-list">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_default(Config, ListName),
+ #stanza_error{reason = 'conflict'} = del_list(Config, ListName),
+ del_privacy(disconnect(Config)).
+
+remove_list_non_existent(Config) ->
+ ListName = <<"remove-list-non-existent">>,
+ #stanza_error{reason = 'item-not-found'} = del_list(Config, ListName),
+ disconnect(Config).
+
+allow_local_server(Config) ->
+ ListName = <<"allow-local-server">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_active(Config, ListName),
+ %% Whatever privacy rules are set, we should always communicate
+ %% with our home server
+ server_send_iqs(Config),
+ server_recv_iqs(Config),
+ send_stanzas_to_server_resource(Config),
+ del_privacy(disconnect(Config)).
+
+malformed_iq_query(Config) ->
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} =
+ send_recv(Config,
+ #iq{type = Type,
+ sub_els = [#privacy_list{name = <<"foo">>}]})
+ end, [get, set]),
+ disconnect(Config).
+
+malformed_get(Config) ->
+ JID = jid:make(randoms:get_string()),
+ lists:foreach(
+ fun(SubEl) ->
+ #iq{type = error} =
+ send_recv(Config, #iq{type = get, sub_els = [SubEl]})
+ end, [#privacy_query{active = none},
+ #privacy_query{default = none},
+ #privacy_query{lists = [#privacy_list{name = <<"1">>},
+ #privacy_list{name = <<"2">>}]},
+ #block{items = [JID]}, #unblock{items = [JID]},
+ #block{}, #unblock{}]),
+ disconnect(Config).
+
+%% Check that malformed privacy/blocking 'set' requests are rejected
+%% with an error IQ: changing active and default lists in one query,
+%% submitting several lists at once, an empty <block/>, and a
+%% <blocklist/> in a set (with and without items).
+malformed_set(Config) ->
+    lists:foreach(
+      fun(SubEl) ->
+	      #iq{type = error} =
+		  send_recv(Config, #iq{type = set, sub_els = [SubEl]})
+      end, [#privacy_query{active = none, default = none},
+	    #privacy_query{lists = [#privacy_list{name = <<"1">>},
+				    #privacy_list{name = <<"2">>}]},
+	    #block{},
+	    #block_list{},
+	    #block_list{items = [jid:make(randoms:get_string())]}]),
+    %% Close the session like every other single-case test in this
+    %% suite; previously the testcase left the connection open.
+    disconnect(Config).
+
+malformed_type_value(Config) ->
+ Item = #privacy_item{order = 0, action = deny},
+ #stanza_error{reason = 'bad-request'} =
+ set_items(Config, <<"malformed-jid">>,
+ [Item#privacy_item{type = jid, value = <<"@bad">>}]),
+ #stanza_error{reason = 'bad-request'} =
+ set_items(Config, <<"malformed-group">>,
+ [Item#privacy_item{type = group, value = <<"">>}]),
+ #stanza_error{reason = 'bad-request'} =
+ set_items(Config, <<"malformed-subscription">>,
+ [Item#privacy_item{type = subscription, value = <<"bad">>}]),
+ disconnect(Config).
+
+set_get_block(Config) ->
+ J1 = jid:make(randoms:get_string(), randoms:get_string()),
+ J2 = jid:make(randoms:get_string(), randoms:get_string()),
+ {ok, ListName} = set_block(Config, [J1, J2]),
+ JIDs = get_block(Config),
+ JIDs = lists:sort([J1, J2]),
+ {ok, ListName} = set_unblock(Config, [J2, J1]),
+ [] = get_block(Config),
+ del_privacy(disconnect(Config)).
+
+%%%===================================================================
+%%% Master-slave cases
+%%%===================================================================
+master_slave_cases() ->
+ {privacy_master_slave, [sequence],
+ [master_slave_test(deny_bare_jid),
+ master_slave_test(deny_full_jid),
+ master_slave_test(deny_server_jid),
+ master_slave_test(deny_group),
+ master_slave_test(deny_sub_both),
+ master_slave_test(deny_sub_from),
+ master_slave_test(deny_sub_to),
+ master_slave_test(deny_sub_none),
+ master_slave_test(deny_all),
+ master_slave_test(deny_offline),
+ master_slave_test(block),
+ master_slave_test(unblock),
+ master_slave_test(unblock_all)]}.
+
+deny_bare_jid_master(Config) ->
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ deny_master(Config, {jid, jid:to_string(PeerBareJID)}).
+
+deny_bare_jid_slave(Config) ->
+ deny_slave(Config).
+
+deny_full_jid_master(Config) ->
+ PeerJID = ?config(peer, Config),
+ deny_master(Config, {jid, jid:to_string(PeerJID)}).
+
+deny_full_jid_slave(Config) ->
+ deny_slave(Config).
+
+deny_server_jid_master(Config) ->
+ {_, Server, _} = jid:tolower(?config(peer, Config)),
+ deny_master(Config, {jid, Server}).
+
+deny_server_jid_slave(Config) ->
+ deny_slave(Config).
+
+deny_group_master(Config) ->
+ Group = randoms:get_string(),
+ deny_master(Config, {group, Group}).
+
+deny_group_slave(Config) ->
+ deny_slave(Config).
+
+deny_sub_both_master(Config) ->
+ deny_master(Config, {subscription, <<"both">>}).
+
+deny_sub_both_slave(Config) ->
+ deny_slave(Config).
+
+deny_sub_from_master(Config) ->
+ deny_master(Config, {subscription, <<"from">>}).
+
+deny_sub_from_slave(Config) ->
+ deny_slave(Config).
+
+deny_sub_to_master(Config) ->
+ deny_master(Config, {subscription, <<"to">>}).
+
+deny_sub_to_slave(Config) ->
+ deny_slave(Config).
+
+deny_sub_none_master(Config) ->
+ deny_master(Config, {subscription, <<"none">>}).
+
+deny_sub_none_slave(Config) ->
+ deny_slave(Config).
+
+deny_all_master(Config) ->
+ deny_master(Config, {undefined, <<"">>}).
+
+deny_all_slave(Config) ->
+ deny_slave(Config).
+
+deny_master(Config, {Type, Value}) ->
+ Sub = if Type == subscription ->
+ erlang:binary_to_atom(Value, utf8);
+ true ->
+ both
+ end,
+ Groups = if Type == group -> [Value];
+ true -> []
+ end,
+ set_roster(Config, Sub, Groups),
+ lists:foreach(
+ fun(Opts) ->
+ ct:pal("Set list for ~s, ~s, ~w", [Type, Value, Opts]),
+ ListName = randoms:get_string(),
+ Item = #privacy_item{order = 0,
+ action = deny,
+ iq = proplists:get_bool(iq, Opts),
+ message = proplists:get_bool(message, Opts),
+ presence_in = proplists:get_bool(presence_in, Opts),
+ presence_out = proplists:get_bool(presence_out, Opts),
+ type = Type,
+ value = Value},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_active(Config, ListName),
+ put_event(Config, Opts),
+ case is_presence_in_blocked(Opts) of
+ true -> ok;
+ false -> recv_presences(Config)
+ end,
+ case is_iq_in_blocked(Opts) of
+ true -> ok;
+ false -> recv_iqs(Config)
+ end,
+ case is_message_in_blocked(Opts) of
+ true -> ok;
+ false -> recv_messages(Config)
+ end,
+ ct:comment("Waiting for 'send' command from the slave"),
+ send = get_event(Config),
+ case is_presence_out_blocked(Opts) of
+ true -> check_presence_blocked(Config, 'not-acceptable');
+ false -> ok
+ end,
+ case is_iq_out_blocked(Opts) of
+ true -> check_iq_blocked(Config, 'not-acceptable');
+ false -> send_iqs(Config)
+ end,
+ case is_message_out_blocked(Opts) of
+ true -> check_message_blocked(Config, 'not-acceptable');
+ false -> send_messages(Config)
+ end,
+ case is_other_blocked(Opts) of
+ true -> check_other_blocked(Config, 'not-acceptable');
+ false -> ok
+ end,
+ ct:comment("Waiting for slave to finish processing our stanzas"),
+ done = get_event(Config)
+ end,
+ [[iq], [message], [presence_in], [presence_out],
+ [iq, message, presence_in, presence_out], []]),
+ put_event(Config, disconnect),
+ clean_up(disconnect(Config)).
+
+deny_slave(Config) ->
+ set_roster(Config, both, []),
+ deny_slave(Config, get_event(Config)).
+
+deny_slave(Config, disconnect) ->
+ clean_up(disconnect(Config));
+deny_slave(Config, Opts) ->
+ send_presences(Config),
+ case is_iq_in_blocked(Opts) of
+ true -> check_iq_blocked(Config, 'service-unavailable');
+ false -> send_iqs(Config)
+ end,
+ case is_message_in_blocked(Opts) of
+ true -> check_message_blocked(Config, 'service-unavailable');
+ false -> send_messages(Config)
+ end,
+ put_event(Config, send),
+ case is_iq_out_blocked(Opts) of
+ true -> ok;
+ false -> recv_iqs(Config)
+ end,
+ case is_message_out_blocked(Opts) of
+ true -> ok;
+ false -> recv_messages(Config)
+ end,
+ put_event(Config, done),
+ deny_slave(Config, get_event(Config)).
+
+deny_offline_master(Config) ->
+ set_roster(Config, both, []),
+ ListName = <<"deny-offline">>,
+ Item = #privacy_item{order = 0, action = deny},
+ ok = set_items(Config, ListName, [Item]),
+ ok = set_default(Config, ListName),
+ NewConfig = disconnect(Config),
+ put_event(NewConfig, send),
+ ct:comment("Waiting for the slave to finish"),
+ done = get_event(NewConfig),
+ clean_up(NewConfig).
+
+deny_offline_slave(Config) ->
+ set_roster(Config, both, []),
+ ct:comment("Waiting for 'send' command from the master"),
+ send = get_event(Config),
+ send_presences(Config),
+ check_iq_blocked(Config, 'service-unavailable'),
+ check_message_blocked(Config, 'service-unavailable'),
+ put_event(Config, done),
+ clean_up(disconnect(Config)).
+
+block_master(Config) ->
+ PeerJID = ?config(peer, Config),
+ set_roster(Config, both, []),
+ {ok, _} = set_block(Config, [PeerJID]),
+ check_presence_blocked(Config, 'not-acceptable'),
+ check_iq_blocked(Config, 'not-acceptable'),
+ check_message_blocked(Config, 'not-acceptable'),
+ check_other_blocked(Config, 'not-acceptable'),
+ %% We should always be able to communicate with our home server
+ server_send_iqs(Config),
+ server_recv_iqs(Config),
+ send_stanzas_to_server_resource(Config),
+ put_event(Config, send),
+ done = get_event(Config),
+ clean_up(disconnect(Config)).
+
+block_slave(Config) ->
+ set_roster(Config, both, []),
+ ct:comment("Waiting for 'send' command from master"),
+ send = get_event(Config),
+ send_presences(Config),
+ check_iq_blocked(Config, 'service-unavailable'),
+ check_message_blocked(Config, 'service-unavailable'),
+ put_event(Config, done),
+ clean_up(disconnect(Config)).
+
+unblock_master(Config) ->
+ PeerJID = ?config(peer, Config),
+ set_roster(Config, both, []),
+ {ok, ListName} = set_block(Config, [PeerJID]),
+ {ok, ListName} = set_unblock(Config, [PeerJID]),
+ put_event(Config, send),
+ recv_presences(Config),
+ recv_iqs(Config),
+ recv_messages(Config),
+ clean_up(disconnect(Config)).
+
+unblock_slave(Config) ->
+ set_roster(Config, both, []),
+ ct:comment("Waiting for 'send' command from master"),
+ send = get_event(Config),
+ send_presences(Config),
+ send_iqs(Config),
+ send_messages(Config),
+ clean_up(disconnect(Config)).
+
+unblock_all_master(Config) ->
+ PeerJID = ?config(peer, Config),
+ set_roster(Config, both, []),
+ {ok, ListName} = set_block(Config, [PeerJID]),
+ {ok, ListName} = set_unblock(Config, []),
+ put_event(Config, send),
+ recv_presences(Config),
+ recv_iqs(Config),
+ recv_messages(Config),
+ clean_up(disconnect(Config)).
+
+unblock_all_slave(Config) ->
+ set_roster(Config, both, []),
+ ct:comment("Waiting for 'send' command from master"),
+ send = get_event(Config),
+ send_presences(Config),
+ send_iqs(Config),
+ send_messages(Config),
+ clean_up(disconnect(Config)).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("privacy_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("privacy_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("privacy_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("privacy_" ++ atom_to_list(T) ++ "_slave")]}.
+
+set_items(Config, Name, Items) ->
+ ct:comment("Setting privacy list ~s with items = ~p", [Name, Items]),
+ case send_recv(
+ Config,
+ #iq{type = set, sub_els = [#privacy_query{
+ lists = [#privacy_list{
+ name = Name,
+ items = Items}]}]}) of
+ #iq{type = result, sub_els = []} ->
+ ct:comment("Receiving privacy list push"),
+ #iq{type = set, id = ID,
+ sub_els = [#privacy_query{lists = [#privacy_list{
+ name = Name}]}]} =
+ recv_iq(Config),
+ send(Config, #iq{type = result, id = ID}),
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+get_list(Config, Name) ->
+ ct:comment("Requesting privacy list ~s", [Name]),
+ case send_recv(Config,
+ #iq{type = get,
+ sub_els = [#privacy_query{
+ lists = [#privacy_list{name = Name}]}]}) of
+ #iq{type = result, sub_els = [#privacy_query{lists = [List]}]} ->
+ List;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+get_lists(Config) ->
+ ct:comment("Requesting privacy lists"),
+ case send_recv(Config, #iq{type = get, sub_els = [#privacy_query{}]}) of
+ #iq{type = result, sub_els = [SubEl]} ->
+ SubEl;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+del_list(Config, Name) ->
+ case send_recv(
+ Config,
+ #iq{type = set, sub_els = [#privacy_query{
+ lists = [#privacy_list{
+ name = Name}]}]}) of
+ #iq{type = result, sub_els = []} ->
+ ct:comment("Receiving privacy list push"),
+ #iq{type = set, id = ID,
+ sub_els = [#privacy_query{lists = [#privacy_list{
+ name = Name}]}]} =
+ recv_iq(Config),
+ send(Config, #iq{type = result, id = ID}),
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+set_active(Config, Name) ->
+ ct:comment("Setting active privacy list ~s", [Name]),
+ case send_recv(
+ Config,
+ #iq{type = set, sub_els = [#privacy_query{active = Name}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+set_default(Config, Name) ->
+ ct:comment("Setting default privacy list ~s", [Name]),
+ case send_recv(
+ Config,
+ #iq{type = set, sub_els = [#privacy_query{default = Name}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+get_block(Config) ->
+ case send_recv(Config, #iq{type = get, sub_els = [#block_list{}]}) of
+ #iq{type = result, sub_els = [#block_list{items = JIDs}]} ->
+ lists:sort(JIDs);
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+%% Block JIDs via XEP-0191. On success the server is expected to
+%% push both a <block/> notification and a privacy-list push (the
+%% blocking command is backed by a privacy list here, as the pushed
+%% #privacy_query{} shows); ?recv2 accepts the two pushes in either
+%% order and both are acknowledged with empty results.
+%% Returns {ok, Name} with the backing privacy-list name, or the
+%% decoded #stanza_error{} on failure.
+set_block(Config, JIDs) ->
+    case send_recv(Config, #iq{type = set,
+			       sub_els = [#block{items = JIDs}]}) of
+	#iq{type = result, sub_els = []} ->
+	    {#iq{id = I1, sub_els = [#block{items = Items}]},
+	     #iq{id = I2, sub_els = [#privacy_query{lists = Lists}]}} =
+		?recv2(#iq{type = set, sub_els = [#block{}]},
+		       #iq{type = set, sub_els = [#privacy_query{}]}),
+	    send(Config, #iq{type = result, id = I1}),
+	    send(Config, #iq{type = result, id = I2}),
+	    ct:comment("Checking if all JIDs present in the push"),
+	    true = lists:sort(JIDs) == lists:sort(Items),
+	    ct:comment("Getting name of the corresponding privacy list"),
+	    [#privacy_list{name = Name}] = Lists,
+	    {ok, Name};
+	#iq{type = error} = Err ->
+	    xmpp:get_error(Err)
+    end.
+
+set_unblock(Config, JIDs) ->
+ ct:comment("Unblocking ~p", [JIDs]),
+ case send_recv(Config, #iq{type = set,
+ sub_els = [#unblock{items = JIDs}]}) of
+ #iq{type = result, sub_els = []} ->
+ {#iq{id = I1, sub_els = [#unblock{items = Items}]},
+ #iq{id = I2, sub_els = [#privacy_query{lists = Lists}]}} =
+ ?recv2(#iq{type = set, sub_els = [#unblock{}]},
+ #iq{type = set, sub_els = [#privacy_query{}]}),
+ send(Config, #iq{type = result, id = I1}),
+ send(Config, #iq{type = result, id = I2}),
+ ct:comment("Checking if all JIDs present in the push"),
+ true = lists:sort(JIDs) == lists:sort(Items),
+ ct:comment("Getting name of the corresponding privacy list"),
+ [#privacy_list{name = Name}] = Lists,
+ {ok, Name};
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+del_privacy(Config) ->
+ {U, S, _} = jid:tolower(my_jid(Config)),
+ ct:comment("Removing all privacy data"),
+ mod_privacy:remove_user(U, S),
+ Config.
+
+clean_up(Config) ->
+ del_privacy(del_roster(Config)).
+
+check_iq_blocked(Config, Reason) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Checking if all IQs are blocked"),
+ lists:foreach(
+ fun(Type) ->
+ send(Config, #iq{type = Type, to = PeerJID})
+ end, [error, result]),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} = Err =
+ send_recv(Config, #iq{type = Type, to = PeerJID,
+ sub_els = [#ping{}]}),
+ #stanza_error{reason = Reason} = xmpp:get_error(Err)
+ end, [set, get]).
+
+check_message_blocked(Config, Reason) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Checking if all messages are blocked"),
+ %% TODO: do something with headline and groupchat.
+ %% The hack from 64d96778b452aad72349b21d2ac94e744617b07a
+ %% screws this up.
+ lists:foreach(
+ fun(Type) ->
+ send(Config, #message{type = Type, to = PeerJID})
+ end, [error]),
+ lists:foreach(
+ fun(Type) ->
+ #message{type = error} = Err =
+ send_recv(Config, #message{type = Type, to = PeerJID}),
+ #stanza_error{reason = Reason} = xmpp:get_error(Err)
+ end, [chat, normal]).
+
+check_presence_blocked(Config, Reason) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Checking if all presences are blocked"),
+ lists:foreach(
+ fun(Type) ->
+ #presence{type = error} = Err =
+ send_recv(Config, #presence{type = Type, to = PeerJID}),
+ #stanza_error{reason = Reason} = xmpp:get_error(Err)
+ end, [available, unavailable]).
+
+check_other_blocked(Config, Reason) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Checking if subscriptions and presence-errors are blocked"),
+ send(Config, #presence{type = error, to = PeerJID}),
+ lists:foreach(
+ fun(Type) ->
+ #presence{type = error} = Err =
+ send_recv(Config, #presence{type = Type, to = PeerJID}),
+ #stanza_error{reason = Reason} = xmpp:get_error(Err)
+ end, [subscribe, subscribed, unsubscribe, unsubscribed]).
+
+send_presences(Config) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Sending all types of presences to the peer"),
+ lists:foreach(
+ fun(Type) ->
+ send(Config, #presence{type = Type, to = PeerJID})
+ end, [available, unavailable]).
+
+send_iqs(Config) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Sending all types of IQs to the peer"),
+ lists:foreach(
+ fun(Type) ->
+ send(Config, #iq{type = Type, to = PeerJID})
+ end, [set, get, error, result]).
+
+send_messages(Config) ->
+ PeerJID = ?config(peer, Config),
+ ct:comment("Sending all types of messages to the peer"),
+ lists:foreach(
+ fun(Type) ->
+ send(Config, #message{type = Type, to = PeerJID})
+ end, [chat, error, groupchat, headline, normal]).
+
+recv_presences(Config) ->
+ PeerJID = ?config(peer, Config),
+ lists:foreach(
+ fun(Type) ->
+ #presence{type = Type, from = PeerJID} =
+ recv_presence(Config)
+ end, [available, unavailable]).
+
+recv_iqs(Config) ->
+ PeerJID = ?config(peer, Config),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = Type, from = PeerJID} = recv_iq(Config)
+ end, [set, get, error, result]).
+
+recv_messages(Config) ->
+ PeerJID = ?config(peer, Config),
+ lists:foreach(
+ fun(Type) ->
+ #message{type = Type, from = PeerJID} = recv_message(Config)
+ end, [chat, error, groupchat, headline, normal]).
+
+match_all(Opts) ->
+ IQ = proplists:get_bool(iq, Opts),
+ Message = proplists:get_bool(message, Opts),
+ PresenceIn = proplists:get_bool(presence_in, Opts),
+ PresenceOut = proplists:get_bool(presence_out, Opts),
+ not (IQ or Message or PresenceIn or PresenceOut).
+
+is_message_in_blocked(Opts) ->
+ proplists:get_bool(message, Opts) or match_all(Opts).
+
+is_message_out_blocked(Opts) ->
+ match_all(Opts).
+
+is_iq_in_blocked(Opts) ->
+ proplists:get_bool(iq, Opts) or match_all(Opts).
+
+is_iq_out_blocked(Opts) ->
+ match_all(Opts).
+
+is_presence_in_blocked(Opts) ->
+ proplists:get_bool(presence_in, Opts) or match_all(Opts).
+
+is_presence_out_blocked(Opts) ->
+ proplists:get_bool(presence_out, Opts) or match_all(Opts).
+
+is_other_blocked(Opts) ->
+ %% 'other' means subscriptions and presence-errors
+ match_all(Opts).
+
+server_send_iqs(Config) ->
+ ServerJID = server_jid(Config),
+ MyJID = my_jid(Config),
+ ct:comment("Sending IQs from ~s to ~s",
+ [jid:to_string(ServerJID), jid:to_string(MyJID)]),
+ lists:foreach(
+ fun(Type) ->
+ ejabberd_router:route(
+ ServerJID, MyJID, #iq{type = Type})
+ end, [error, result]),
+ lists:foreach(
+ fun(Type) ->
+ ejabberd_local:route_iq(
+ ServerJID, MyJID, #iq{type = Type},
+ fun(#iq{type = result, sub_els = []}) -> ok;
+ (IQ) -> ct:fail({unexpected_iq_result, IQ})
+ end)
+ end, [set, get]).
+
+server_recv_iqs(Config) ->
+ ServerJID = server_jid(Config),
+ ct:comment("Receiving IQs from ~s", [jid:to_string(ServerJID)]),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = Type, from = ServerJID} = recv_iq(Config)
+ end, [error, result]),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = Type, from = ServerJID, id = I} = recv_iq(Config),
+ send(Config, #iq{to = ServerJID, type = result, id = I})
+ end, [set, get]).
+
+send_stanzas_to_server_resource(Config) ->
+ ServerJID = server_jid(Config),
+ ServerJIDResource = jid:replace_resource(ServerJID, <<"resource">>),
+ %% All stanzas sent should be handled by local_send_to_resource_hook
+ %% and should be bounced with item-not-found error
+ ct:comment("Sending IQs to ~s", [jid:to_string(ServerJIDResource)]),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} = Err =
+ send_recv(Config, #iq{type = Type, to = ServerJIDResource}),
+ #stanza_error{reason = 'item-not-found'} = xmpp:get_error(Err)
+ end, [set, get]),
+ ct:comment("Sending messages to ~s", [jid:to_string(ServerJIDResource)]),
+ lists:foreach(
+ fun(Type) ->
+ #message{type = error} = Err =
+ send_recv(Config, #message{type = Type, to = ServerJIDResource}),
+ #stanza_error{reason = 'item-not-found'} = xmpp:get_error(Err)
+ end, [normal, chat, groupchat, headline]),
+ ct:comment("Sending presences to ~s", [jid:to_string(ServerJIDResource)]),
+ lists:foreach(
+ fun(Type) ->
+ #presence{type = error} = Err =
+ send_recv(Config, #presence{type = Type, to = ServerJIDResource}),
+ #stanza_error{reason = 'item-not-found'} = xmpp:get_error(Err)
+ end, [available, unavailable]).
diff --git a/test/proxy65_tests.erl b/test/proxy65_tests.erl
new file mode 100644
index 000000000..49e195d38
--- /dev/null
+++ b/test/proxy65_tests.erl
@@ -0,0 +1,113 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(proxy65_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [disconnect/1, is_feature_advertised/3, proxy_jid/1,
+ my_jid/1, wait_for_slave/1, wait_for_master/1,
+ send_recv/2, put_event/2, get_event/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {proxy65_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(service_vcard)]}.
+
+feature_enabled(Config) ->
+ true = is_feature_advertised(Config, ?NS_BYTESTREAMS, proxy_jid(Config)),
+ disconnect(Config).
+
+service_vcard(Config) ->
+ JID = proxy_jid(Config),
+ ct:comment("Retreiving vCard from ~s", [jid:to_string(JID)]),
+ #iq{type = result, sub_els = [#vcard_temp{}]} =
+ send_recv(Config, #iq{type = get, to = JID, sub_els = [#vcard_temp{}]}),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {proxy65_master_slave, [sequence],
+ [master_slave_test(all)]}.
+
+all_master(Config) ->
+ Proxy = proxy_jid(Config),
+ MyJID = my_jid(Config),
+ Peer = ?config(slave, Config),
+ wait_for_slave(Config),
+ #presence{} = send_recv(Config, #presence{}),
+ #iq{type = result, sub_els = [#bytestreams{hosts = [StreamHost]}]} =
+ send_recv(
+ Config,
+ #iq{type = get, sub_els = [#bytestreams{}], to = Proxy}),
+ SID = randoms:get_string(),
+ Data = randoms:bytes(1024),
+ put_event(Config, {StreamHost, SID, Data}),
+ Socks5 = socks5_connect(StreamHost, {SID, MyJID, Peer}),
+ wait_for_slave(Config),
+ #iq{type = result, sub_els = []} =
+ send_recv(Config,
+ #iq{type = set, to = Proxy,
+ sub_els = [#bytestreams{activate = Peer, sid = SID}]}),
+ socks5_send(Socks5, Data),
+ disconnect(Config).
+
+all_slave(Config) ->
+ MyJID = my_jid(Config),
+ Peer = ?config(master, Config),
+ #presence{} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ {StreamHost, SID, Data} = get_event(Config),
+ Socks5 = socks5_connect(StreamHost, {SID, Peer, MyJID}),
+ wait_for_master(Config),
+ socks5_recv(Socks5, Data),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("proxy65_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("proxy65_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("proxy65_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("proxy65_" ++ atom_to_list(T) ++ "_slave")]}.
+
+socks5_connect(#streamhost{host = Host, port = Port},
+ {SID, JID1, JID2}) ->
+ Hash = p1_sha:sha([SID, jid:to_string(JID1), jid:to_string(JID2)]),
+ {ok, Sock} = gen_tcp:connect(binary_to_list(Host), Port,
+ [binary, {active, false}]),
+ Init = <<?VERSION_5, 1, ?AUTH_ANONYMOUS>>,
+ InitAck = <<?VERSION_5, ?AUTH_ANONYMOUS>>,
+ Req = <<?VERSION_5, ?CMD_CONNECT, 0,
+ ?ATYP_DOMAINNAME, 40, Hash:40/binary, 0, 0>>,
+ Resp = <<?VERSION_5, ?SUCCESS, 0, ?ATYP_DOMAINNAME,
+ 40, Hash:40/binary, 0, 0>>,
+ gen_tcp:send(Sock, Init),
+ {ok, InitAck} = gen_tcp:recv(Sock, size(InitAck)),
+ gen_tcp:send(Sock, Req),
+ {ok, Resp} = gen_tcp:recv(Sock, size(Resp)),
+ Sock.
+
+socks5_send(Sock, Data) ->
+ ok = gen_tcp:send(Sock, Data).
+
+socks5_recv(Sock, Data) ->
+ {ok, Data} = gen_tcp:recv(Sock, size(Data)).
diff --git a/test/pubsub_tests.erl b/test/pubsub_tests.erl
new file mode 100644
index 000000000..daffc29ec
--- /dev/null
+++ b/test/pubsub_tests.erl
@@ -0,0 +1,737 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(pubsub_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [pubsub_jid/1, send_recv/2, get_features/2, disconnect/1,
+ put_event/2, get_event/1, wait_for_master/1, wait_for_slave/1,
+ recv_message/1, my_jid/1, send/2, recv_presence/1, recv/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {pubsub_single, [sequence],
+ [single_test(test_features),
+ single_test(test_vcard),
+ single_test(test_create),
+ single_test(test_configure),
+ single_test(test_delete),
+ single_test(test_get_affiliations),
+ single_test(test_get_subscriptions),
+ single_test(test_create_instant),
+ single_test(test_default),
+ single_test(test_create_configure),
+ single_test(test_publish),
+ single_test(test_auto_create),
+ single_test(test_get_items),
+ single_test(test_delete_item),
+ single_test(test_purge),
+ single_test(test_subscribe),
+ single_test(test_unsubscribe)]}.
+
+test_features(Config) ->
+ PJID = pubsub_jid(Config),
+ AllFeatures = sets:from_list(get_features(Config, PJID)),
+ NeededFeatures = sets:from_list(
+ [?NS_PUBSUB,
+ ?PUBSUB("access-open"),
+ ?PUBSUB("access-authorize"),
+ ?PUBSUB("create-nodes"),
+ ?PUBSUB("instant-nodes"),
+ ?PUBSUB("config-node"),
+ ?PUBSUB("retrieve-default"),
+ ?PUBSUB("create-and-configure"),
+ ?PUBSUB("publish"),
+ ?PUBSUB("auto-create"),
+ ?PUBSUB("retrieve-items"),
+ ?PUBSUB("delete-items"),
+ ?PUBSUB("subscribe"),
+ ?PUBSUB("retrieve-affiliations"),
+ ?PUBSUB("modify-affiliations"),
+ ?PUBSUB("retrieve-subscriptions"),
+ ?PUBSUB("manage-subscriptions"),
+ ?PUBSUB("purge-nodes"),
+ ?PUBSUB("delete-nodes")]),
+ true = sets:is_subset(NeededFeatures, AllFeatures),
+ disconnect(Config).
+
+test_vcard(Config) ->
+ JID = pubsub_jid(Config),
+ ct:comment("Retreiving vCard from ~s", [jid:to_string(JID)]),
+ #iq{type = result, sub_els = [#vcard_temp{}]} =
+ send_recv(Config, #iq{type = get, to = JID, sub_els = [#vcard_temp{}]}),
+ disconnect(Config).
+
+test_create(Config) ->
+ Node = ?config(pubsub_node, Config),
+ Node = create_node(Config, Node),
+ disconnect(Config).
+
+test_create_instant(Config) ->
+ Node = create_node(Config, <<>>),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_configure(Config) ->
+ Node = ?config(pubsub_node, Config),
+ NodeTitle = ?config(pubsub_node_title, Config),
+ NodeConfig = get_node_config(Config, Node),
+ MyNodeConfig = set_opts(NodeConfig,
+ [{title, NodeTitle}]),
+ set_node_config(Config, Node, MyNodeConfig),
+ NewNodeConfig = get_node_config(Config, Node),
+ NodeTitle = proplists:get_value(title, NewNodeConfig),
+ disconnect(Config).
+
+test_default(Config) ->
+ get_default_node_config(Config),
+ disconnect(Config).
+
+test_create_configure(Config) ->
+ NodeTitle = ?config(pubsub_node_title, Config),
+ DefaultNodeConfig = get_default_node_config(Config),
+ CustomNodeConfig = set_opts(DefaultNodeConfig,
+ [{title, NodeTitle}]),
+ Node = create_node(Config, <<>>, CustomNodeConfig),
+ NodeConfig = get_node_config(Config, Node),
+ NodeTitle = proplists:get_value(title, NodeConfig),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_publish(Config) ->
+ Node = create_node(Config, <<>>),
+ publish_item(Config, Node),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_auto_create(Config) ->
+ Node = randoms:get_string(),
+ publish_item(Config, Node),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_get_items(Config) ->
+ Node = create_node(Config, <<>>),
+ ItemsIn = [publish_item(Config, Node) || _ <- lists:seq(1, 5)],
+ ItemsOut = get_items(Config, Node),
+ true = [I || #ps_item{id = I} <- lists:sort(ItemsIn)]
+ == [I || #ps_item{id = I} <- lists:sort(ItemsOut)],
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_delete_item(Config) ->
+ Node = create_node(Config, <<>>),
+ #ps_item{id = I} = publish_item(Config, Node),
+ [#ps_item{id = I}] = get_items(Config, Node),
+ delete_item(Config, Node, I),
+ [] = get_items(Config, Node),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_subscribe(Config) ->
+ Node = create_node(Config, <<>>),
+ #ps_subscription{type = subscribed} = subscribe_node(Config, Node),
+ [#ps_subscription{node = Node}] = get_subscriptions(Config),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_unsubscribe(Config) ->
+ Node = create_node(Config, <<>>),
+ subscribe_node(Config, Node),
+ [#ps_subscription{node = Node}] = get_subscriptions(Config),
+ unsubscribe_node(Config, Node),
+ [] = get_subscriptions(Config),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_get_affiliations(Config) ->
+ Nodes = lists:sort([create_node(Config, <<>>) || _ <- lists:seq(1, 5)]),
+ Affs = get_affiliations(Config),
+ Nodes = lists:sort([Node || #ps_affiliation{node = Node,
+ type = owner} <- Affs]),
+ [delete_node(Config, Node) || Node <- Nodes],
+ disconnect(Config).
+
+test_get_subscriptions(Config) ->
+ Nodes = lists:sort([create_node(Config, <<>>) || _ <- lists:seq(1, 5)]),
+ [subscribe_node(Config, Node) || Node <- Nodes],
+ Subs = get_subscriptions(Config),
+ Nodes = lists:sort([Node || #ps_subscription{node = Node} <- Subs]),
+ [delete_node(Config, Node) || Node <- Nodes],
+ disconnect(Config).
+
+test_purge(Config) ->
+ Node = create_node(Config, <<>>),
+ ItemsIn = [publish_item(Config, Node) || _ <- lists:seq(1, 5)],
+ ItemsOut = get_items(Config, Node),
+ true = [I || #ps_item{id = I} <- lists:sort(ItemsIn)]
+ == [I || #ps_item{id = I} <- lists:sort(ItemsOut)],
+ purge_node(Config, Node),
+ [] = get_items(Config, Node),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+test_delete(Config) ->
+ Node = ?config(pubsub_node, Config),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {pubsub_master_slave, [sequence],
+ [master_slave_test(publish),
+ master_slave_test(subscriptions),
+ master_slave_test(affiliations),
+ master_slave_test(authorize)]}.
+
+publish_master(Config) ->
+ Node = create_node(Config, <<>>),
+ put_event(Config, Node),
+ wait_for_slave(Config),
+ #ps_item{id = ID} = publish_item(Config, Node),
+ #ps_item{id = ID} = get_event(Config),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+publish_slave(Config) ->
+ Node = get_event(Config),
+ subscribe_node(Config, Node),
+ wait_for_master(Config),
+ #message{
+ sub_els =
+ [#ps_event{
+ items = #ps_items{node = Node,
+ items = [Item]}}]} = recv_message(Config),
+ put_event(Config, Item),
+ disconnect(Config).
+
+subscriptions_master(Config) ->
+ Peer = ?config(slave, Config),
+ Node = ?config(pubsub_node, Config),
+ Node = create_node(Config, Node),
+ [] = get_subscriptions(Config, Node),
+ wait_for_slave(Config),
+ lists:foreach(
+ fun(Type) ->
+ ok = set_subscriptions(Config, Node, [{Peer, Type}]),
+ #ps_item{} = publish_item(Config, Node),
+ case get_subscriptions(Config, Node) of
+ [] when Type == none; Type == pending ->
+ ok;
+ [#ps_subscription{jid = Peer, type = Type}] ->
+ ok
+ end
+ end, [subscribed, unconfigured, pending, none]),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+subscriptions_slave(Config) ->
+ wait_for_master(Config),
+ MyJID = my_jid(Config),
+ Node = ?config(pubsub_node, Config),
+ lists:foreach(
+ fun(subscribed = Type) ->
+ ?recv2(#message{
+ sub_els =
+ [#ps_event{
+ subscription = #ps_subscription{
+ node = Node,
+ jid = MyJID,
+ type = Type}}]},
+ #message{sub_els = [#ps_event{}]});
+ (Type) ->
+ #message{
+ sub_els =
+ [#ps_event{
+ subscription = #ps_subscription{
+ node = Node,
+ jid = MyJID,
+ type = Type}}]} =
+ recv_message(Config)
+ end, [subscribed, unconfigured, pending, none]),
+ disconnect(Config).
+
+affiliations_master(Config) ->
+ Peer = ?config(slave, Config),
+ BarePeer = jid:remove_resource(Peer),
+ lists:foreach(
+ fun(Aff) ->
+ Node = <<(atom_to_binary(Aff, utf8))/binary,
+ $-, (randoms:get_string())/binary>>,
+ create_node(Config, Node, default_node_config(Config)),
+ #ps_item{id = I} = publish_item(Config, Node),
+ ok = set_affiliations(Config, Node, [{Peer, Aff}]),
+ Affs = get_affiliations(Config, Node),
+ case lists:keyfind(BarePeer, #ps_affiliation.jid, Affs) of
+ false when Aff == none ->
+ ok;
+ #ps_affiliation{type = Aff} ->
+ ok
+ end,
+ put_event(Config, {Aff, Node, I}),
+ wait_for_slave(Config),
+ delete_node(Config, Node)
+ end, [outcast, none, member, publish_only, publisher, owner]),
+ put_event(Config, disconnect),
+ disconnect(Config).
+
+affiliations_slave(Config) ->
+ affiliations_slave(Config, get_event(Config)).
+
+affiliations_slave(Config, {outcast, Node, ItemID}) ->
+ #stanza_error{reason = 'forbidden'} = subscribe_node(Config, Node),
+ #stanza_error{} = unsubscribe_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_items(Config, Node),
+ #stanza_error{reason = 'forbidden'} = publish_item(Config, Node),
+ #stanza_error{reason = 'forbidden'} = delete_item(Config, Node, ItemID),
+ #stanza_error{reason = 'forbidden'} = purge_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_node_config(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_node_config(Config, Node, default_node_config(Config)),
+ #stanza_error{reason = 'forbidden'} = get_subscriptions(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_subscriptions(Config, Node, [{my_jid(Config), subscribed}]),
+ #stanza_error{reason = 'forbidden'} = get_affiliations(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_affiliations(Config, Node, [{?config(master, Config), outcast},
+ {my_jid(Config), owner}]),
+ #stanza_error{reason = 'forbidden'} = delete_node(Config, Node),
+ wait_for_master(Config),
+ affiliations_slave(Config, get_event(Config));
+affiliations_slave(Config, {none, Node, ItemID}) ->
+ #ps_subscription{type = subscribed} = subscribe_node(Config, Node),
+ ok = unsubscribe_node(Config, Node),
+    %% This violates the affiliation chart from section 4.1
+ [_|_] = get_items(Config, Node),
+ #stanza_error{reason = 'forbidden'} = publish_item(Config, Node),
+ #stanza_error{reason = 'forbidden'} = delete_item(Config, Node, ItemID),
+ #stanza_error{reason = 'forbidden'} = purge_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_node_config(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_node_config(Config, Node, default_node_config(Config)),
+ #stanza_error{reason = 'forbidden'} = get_subscriptions(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_subscriptions(Config, Node, [{my_jid(Config), subscribed}]),
+ #stanza_error{reason = 'forbidden'} = get_affiliations(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_affiliations(Config, Node, [{?config(master, Config), outcast},
+ {my_jid(Config), owner}]),
+ #stanza_error{reason = 'forbidden'} = delete_node(Config, Node),
+ wait_for_master(Config),
+ affiliations_slave(Config, get_event(Config));
+affiliations_slave(Config, {member, Node, ItemID}) ->
+ #ps_subscription{type = subscribed} = subscribe_node(Config, Node),
+ ok = unsubscribe_node(Config, Node),
+ [_|_] = get_items(Config, Node),
+ #stanza_error{reason = 'forbidden'} = publish_item(Config, Node),
+ #stanza_error{reason = 'forbidden'} = delete_item(Config, Node, ItemID),
+ #stanza_error{reason = 'forbidden'} = purge_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_node_config(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_node_config(Config, Node, default_node_config(Config)),
+ #stanza_error{reason = 'forbidden'} = get_subscriptions(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_subscriptions(Config, Node, [{my_jid(Config), subscribed}]),
+ #stanza_error{reason = 'forbidden'} = get_affiliations(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_affiliations(Config, Node, [{?config(master, Config), outcast},
+ {my_jid(Config), owner}]),
+ #stanza_error{reason = 'forbidden'} = delete_node(Config, Node),
+ wait_for_master(Config),
+ affiliations_slave(Config, get_event(Config));
+affiliations_slave(Config, {publish_only, Node, ItemID}) ->
+ #stanza_error{reason = 'forbidden'} = subscribe_node(Config, Node),
+ #stanza_error{} = unsubscribe_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_items(Config, Node),
+ #ps_item{id = _MyItemID} = publish_item(Config, Node),
+ %% BUG: This should be fixed
+ %% ?match(ok, delete_item(Config, Node, MyItemID)),
+ #stanza_error{reason = 'forbidden'} = delete_item(Config, Node, ItemID),
+ #stanza_error{reason = 'forbidden'} = purge_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_node_config(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_node_config(Config, Node, default_node_config(Config)),
+ #stanza_error{reason = 'forbidden'} = get_subscriptions(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_subscriptions(Config, Node, [{my_jid(Config), subscribed}]),
+ #stanza_error{reason = 'forbidden'} = get_affiliations(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_affiliations(Config, Node, [{?config(master, Config), outcast},
+ {my_jid(Config), owner}]),
+ #stanza_error{reason = 'forbidden'} = delete_node(Config, Node),
+ wait_for_master(Config),
+ affiliations_slave(Config, get_event(Config));
+affiliations_slave(Config, {publisher, Node, _ItemID}) ->
+ #ps_subscription{type = subscribed} = subscribe_node(Config, Node),
+ ok = unsubscribe_node(Config, Node),
+ [_|_] = get_items(Config, Node),
+ #ps_item{id = MyItemID} = publish_item(Config, Node),
+ ok = delete_item(Config, Node, MyItemID),
+ %% BUG: this should be fixed
+ %% #stanza_error{reason = 'forbidden'} = delete_item(Config, Node, ItemID),
+ #stanza_error{reason = 'forbidden'} = purge_node(Config, Node),
+ #stanza_error{reason = 'forbidden'} = get_node_config(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_node_config(Config, Node, default_node_config(Config)),
+ #stanza_error{reason = 'forbidden'} = get_subscriptions(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_subscriptions(Config, Node, [{my_jid(Config), subscribed}]),
+ #stanza_error{reason = 'forbidden'} = get_affiliations(Config, Node),
+ #stanza_error{reason = 'forbidden'} =
+ set_affiliations(Config, Node, [{?config(master, Config), outcast},
+ {my_jid(Config), owner}]),
+ #stanza_error{reason = 'forbidden'} = delete_node(Config, Node),
+ wait_for_master(Config),
+ affiliations_slave(Config, get_event(Config));
+affiliations_slave(Config, {owner, Node, ItemID}) ->
+ MyJID = my_jid(Config),
+ Peer = ?config(master, Config),
+ #ps_subscription{type = subscribed} = subscribe_node(Config, Node),
+ ok = unsubscribe_node(Config, Node),
+ [_|_] = get_items(Config, Node),
+ #ps_item{id = MyItemID} = publish_item(Config, Node),
+ ok = delete_item(Config, Node, MyItemID),
+ ok = delete_item(Config, Node, ItemID),
+ ok = purge_node(Config, Node),
+ [_|_] = get_node_config(Config, Node),
+ ok = set_node_config(Config, Node, default_node_config(Config)),
+ ok = set_subscriptions(Config, Node, []),
+ [] = get_subscriptions(Config, Node),
+ ok = set_affiliations(Config, Node, [{Peer, outcast}, {MyJID, owner}]),
+ [_, _] = get_affiliations(Config, Node),
+ ok = delete_node(Config, Node),
+ wait_for_master(Config),
+ affiliations_slave(Config, get_event(Config));
+affiliations_slave(Config, disconnect) ->
+ disconnect(Config).
+
+authorize_master(Config) ->
+ send(Config, #presence{}),
+ #presence{} = recv_presence(Config),
+ Peer = ?config(slave, Config),
+ PJID = pubsub_jid(Config),
+ NodeConfig = set_opts(default_node_config(Config),
+ [{access_model, authorize}]),
+ Node = ?config(pubsub_node, Config),
+ Node = create_node(Config, Node, NodeConfig),
+ wait_for_slave(Config),
+ #message{sub_els = [#xdata{fields = F1}]} = recv_message(Config),
+ C1 = pubsub_subscribe_authorization:decode(F1),
+ Node = proplists:get_value(node, C1),
+ Peer = proplists:get_value(subscriber_jid, C1),
+ %% Deny it at first
+ Deny = #xdata{type = submit,
+ fields = pubsub_subscribe_authorization:encode(
+ [{node, Node},
+ {subscriber_jid, Peer},
+ {allow, false}])},
+ send(Config, #message{to = PJID, sub_els = [Deny]}),
+ %% We should not have any subscriptions
+ [] = get_subscriptions(Config, Node),
+ wait_for_slave(Config),
+ #message{sub_els = [#xdata{fields = F2}]} = recv_message(Config),
+ C2 = pubsub_subscribe_authorization:decode(F2),
+ Node = proplists:get_value(node, C2),
+ Peer = proplists:get_value(subscriber_jid, C2),
+    %% Now we accept it as the peer is very insistent ;)
+ Approve = #xdata{type = submit,
+ fields = pubsub_subscribe_authorization:encode(
+ [{node, Node},
+ {subscriber_jid, Peer},
+ {allow, true}])},
+ send(Config, #message{to = PJID, sub_els = [Approve]}),
+ wait_for_slave(Config),
+ delete_node(Config, Node),
+ disconnect(Config).
+
+authorize_slave(Config) ->
+ Node = ?config(pubsub_node, Config),
+ MyJID = my_jid(Config),
+ wait_for_master(Config),
+ #ps_subscription{type = pending} = subscribe_node(Config, Node),
+ %% We're denied at first
+ #message{
+ sub_els =
+ [#ps_event{
+ subscription = #ps_subscription{type = none,
+ jid = MyJID}}]} =
+ recv_message(Config),
+ wait_for_master(Config),
+ #ps_subscription{type = pending} = subscribe_node(Config, Node),
+ %% Now much better!
+ #message{
+ sub_els =
+ [#ps_event{
+ subscription = #ps_subscription{type = subscribed,
+ jid = MyJID}}]} =
+ recv_message(Config),
+ wait_for_master(Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("pubsub_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("pubsub_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("pubsub_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("pubsub_" ++ atom_to_list(T) ++ "_slave")]}.
+
+set_opts(Config, Options) ->
+ lists:foldl(
+ fun({Opt, Val}, Acc) ->
+ lists:keystore(Opt, 1, Acc, {Opt, Val})
+ end, Config, Options).
+
+create_node(Config, Node) ->
+ create_node(Config, Node, undefined).
+
+create_node(Config, Node, Options) ->
+ PJID = pubsub_jid(Config),
+ NodeConfig = if is_list(Options) ->
+ #xdata{type = submit,
+ fields = pubsub_node_config:encode(Options)};
+ true ->
+ undefined
+ end,
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub{create = Node,
+ configure = {<<>>, NodeConfig}}]}) of
+ #iq{type = result, sub_els = [#pubsub{create = NewNode}]} ->
+ NewNode;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+delete_node(Config, Node) ->
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub_owner{delete = {Node, <<>>}}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+purge_node(Config, Node) ->
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub_owner{purge = Node}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_default_node_config(Config) -> % fetch the service-wide default node configuration, decoded to a proplist
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub_owner{default = {<<>>, undefined}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub_owner{default = {<<>>, NodeConfig}}]} ->
+ pubsub_node_config:decode(NodeConfig#xdata.fields);
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_node_config(Config, Node) -> % fetch Node's current configuration form, decoded to a proplist
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub_owner{configure = {Node, undefined}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub_owner{configure = {Node, NodeConfig}}]} ->
+ pubsub_node_config:decode(NodeConfig#xdata.fields);
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+set_node_config(Config, Node, Options) -> % submit Options as Node's new configuration; ok | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ NodeConfig = #xdata{type = submit,
+ fields = pubsub_node_config:encode(Options)},
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub_owner{configure =
+ {Node, NodeConfig}}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+publish_item(Config, Node) -> % publish one item with a random id; returns the #ps_item{} or #stanza_error{}
+ PJID = pubsub_jid(Config),
+ ItemID = randoms:get_string(),
+ Item = #ps_item{id = ItemID, xml_els = [xmpp:encode(#presence{id = ItemID})]}, % a presence element is used merely as an arbitrary XML payload
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub{publish = #ps_publish{
+ node = Node,
+ items = [Item]}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub{publish = #ps_publish{
+ node = Node,
+ items = [#ps_item{id = ItemID}]}}]} -> % the reply must echo the same node and item id
+ Item;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_items(Config, Node) -> % retrieve all items published to Node; [#ps_item{}] | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub{items = #ps_items{node = Node}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub{items = #ps_items{node = Node, items = Items}}]} ->
+ Items;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+delete_item(Config, Node, I) -> % retract the item with id I from Node; ok | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub{retract =
+ #ps_retract{
+ node = Node,
+ items = [#ps_item{id = I}]}}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+subscribe_node(Config, Node) -> % subscribe our full JID to Node; returns #ps_subscription{} | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ MyJID = my_jid(Config),
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub{subscribe = #ps_subscribe{
+ node = Node,
+ jid = MyJID}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub{
+ subscription = #ps_subscription{
+ node = Node,
+ jid = MyJID} = Sub}]} -> % the reply must confirm the same node/JID pair
+ Sub;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+unsubscribe_node(Config, Node) -> % drop our full JID's subscription to Node; ok | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ MyJID = my_jid(Config),
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub{
+ unsubscribe = #ps_unsubscribe{
+ node = Node,
+ jid = MyJID}}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_affiliations(Config) -> % our own affiliations across all nodes (entity/user namespace, #pubsub{})
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub{affiliations = {<<>>, []}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub{affiliations = {<<>>, Affs}}]} ->
+ Affs;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_affiliations(Config, Node) -> % all entities' affiliations on Node (owner namespace, #pubsub_owner{})
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub_owner{affiliations = {Node, []}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub_owner{affiliations = {Node, Affs}}]} ->
+ Affs;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+set_affiliations(Config, Node, JTs) -> % set affiliations on Node from a [{JID, Type}] list; ok | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ Affs = [#ps_affiliation{jid = J, type = T} || {J, T} <- JTs],
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub_owner{affiliations =
+ {Node, Affs}}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_subscriptions(Config) -> % our own subscriptions across all nodes (entity/user namespace)
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub{subscriptions = {<<>>, []}}]}) of
+ #iq{type = result, sub_els = [#pubsub{subscriptions = {<<>>, Subs}}]} ->
+ Subs;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+get_subscriptions(Config, Node) -> % all entities' subscriptions on Node (owner namespace)
+ PJID = pubsub_jid(Config),
+ case send_recv(Config,
+ #iq{type = get, to = PJID,
+ sub_els = [#pubsub_owner{subscriptions = {Node, []}}]}) of
+ #iq{type = result,
+ sub_els = [#pubsub_owner{subscriptions = {Node, Subs}}]} ->
+ Subs;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+set_subscriptions(Config, Node, JTs) -> % set subscriptions on Node from a [{JID, Type}] list; ok | #stanza_error{}
+ PJID = pubsub_jid(Config),
+ Subs = [#ps_subscription{jid = J, type = T} || {J, T} <- JTs],
+ case send_recv(Config,
+ #iq{type = set, to = PJID,
+ sub_els = [#pubsub_owner{subscriptions =
+ {Node, Subs}}]}) of
+ #iq{type = result, sub_els = []} ->
+ ok;
+ #iq{type = error} = IQ ->
+ xmpp:get_subtag(IQ, #stanza_error{})
+ end.
+
+default_node_config(Config) -> % node configuration used by tests that need deterministic, quiet nodes
+ [{title, ?config(pubsub_node_title, Config)},
+ {notify_delete, false}, % no notification storm on node deletion
+ {send_last_published_item, never}]. % new subscribers must not receive a backlog item
diff --git a/test/replaced_tests.erl b/test/replaced_tests.erl
new file mode 100644
index 000000000..e50c27f05
--- /dev/null
+++ b/test/replaced_tests.erl
@@ -0,0 +1,57 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(replaced_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [bind/1, wait_for_slave/1, wait_for_master/1, recv/1,
+ close_socket/1, disconnect/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() -> % no single-user cases: session replacement needs two connections
+ {replaced_single, [sequence], []}.
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() -> % empty case list; see the note below for why 'conflict' is disabled
+ {replaced_master_slave, [sequence], []}.
+%% Disable tests for now due to a race condition
+%% because ejabberd_sm:sid() is generated in ejabberd_s2s:init()
+%%[master_slave_test(conflict)]}.
+
+conflict_master(Config0) -> % bind first, then expect to be kicked with <conflict/> once the slave binds the same resource
+ Config = bind(Config0),
+ wait_for_slave(Config),
+ #stream_error{reason = conflict} = recv(Config),
+ {xmlstreamend, <<"stream:stream">>} = recv(Config), % server closes its side of the stream after the error
+ close_socket(Config).
+
+conflict_slave(Config0) -> % wait until master is bound, then bind to trigger the session replacement
+ wait_for_master(Config0),
+ Config = bind(Config0),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) -> % map test name T to its CT case atom, e.g. replaced_<T>
+ list_to_atom("replaced_" ++ atom_to_list(T)).
+
+master_slave_test(T) -> % CT group for test T: runs replaced_<T>_master and replaced_<T>_slave in parallel
+ {list_to_atom("replaced_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("replaced_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("replaced_" ++ atom_to_list(T) ++ "_slave")]}.
diff --git a/test/roster_tests.erl b/test/roster_tests.erl
new file mode 100644
index 000000000..4aa06b953
--- /dev/null
+++ b/test/roster_tests.erl
@@ -0,0 +1,527 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 22 Oct 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(roster_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [send_recv/2, recv_iq/1, send/2, disconnect/1, del_roster/1,
+ del_roster/2, make_iq_result/1, wait_for_slave/1,
+ wait_for_master/1, recv_presence/1, self_presence/2,
+ put_event/2, get_event/1, match_failure/2, get_roster/1]).
+-include("suite.hrl").
+-include("mod_roster.hrl").
+
+-record(state, {subscription = none :: none | from | to | both,
+ peer_available = false,
+ pending_in = false :: boolean(),
+ pending_out = false :: boolean()}).
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+init(_TestCase, Config) -> % per-testcase setup hook; no extra state needed for roster tests
+ Config.
+
+stop(_TestCase, Config) -> % per-testcase teardown hook; nothing to clean up
+ Config.
+
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() -> % sequential single-user roster cases (RFC 6121 roster management + versioning)
+ {roster_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(iq_set_many_items),
+ single_test(iq_set_duplicated_groups),
+ single_test(iq_get_item),
+ single_test(iq_unexpected_element),
+ single_test(iq_set_ask),
+ single_test(set_item),
+ single_test(version)]}.
+
+feature_enabled(Config) -> % the stream features must have advertised roster versioning (urn:xmpp:features:rosterver)
+ ct:comment("Checking if roster versioning stream feature is set"),
+ true = ?config(rosterver, Config),
+ disconnect(Config).
+
+set_item(Config) -> % full item lifecycle: add, set groups, clear groups, set name, remove; each step re-reads the roster
+ JID = jid:from_string(<<"nurse@example.com">>),
+ Item = #roster_item{jid = JID},
+ {V1, Item} = set_items(Config, [Item]), % each set_items returns {PushVersion, PushedItem}
+ {V1, [Item]} = get_items(Config), % the stored roster must match the push exactly
+ ItemWithGroups = Item#roster_item{groups = [<<"G1">>, <<"G2">>]},
+ {V2, ItemWithGroups} = set_items(Config, [ItemWithGroups]),
+ {V2, [ItemWithGroups]} = get_items(Config),
+ {V3, Item} = set_items(Config, [Item]), % setting without groups clears them again
+ {V3, [Item]} = get_items(Config),
+ ItemWithName = Item#roster_item{name = <<"some name">>},
+ {V4, ItemWithName} = set_items(Config, [ItemWithName]),
+ {V4, [ItemWithName]} = get_items(Config),
+ ItemRemoved = Item#roster_item{subscription = remove},
+ {V5, ItemRemoved} = set_items(Config, [ItemRemoved]),
+ {V5, []} = get_items(Config), % removal leaves an empty roster
+ del_roster(disconnect(Config), JID).
+
+iq_set_many_items(Config) -> % RFC 6121: a roster-set must carry exactly one <item/>; two must be rejected
+ J1 = jid:from_string(<<"nurse1@example.com">>),
+ J2 = jid:from_string(<<"nurse2@example.com">>),
+ ct:comment("Trying to send roster-set with many <item/> elements"),
+ Items = [#roster_item{jid = J1}, #roster_item{jid = J2}],
+ #stanza_error{reason = 'bad-request'} = set_items(Config, Items),
+ disconnect(Config).
+
+iq_set_duplicated_groups(Config) -> % duplicate <group/> values within one item must be rejected
+ JID = jid:from_string(<<"nurse@example.com">>),
+ G = randoms:get_string(),
+ ct:comment("Trying to send roster-set with duplicated groups"),
+ Item = #roster_item{jid = JID, groups = [G, G]},
+ #stanza_error{reason = 'bad-request'} = set_items(Config, [Item]),
+ disconnect(Config).
+
+iq_set_ask(Config) -> % 'ask' is server-managed state; a client setting it must be rejected
+ JID = jid:from_string(<<"nurse@example.com">>),
+ ct:comment("Trying to send roster-set with 'ask' included"),
+ Item = #roster_item{jid = JID, ask = subscribe},
+ #stanza_error{reason = 'bad-request'} = set_items(Config, [Item]),
+ disconnect(Config).
+
+iq_get_item(Config) -> % a roster-get must not carry <item/> children; expect bad-request
+ JID = jid:from_string(<<"nurse@example.com">>),
+ ct:comment("Trying to send roster-get with <item/> element"),
+ #iq{type = error} = Err3 =
+ send_recv(Config, #iq{type = get,
+ sub_els = [#roster_query{
+ items = [#roster_item{jid = JID}]}]}),
+ #stanza_error{reason = 'bad-request'} = xmpp:get_error(Err3),
+ disconnect(Config).
+
+iq_unexpected_element(Config) -> % a bare <item/> outside <query/> is not a known IQ payload; expect service-unavailable
+ JID = jid:from_string(<<"nurse@example.com">>),
+ ct:comment("Trying to send IQs with unexpected element"),
+ lists:foreach(
+ fun(Type) ->
+ #iq{type = error} = Err4 =
+ send_recv(Config, #iq{type = Type,
+ sub_els = [#roster_item{jid = JID}]}),
+ #stanza_error{reason = 'service-unavailable'} = xmpp:get_error(Err4)
+ end, [get, set]), % both IQ types must be rejected the same way
+ disconnect(Config).
+
+version(Config) -> % roster versioning: an up-to-date 'ver' yields an empty result, a stale one yields the full roster
+ JID = jid:from_string(<<"nurse@example.com">>),
+ ct:comment("Requesting roster"),
+ {InitialVersion, _} = get_items(Config, <<"">>), % empty ver forces a full roster response
+ ct:comment("Requesting roster with initial version"),
+ {empty, []} = get_items(Config, InitialVersion), % nothing changed, so the server sends no <query/>
+ ct:comment("Adding JID to the roster"),
+ {NewVersion, _} = set_items(Config, [#roster_item{jid = JID}]),
+ ct:comment("Requesting roster with initial version"),
+ {NewVersion, _} = get_items(Config, InitialVersion), % stale version: full roster with the new ver
+ ct:comment("Requesting roster with new version"),
+ {empty, []} = get_items(Config, NewVersion),
+ del_roster(disconnect(Config), JID).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() -> % two-client cases exercising the RFC 6121 subscription state machine
+ {roster_master_slave, [sequence],
+ [master_slave_test(subscribe)]}.
+
+subscribe_master(Config) -> % drive the full action sequence; the slave mirrors each step
+ Actions = actions(),
+ process_subscriptions_master(Config, Actions),
+ del_roster(disconnect(Config)).
+
+subscribe_slave(Config) -> % follow the master's instructions received via put_event/get_event
+ process_subscriptions_slave(Config),
+ del_roster(disconnect(Config)).
+
+process_subscriptions_master(Config, Actions) -> % run each {Dir, Type} action, telling the slave the mirrored action first
+ EnumeratedActions = lists:zip(lists:seq(1, length(Actions)), Actions),
+ self_presence(Config, available),
+ lists:foldl(
+ fun({N, {Dir, Type}}, State) ->
+ timer:sleep(100), % crude pacing so the previous step's stanzas settle before the next
+ if Dir == out -> put_event(Config, {N, in, Type}); % master's outbound is the slave's inbound, and vice versa
+ Dir == in -> put_event(Config, {N, out, Type})
+ end,
+ wait_for_slave(Config),
+ ct:pal("Performing ~s-~s (#~p) "
+ "in state:~n~s~nwith roster:~n~s",
+ [Dir, Type, N, pp(State),
+ pp(get_roster(Config))]),
+ transition(Config, Dir, Type, State)
+ end, #state{}, EnumeratedActions),
+ put_event(Config, done), % tell the slave the sequence is over
+ wait_for_slave(Config),
+ Config.
+
+process_subscriptions_slave(Config) -> % announce presence, then loop on events from the master
+ self_presence(Config, available),
+ process_subscriptions_slave(Config, get_event(Config), #state{}).
+
+process_subscriptions_slave(Config, done, _State) -> % master finished: rendezvous one last time and return
+ wait_for_master(Config),
+ Config;
+process_subscriptions_slave(Config, {N, Dir, Type}, State) ->
+ wait_for_master(Config),
+ ct:pal("Performing ~s-~s (#~p) "
+ "in state:~n~s~nwith roster:~n~s",
+ [Dir, Type, N, pp(State), pp(get_roster(Config))]),
+ NewState = transition(Config, Dir, Type, State),
+ process_subscriptions_slave(Config, get_event(Config), NewState).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) -> % map test name T to its CT case atom, e.g. roster_<T>
+ list_to_atom("roster_" ++ atom_to_list(T)).
+
+master_slave_test(T) -> % CT group for test T: runs roster_<T>_master and roster_<T>_slave in parallel
+ {list_to_atom("roster_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("roster_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("roster_" ++ atom_to_list(T) ++ "_slave")]}.
+
+get_items(Config) -> % fetch the full roster (empty version string forces a complete response)
+ get_items(Config, <<"">>).
+
+get_items(Config, Version) -> % versioned roster-get; {Ver, Items} | {empty, []} when Version is current | #stanza_error{}
+ case send_recv(Config, #iq{type = get,
+ sub_els = [#roster_query{ver = Version}]}) of
+ #iq{type = result,
+ sub_els = [#roster_query{ver = NewVersion, items = Items}]} ->
+ {NewVersion, Items};
+ #iq{type = result, sub_els = []} -> % no <query/> at all: client's version is up to date
+ {empty, []};
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+get_item(Config, JID) -> % find a single roster item by bare JID; false if absent or the get failed
+ case get_items(Config) of
+ {_Ver, Items} when is_list(Items) ->
+ lists:keyfind(JID, #roster_item.jid, Items);
+ _ ->
+ false
+ end.
+
+set_items(Config, Items) -> % roster-set; on success consume the resulting push and return {Ver, PushedItem}
+ case send_recv(Config, #iq{type = set,
+ sub_els = [#roster_query{items = Items}]}) of
+ #iq{type = result, sub_els = []} ->
+ recv_push(Config); % the server must follow the result with a roster push
+ #iq{type = error} = Err ->
+ xmpp:get_error(Err)
+ end.
+
+recv_push(Config) -> % receive one roster push, acknowledge it, and return its {Ver, Item}
+ ct:comment("Receiving roster push"),
+ Push = #iq{type = set,
+ sub_els = [#roster_query{ver = Ver, items = [PushItem]}]}
+ = recv_iq(Config),
+ send(Config, make_iq_result(Push)), % pushes must be acked like any IQ set
+ {Ver, PushItem}.
+
+recv_push(Config, Subscription, Ask) -> % expect a roster push about the peer with exactly this subscription/ask state
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID), % roster items are keyed by bare JID
+ Match = #roster_item{jid = PeerBareJID,
+ subscription = Subscription,
+ ask = Ask,
+ groups = [],
+ name = <<"">>},
+ ct:comment("Receiving roster push"),
+ Push = #iq{type = set, sub_els = [#roster_query{items = [Item]}]} =
+ recv_iq(Config),
+ case Item of
+ Match -> send(Config, make_iq_result(Push));
+ _ -> match_failure(Item, Match) % fail the test with both actual and expected
+ end.
+
+recv_presence(Config, Type) -> % expect a presence of Type from the peer's full JID
+ PeerJID = ?config(peer, Config),
+ case recv_presence(Config) of
+ #presence{from = PeerJID, type = Type} -> ok;
+ Pres -> match_failure(Pres, #presence{from = PeerJID, type = Type})
+ end.
+
+recv_subscription(Config, Type) -> % expect a subscription presence from the peer's bare JID (subscription stanzas use bare JIDs)
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ case recv_presence(Config) of
+ #presence{from = PeerBareJID, type = Type} -> ok;
+ Pres -> match_failure(Pres, #presence{from = PeerBareJID, type = Type})
+ end.
+
+pp(Term) -> % pretty-print a term, labelling #state{} and #roster{} record fields
+ io_lib_pretty:print(Term, fun pp/2).
+
+pp(state, N) -> % record-field callback for io_lib_pretty: return field names only if the arity matches
+ Fs = record_info(fields, state),
+ try N = length(Fs), Fs
+ catch _:_ -> no end;
+pp(roster, N) ->
+ Fs = record_info(fields, roster),
+ try N = length(Fs), Fs
+ catch _:_ -> no end;
+pp(_, _) -> no. % any other tuple: not a known record
+
+%% RFC6121, A.2.1
+transition(Config, out, subscribe, % drive one {Dir, Type} action from State, assert the expected pushes/presences, return the new state
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ send(Config, #presence{to = PeerBareJID, type = subscribe}),
+ case {Sub, Out, In} of % keys are {subscription, pending_out, pending_in} per RFC 6121 appendix A tables
+ {none, false, _} ->
+ recv_push(Config, none, subscribe),
+ State#state{pending_out = true};
+ {none, true, false} ->
+ %% BUG: we should not receive roster push here
+ recv_push(Config, none, subscribe), % documents current (buggy) server behavior on purpose
+ State;
+ {from, false, false} ->
+ recv_push(Config, from, subscribe),
+ State#state{pending_out = true};
+ _ ->
+ State % remaining states: the resend is a no-op for the roster
+ end;
+%% RFC6121, A.2.2
+transition(Config, out, unsubscribe,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ send(Config, #presence{to = PeerBareJID, type = unsubscribe}),
+ case {Sub, Out, In} of
+ {none, true, _} ->
+ recv_push(Config, none, undefined), % cancels the pending outbound request
+ State#state{pending_out = false};
+ {to, false, _} ->
+ recv_push(Config, none, undefined),
+ recv_presence(Config, unavailable), % we stop seeing the peer's presence
+ State#state{subscription = none, peer_available = false};
+ {from, true, false} ->
+ recv_push(Config, from, undefined),
+ State#state{pending_out = false};
+ {both, false, false} ->
+ recv_push(Config, from, undefined),
+ recv_presence(Config, unavailable),
+ State#state{subscription = from, peer_available = false};
+ _ ->
+ State
+ end;
+%% RFC6121, A.2.3
+transition(Config, out, subscribed,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ send(Config, #presence{to = PeerBareJID, type = subscribed}),
+ case {Sub, Out, In} of
+ {none, false, true} ->
+ recv_push(Config, from, undefined), % approving the pending inbound request
+ State#state{subscription = from, pending_in = false};
+ {none, true, true} ->
+ recv_push(Config, from, subscribe), % our own outbound request is still pending ('ask' kept)
+ State#state{subscription = from, pending_in = false};
+ {to, false, true} ->
+ recv_push(Config, both, undefined),
+ State#state{subscription = both, pending_in = false};
+ {to, false, _} ->
+ %% BUG: we should not transition to 'both' state
+ recv_push(Config, both, undefined), % documents current (buggy) server behavior on purpose
+ State#state{subscription = both};
+ _ ->
+ State
+ end;
+%% RFC6121, A.2.4
+transition(Config, out, unsubscribed,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ send(Config, #presence{to = PeerBareJID, type = unsubscribed}),
+ case {Sub, Out, In} of
+ {none, false, true} ->
+ State#state{subscription = none, pending_in = false}; % silently declines; no push expected
+ {none, true, true} ->
+ recv_push(Config, none, subscribe),
+ State#state{subscription = none, pending_in = false};
+ {to, _, true} ->
+ State#state{pending_in = false},
+ State#state{pending_in = false};
+ {from, false, _} ->
+ recv_push(Config, none, undefined),
+ State#state{subscription = none};
+ {from, true, _} ->
+ recv_push(Config, none, subscribe),
+ State#state{subscription = none};
+ {both, _, _} ->
+ recv_push(Config, to, undefined),
+ State#state{subscription = to};
+ _ ->
+ State
+ end;
+%% RFC6121, A.3.1
+transition(Config, in, subscribe = Type,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ case {Sub, Out, In} of
+ {none, false, false} ->
+ recv_subscription(Config, Type),
+ State#state{pending_in = true};
+ {none, true, false} ->
+ recv_push(Config, none, subscribe),
+ recv_subscription(Config, Type),
+ State#state{pending_in = true};
+ {to, false, false} ->
+ %% BUG: we should not receive roster push in this state!
+ recv_push(Config, to, undefined), % documents current (buggy) server behavior on purpose
+ recv_subscription(Config, Type),
+ State#state{pending_in = true};
+ _ ->
+ State % duplicate inbound requests are swallowed by the server
+ end;
+%% RFC6121, A.3.2
+transition(Config, in, unsubscribe = Type,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ case {Sub, Out, In} of
+ {none, _, true} ->
+ State#state{pending_in = false};
+ {to, _, true} ->
+ recv_push(Config, to, undefined),
+ recv_subscription(Config, Type),
+ State#state{pending_in = false};
+ {from, false, _} ->
+ recv_push(Config, none, undefined),
+ recv_subscription(Config, Type),
+ State#state{subscription = none};
+ {from, true, _} ->
+ recv_push(Config, none, subscribe),
+ recv_subscription(Config, Type),
+ State#state{subscription = none};
+ {both, _, _} ->
+ recv_push(Config, to, undefined),
+ recv_subscription(Config, Type),
+ State#state{subscription = to};
+ _ ->
+ State
+ end;
+%% RFC6121, A.3.3
+transition(Config, in, subscribed = Type,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ case {Sub, Out, In} of
+ {none, true, _} ->
+ recv_push(Config, to, undefined),
+ recv_subscription(Config, Type),
+ recv_presence(Config, available), % approval means we now see the peer's presence
+ State#state{subscription = to, pending_out = false, peer_available = true};
+ {from, true, _} ->
+ recv_push(Config, both, undefined),
+ recv_subscription(Config, Type),
+ recv_presence(Config, available),
+ State#state{subscription = both, pending_out = false, peer_available = true};
+ {from, false, _} ->
+ %% BUG: we should not transition to 'both' in this state
+ recv_push(Config, both, undefined), % documents current (buggy) server behavior on purpose
+ recv_subscription(Config, Type),
+ recv_presence(Config, available),
+ State#state{subscription = both, pending_out = false, peer_available = true};
+ _ ->
+ State
+ end;
+%% RFC6121, A.3.4
+transition(Config, in, unsubscribed = Type,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ case {Sub, Out, In} of
+ {none, true, true} ->
+ %% BUG: we should receive roster push in this state!
+ recv_subscription(Config, Type),
+ State#state{subscription = none, pending_out = false};
+ {none, true, false} ->
+ recv_push(Config, none, undefined),
+ recv_subscription(Config, Type),
+ State#state{subscription = none, pending_out = false};
+ {none, false, false} ->
+ State; % nothing pending: the stanza is silently dropped
+ {to, false, _} ->
+ recv_push(Config, none, undefined),
+ recv_subscription(Config, Type),
+ recv_presence(Config, unavailable),
+ State#state{subscription = none, peer_available = false};
+ {from, true, false} ->
+ recv_push(Config, from, undefined),
+ recv_subscription(Config, Type),
+ State#state{subscription = from, pending_out = false};
+ {both, _, _} ->
+ recv_push(Config, from, undefined),
+ recv_subscription(Config, Type),
+ recv_presence(Config, unavailable),
+ State#state{subscription = from, peer_available = false};
+ _ ->
+ State
+ end;
+%% Outgoing roster remove
+transition(Config, out, remove,
+ #state{subscription = Sub, pending_in = In, pending_out = Out}) ->
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ Item = #roster_item{jid = PeerBareJID, subscription = remove},
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set,
+ sub_els = [#roster_query{items = [Item]}]}),
+ recv_push(Config, remove, undefined),
+ case {Sub, Out, In} of
+ {to, _, _} ->
+ recv_presence(Config, unavailable); % losing 'to' means losing the peer's presence
+ {both, _, _} ->
+ recv_presence(Config, unavailable);
+ _ ->
+ ok
+ end,
+ #state{}; % removal resets the whole relationship
+%% Incoming roster remove
+transition(Config, in, remove,
+ #state{subscription = Sub, pending_in = In, pending_out = Out} = State) ->
+ case {Sub, Out, In} of
+ {none, true, _} ->
+ ok;
+ {from, false, _} ->
+ recv_push(Config, none, undefined),
+ recv_subscription(Config, unsubscribe);
+ {from, true, _} ->
+ recv_push(Config, none, subscribe),
+ recv_subscription(Config, unsubscribe);
+ {to, false, _} ->
+ %% BUG: we should receive push here
+ %% recv_push(Config, none, undefined),
+ recv_presence(Config, unavailable),
+ recv_subscription(Config, unsubscribed),
+ recv_subscription(Config, unsubscribed);
+ {both, _, _} ->
+ recv_presence(Config, unavailable),
+ recv_push(Config, to, undefined),
+ recv_subscription(Config, unsubscribe),
+ recv_push(Config, none, undefined),
+ recv_subscription(Config, unsubscribed);
+ _ ->
+ ok
+ end,
+ State#state{subscription = none}.
+
+actions() -> % every ordered pair of {Dir, Type} actions, with runs of 3+ identical actions collapsed to 2
+ States = [{Dir, Type} || Dir <- [out, in],
+ Type <- [subscribe, subscribed,
+ unsubscribe, unsubscribed,
+ remove]],
+ Actions = lists:flatten([[X, Y] || X <- States, Y <- States]),
+ remove_dups(Actions, []).
+
+remove_dups([X|T], [X,X|_] = Acc) -> % X already appears twice in a row: drop this third copy
+ remove_dups(T, Acc);
+remove_dups([X|T], Acc) ->
+ remove_dups(T, [X|Acc]);
+remove_dups([], Acc) ->
+ lists:reverse(Acc). % accumulator was built in reverse
diff --git a/test/sm_tests.erl b/test/sm_tests.erl
new file mode 100644
index 000000000..0a74d392a
--- /dev/null
+++ b/test/sm_tests.erl
@@ -0,0 +1,99 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(sm_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [send/2, recv/1, close_socket/1, set_opt/3, my_jid/1,
+ recv_message/1, disconnect/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() -> % XEP-0198 stream management cases; resume/resume_failed depend on state saved by 'enable'
+ {sm_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(enable),
+ single_test(resume),
+ single_test(resume_failed)]}.
+
+feature_enabled(Config) -> % the stream features must have advertised stream management
+ true = ?config(sm, Config),
+ disconnect(Config).
+
+enable(Config) -> % enable SM with resumption, check the server's ack counter, then drop the socket without closing the session
+ Server = ?config(server, Config),
+ ServerJID = jid:make(<<"">>, Server, <<"">>),
+ %% Send messages of type 'headline' so the server discards them silently
+ Msg = #message{to = ServerJID, type = headline,
+ body = [#text{data = <<"body">>}]},
+ %% Enable the session management with resumption enabled
+ send(Config, #sm_enable{resume = true, xmlns = ?NS_STREAM_MGMT_3}),
+ #sm_enabled{id = ID, resume = true} = recv(Config), % ID is the resumption token used by the next test
+ %% Initial request; 'h' should be 0.
+ send(Config, #sm_r{xmlns = ?NS_STREAM_MGMT_3}),
+ #sm_a{h = 0} = recv(Config),
+ %% Sending three messages and requesting again; 'h' should be 3.
+ send(Config, Msg),
+ send(Config, Msg),
+ send(Config, Msg),
+ send(Config, #sm_r{xmlns = ?NS_STREAM_MGMT_3}),
+ #sm_a{h = 3} = recv(Config),
+ close_socket(Config), % abrupt close: the session stays resumable on the server
+ {save_config, set_opt(sm_previd, ID, Config)}. % pass the resumption token to the 'resume' test
+
+resume(Config) -> % resume the session from 'enable' and receive the message queued while we were gone
+ {_, SMConfig} = ?config(saved_config, Config),
+ ID = ?config(sm_previd, SMConfig), % resumption token saved by the previous test
+ Server = ?config(server, Config),
+ ServerJID = jid:make(<<"">>, Server, <<"">>),
+ MyJID = my_jid(Config),
+ Txt = #text{data = <<"body">>},
+ Msg = #message{from = ServerJID, to = MyJID, body = [Txt]},
+ %% Route message. The message should be queued by the C2S process.
+ ejabberd_router:route(ServerJID, MyJID, Msg),
+ send(Config, #sm_resume{previd = ID, h = 0, xmlns = ?NS_STREAM_MGMT_3}),
+ #sm_resumed{previd = ID, h = 3} = recv(Config), % server still counts the 3 stanzas from 'enable'
+ #message{from = ServerJID, to = MyJID, body = [Txt]} = recv_message(Config), % the queued message is replayed
+ #sm_r{} = recv(Config), % server asks us to ack the replay
+ send(Config, #sm_a{h = 1, xmlns = ?NS_STREAM_MGMT_3}),
+ %% Send another stanza to increment the server's 'h' for sm_resume_failed.
+ send(Config, #presence{to = ServerJID}),
+ close_socket(Config),
+ {save_config, set_opt(sm_previd, ID, Config)}.
+
+resume_failed(Config) -> % resuming after the session timed out must fail with item-not-found but report the final 'h'
+ {_, SMConfig} = ?config(saved_config, Config),
+ ID = ?config(sm_previd, SMConfig),
+ ct:sleep(5000), % Wait for session to time out.
+ send(Config, #sm_resume{previd = ID, h = 1, xmlns = ?NS_STREAM_MGMT_3}),
+ #sm_failed{reason = 'item-not-found', h = 4} = recv(Config), % h = 4: the 3 messages plus the presence from 'resume'
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() -> % no two-client stream-management cases
+ {sm_master_slave, [sequence], []}.
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) -> % map test name T to its CT case atom, e.g. sm_<T>
+ list_to_atom("sm_" ++ atom_to_list(T)).
+
+master_slave_test(T) -> % CT group for test T: runs sm_<T>_master and sm_<T>_slave in parallel
+ {list_to_atom("sm_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("sm_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("sm_" ++ atom_to_list(T) ++ "_slave")]}.
diff --git a/test/suite.erl b/test/suite.erl
index c5593c4cf..3d832dd59 100644
--- a/test/suite.erl
+++ b/test/suite.erl
@@ -13,6 +13,7 @@
-include("suite.hrl").
-include_lib("kernel/include/file.hrl").
+-include("mod_roster.hrl").
%%%===================================================================
%%% API
@@ -27,14 +28,22 @@ init_config(Config) ->
SASLPath = filename:join([PrivDir, "sasl.log"]),
MnesiaDir = filename:join([PrivDir, "mnesia"]),
CertFile = filename:join([DataDir, "cert.pem"]),
+ SelfSignedCertFile = filename:join([DataDir, "self-signed-cert.pem"]),
+ CAFile = filename:join([DataDir, "ca.pem"]),
{ok, CWD} = file:get_cwd(),
{ok, _} = file:copy(CertFile, filename:join([CWD, "cert.pem"])),
+ {ok, _} = file:copy(SelfSignedCertFile,
+ filename:join([CWD, "self-signed-cert.pem"])),
+ {ok, _} = file:copy(CAFile, filename:join([CWD, "ca.pem"])),
{ok, CfgContentTpl} = file:read_file(ConfigPathTpl),
+ Password = <<"password!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>,
CfgContent = process_config_tpl(CfgContentTpl, [
{c2s_port, 5222},
{loglevel, 4},
{s2s_port, 5269},
+ {component_port, 5270},
{web_port, 5280},
+ {password, Password},
{mysql_server, <<"localhost">>},
{mysql_port, 3306},
{mysql_db, <<"ejabberd_test">>},
@@ -58,17 +67,35 @@ init_config(Config) ->
application:set_env(mnesia, dir, MnesiaDir),
[{server_port, ct:get_config(c2s_port, 5222)},
{server_host, "localhost"},
+ {component_port, ct:get_config(component_port, 5270)},
+ {s2s_port, ct:get_config(s2s_port, 5269)},
{server, ?COMMON_VHOST},
{user, <<"test_single!#$%^*()`~+-;_=[]{}|\\">>},
+ {nick, <<"nick!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{master_nick, <<"master_nick!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{slave_nick, <<"slave_nick!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{room_subject, <<"hello, world!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{certfile, CertFile},
+ {persistent_room, true},
+ {anonymous, false},
+ {type, client},
+ {xmlns, ?NS_CLIENT},
+ {ns_stream, ?NS_STREAM},
+ {stream_version, {1, 0}},
+ {stream_id, <<"">>},
+ {stream_from, <<"">>},
+ {db_xmlns, <<"">>},
+ {mechs, []},
+ {rosterver, false},
+ {lang, <<"en">>},
{base_dir, BaseDir},
+ {socket, undefined},
+ {pubsub_node, <<"node!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
+ {pubsub_node_title, <<"title!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{resource, <<"resource!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{master_resource, <<"master_resource!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
{slave_resource, <<"slave_resource!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
- {password, <<"password!@#$%^&*()'\"`~<>+-/;:_=[]{}|\\">>},
+ {password, Password},
{backends, get_config_backends()}
|Config].
@@ -115,47 +142,104 @@ process_config_tpl(Content, [{Name, DefaultValue} | Rest]) ->
V3 ->
V3
end,
- NewContent = binary:replace(Content, <<"@@",(atom_to_binary(Name, latin1))/binary, "@@">>, Val),
+ NewContent = binary:replace(Content,
+ <<"@@",(atom_to_binary(Name,latin1))/binary, "@@">>,
+ Val, [global]),
process_config_tpl(NewContent, Rest).
+stream_header(Config) ->
+ To = case ?config(server, Config) of
+ <<"">> -> undefined;
+ Server -> jid:make(Server)
+ end,
+ From = case ?config(stream_from, Config) of
+ <<"">> -> undefined;
+ Frm -> jid:make(Frm)
+ end,
+ #stream_start{to = To,
+ from = From,
+ lang = ?config(lang, Config),
+ version = ?config(stream_version, Config),
+ xmlns = ?config(xmlns, Config),
+ db_xmlns = ?config(db_xmlns, Config),
+ stream_xmlns = ?config(ns_stream, Config)}.
connect(Config) ->
- {ok, Sock} = ejabberd_socket:connect(
- ?config(server_host, Config),
- ?config(server_port, Config),
- [binary, {packet, 0}, {active, false}]),
- init_stream(set_opt(socket, Sock, Config)).
+ NewConfig = init_stream(Config),
+ case ?config(type, NewConfig) of
+ client -> process_stream_features(NewConfig);
+ server -> process_stream_features(NewConfig);
+ component -> NewConfig
+ end.
+
+tcp_connect(Config) ->
+ case ?config(socket, Config) of
+ undefined ->
+ Owner = self(),
+ NS = case ?config(type, Config) of
+ client -> ?NS_CLIENT;
+ server -> ?NS_SERVER;
+ component -> ?NS_COMPONENT
+ end,
+ ReceiverPid = spawn(fun() -> receiver(NS, Owner) end),
+ {ok, Sock} = ejabberd_socket:connect(
+ ?config(server_host, Config),
+ ?config(server_port, Config),
+ [binary, {packet, 0}, {active, false}],
+ infinity, ReceiverPid),
+ set_opt(socket, Sock, Config);
+ _ ->
+ Config
+ end.
init_stream(Config) ->
- ok = send_text(Config, io_lib:format(?STREAM_HEADER,
- [?config(server, Config)])),
- {xmlstreamstart, <<"stream:stream">>, Attrs} = recv(),
- <<"jabber:client">> = fxml:get_attr_s(<<"xmlns">>, Attrs),
- <<"1.0">> = fxml:get_attr_s(<<"version">>, Attrs),
- #stream_features{sub_els = Fs} = recv(),
- Mechs = lists:flatmap(
- fun(#sasl_mechanisms{list = Ms}) ->
- Ms;
- (_) ->
- []
- end, Fs),
- lists:foldl(
- fun(#feature_register{}, Acc) ->
- set_opt(register, true, Acc);
- (#starttls{}, Acc) ->
- set_opt(starttls, true, Acc);
- (#compression{methods = Ms}, Acc) ->
- set_opt(compression, Ms, Acc);
- (_, Acc) ->
- Acc
- end, set_opt(mechs, Mechs, Config), Fs).
+ Version = ?config(stream_version, Config),
+ NewConfig = tcp_connect(Config),
+ send(NewConfig, stream_header(NewConfig)),
+ XMLNS = case ?config(type, Config) of
+ client -> ?NS_CLIENT;
+ component -> ?NS_COMPONENT;
+ server -> ?NS_SERVER
+ end,
+ receive
+ #stream_start{id = ID, xmlns = XMLNS, version = Version} ->
+ set_opt(stream_id, ID, NewConfig)
+ end.
+
+process_stream_features(Config) ->
+ receive
+ #stream_features{sub_els = Fs} ->
+ Mechs = lists:flatmap(
+ fun(#sasl_mechanisms{list = Ms}) ->
+ Ms;
+ (_) ->
+ []
+ end, Fs),
+ lists:foldl(
+ fun(#feature_register{}, Acc) ->
+ set_opt(register, true, Acc);
+ (#starttls{}, Acc) ->
+ set_opt(starttls, true, Acc);
+ (#compression{methods = Ms}, Acc) ->
+ set_opt(compression, Ms, Acc);
+ (_, Acc) ->
+ Acc
+ end, set_opt(mechs, Mechs, Config), Fs)
+ end.
disconnect(Config) ->
+ ct:comment("Disconnecting"),
Socket = ?config(socket, Config),
- ok = ejabberd_socket:send(Socket, ?STREAM_TRAILER),
- {xmlstreamend, <<"stream:stream">>} = recv(),
+ try
+ ok = send_text(Config, ?STREAM_TRAILER)
+ catch exit:normal ->
+ ok
+ end,
+ receive {xmlstreamend, <<"stream:stream">>} -> ok end,
+ flush(Config),
ejabberd_socket:close(Socket),
- Config.
+ ct:comment("Disconnected"),
+ set_opt(socket, undefined, Config).
close_socket(Config) ->
Socket = ?config(socket, Config),
@@ -163,76 +247,199 @@ close_socket(Config) ->
Config.
starttls(Config) ->
+ starttls(Config, false).
+
+starttls(Config, ShouldFail) ->
send(Config, #starttls{}),
- #starttls_proceed{} = recv(),
- TLSSocket = ejabberd_socket:starttls(
- ?config(socket, Config),
- [{certfile, ?config(certfile, Config)},
- connect]),
- init_stream(set_opt(socket, TLSSocket, Config)).
+ receive
+ #starttls_proceed{} when ShouldFail ->
+ ct:fail(starttls_should_have_failed);
+ #starttls_failure{} when ShouldFail ->
+ Config;
+ #starttls_failure{} ->
+ ct:fail(starttls_failed);
+ #starttls_proceed{} ->
+ TLSSocket = ejabberd_socket:starttls(
+ ?config(socket, Config),
+ [{certfile, ?config(certfile, Config)},
+ connect]),
+ set_opt(socket, TLSSocket, Config)
+ end.
zlib(Config) ->
send(Config, #compress{methods = [<<"zlib">>]}),
- #compressed{} = recv(),
+ receive #compressed{} -> ok end,
ZlibSocket = ejabberd_socket:compress(?config(socket, Config)),
- init_stream(set_opt(socket, ZlibSocket, Config)).
+ process_stream_features(init_stream(set_opt(socket, ZlibSocket, Config))).
auth(Config) ->
+ auth(Config, false).
+
+auth(Config, ShouldFail) ->
+ Type = ?config(type, Config),
+ IsAnonymous = ?config(anonymous, Config),
Mechs = ?config(mechs, Config),
HaveMD5 = lists:member(<<"DIGEST-MD5">>, Mechs),
HavePLAIN = lists:member(<<"PLAIN">>, Mechs),
- if HavePLAIN ->
- auth_SASL(<<"PLAIN">>, Config);
+ HaveExternal = lists:member(<<"EXTERNAL">>, Mechs),
+ HaveAnonymous = lists:member(<<"ANONYMOUS">>, Mechs),
+ if HaveAnonymous and IsAnonymous ->
+ auth_SASL(<<"ANONYMOUS">>, Config, ShouldFail);
+ HavePLAIN ->
+ auth_SASL(<<"PLAIN">>, Config, ShouldFail);
HaveMD5 ->
- auth_SASL(<<"DIGEST-MD5">>, Config);
+ auth_SASL(<<"DIGEST-MD5">>, Config, ShouldFail);
+ HaveExternal andalso Type == server ->
+ auth_SASL(<<"EXTERNAL">>, Config, ShouldFail);
+ Type == client ->
+ auth_legacy(Config, false, ShouldFail);
+ Type == component ->
+ auth_component(Config, ShouldFail);
true ->
- ct:fail(no_sasl_mechanisms_available)
+ ct:fail(no_known_sasl_mechanism_available)
end.
bind(Config) ->
- #iq{type = result, sub_els = [#bind{}]} =
- send_recv(
- Config,
- #iq{type = set,
- sub_els = [#bind{resource = ?config(resource, Config)}]}),
- Config.
+ U = ?config(user, Config),
+ S = ?config(server, Config),
+ R = ?config(resource, Config),
+ case ?config(type, Config) of
+ client ->
+ #iq{type = result, sub_els = [#bind{jid = JID}]} =
+ send_recv(
+ Config, #iq{type = set, sub_els = [#bind{resource = R}]}),
+ case ?config(anonymous, Config) of
+ false ->
+ {U, S, R} = jid:tolower(JID),
+ Config;
+ true ->
+ {User, S, Resource} = jid:tolower(JID),
+ set_opt(user, User, set_opt(resource, Resource, Config))
+ end;
+ component ->
+ Config
+ end.
open_session(Config) ->
- #iq{type = result, sub_els = []} =
- send_recv(Config, #iq{type = set, sub_els = [#session{}]}),
+ open_session(Config, false).
+
+open_session(Config, Force) ->
+ if Force ->
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set, sub_els = [#xmpp_session{}]});
+ true ->
+ ok
+ end,
Config.
+auth_legacy(Config, IsDigest) ->
+ auth_legacy(Config, IsDigest, false).
+
+auth_legacy(Config, IsDigest, ShouldFail) ->
+ ServerJID = server_jid(Config),
+ U = ?config(user, Config),
+ R = ?config(resource, Config),
+ P = ?config(password, Config),
+ #iq{type = result,
+ from = ServerJID,
+ sub_els = [#legacy_auth{username = <<"">>,
+ password = <<"">>,
+ resource = <<"">>} = Auth]} =
+ send_recv(Config,
+ #iq{to = ServerJID, type = get,
+ sub_els = [#legacy_auth{}]}),
+ Res = case Auth#legacy_auth.digest of
+ <<"">> when IsDigest ->
+ StreamID = ?config(stream_id, Config),
+ D = p1_sha:sha(<<StreamID/binary, P/binary>>),
+ send_recv(Config, #iq{to = ServerJID, type = set,
+ sub_els = [#legacy_auth{username = U,
+ resource = R,
+ digest = D}]});
+ _ when not IsDigest ->
+ send_recv(Config, #iq{to = ServerJID, type = set,
+ sub_els = [#legacy_auth{username = U,
+ resource = R,
+ password = P}]})
+ end,
+ case Res of
+ #iq{from = ServerJID, type = result, sub_els = []} ->
+ if ShouldFail ->
+ ct:fail(legacy_auth_should_have_failed);
+ true ->
+ Config
+ end;
+ #iq{from = ServerJID, type = error} ->
+ if ShouldFail ->
+ Config;
+ true ->
+ ct:fail(legacy_auth_failed)
+ end
+ end.
+
+auth_component(Config, ShouldFail) ->
+ StreamID = ?config(stream_id, Config),
+ Password = ?config(password, Config),
+ Digest = p1_sha:sha(<<StreamID/binary, Password/binary>>),
+ send(Config, #handshake{data = Digest}),
+ receive
+ #handshake{} when ShouldFail ->
+ ct:fail(component_auth_should_have_failed);
+ #handshake{} ->
+ Config;
+ #stream_error{reason = 'not-authorized'} when ShouldFail ->
+ Config;
+ #stream_error{reason = 'not-authorized'} ->
+ ct:fail(component_auth_failed)
+ end.
+
auth_SASL(Mech, Config) ->
+ auth_SASL(Mech, Config, false).
+
+auth_SASL(Mech, Config, ShouldFail) ->
{Response, SASL} = sasl_new(Mech,
?config(user, Config),
?config(server, Config),
?config(password, Config)),
send(Config, #sasl_auth{mechanism = Mech, text = Response}),
- wait_auth_SASL_result(set_opt(sasl, SASL, Config)).
+ wait_auth_SASL_result(set_opt(sasl, SASL, Config), ShouldFail).
-wait_auth_SASL_result(Config) ->
- case recv() of
+wait_auth_SASL_result(Config, ShouldFail) ->
+ receive
+ #sasl_success{} when ShouldFail ->
+ ct:fail(sasl_auth_should_have_failed);
#sasl_success{} ->
ejabberd_socket:reset_stream(?config(socket, Config)),
- send_text(Config,
- io_lib:format(?STREAM_HEADER,
- [?config(server, Config)])),
- {xmlstreamstart, <<"stream:stream">>, Attrs} = recv(),
- <<"jabber:client">> = fxml:get_attr_s(<<"xmlns">>, Attrs),
- <<"1.0">> = fxml:get_attr_s(<<"version">>, Attrs),
- #stream_features{sub_els = Fs} = recv(),
- lists:foldl(
- fun(#feature_sm{}, ConfigAcc) ->
- set_opt(sm, true, ConfigAcc);
- (#feature_csi{}, ConfigAcc) ->
- set_opt(csi, true, ConfigAcc);
- (_, ConfigAcc) ->
- ConfigAcc
- end, Config, Fs);
+ send(Config, stream_header(Config)),
+ Type = ?config(type, Config),
+ NS = if Type == client -> ?NS_CLIENT;
+ Type == server -> ?NS_SERVER
+ end,
+ receive #stream_start{xmlns = NS, version = {1,0}} -> ok end,
+ receive #stream_features{sub_els = Fs} ->
+ if Type == client ->
+ #xmpp_session{optional = true} =
+ lists:keyfind(xmpp_session, 1, Fs);
+ true ->
+ ok
+ end,
+ lists:foldl(
+ fun(#feature_sm{}, ConfigAcc) ->
+ set_opt(sm, true, ConfigAcc);
+ (#feature_csi{}, ConfigAcc) ->
+ set_opt(csi, true, ConfigAcc);
+ (#rosterver_feature{}, ConfigAcc) ->
+ set_opt(rosterver, true, ConfigAcc);
+ (_, ConfigAcc) ->
+ ConfigAcc
+ end, Config, Fs)
+ end;
#sasl_challenge{text = ClientIn} ->
{Response, SASL} = (?config(sasl, Config))(ClientIn),
send(Config, #sasl_response{text = Response}),
- wait_auth_SASL_result(set_opt(sasl, SASL, Config));
+ wait_auth_SASL_result(set_opt(sasl, SASL, Config), ShouldFail);
+ #sasl_failure{} when ShouldFail ->
+ Config;
#sasl_failure{} ->
ct:fail(sasl_auth_failed)
end.
@@ -249,28 +456,44 @@ match_failure(Received, [Match]) when is_list(Match)->
match_failure(Received, Matches) ->
ct:fail("Received input:~n~n~p~n~ndon't match expected patterns:~n~n~p", [Received, Matches]).
-recv() ->
+recv(_Config) ->
receive
- {'$gen_event', {xmlstreamelement, El}} ->
- Pkt = xmpp_codec:decode(fix_ns(El)),
- ct:pal("recv: ~p ->~n~s", [El, xmpp_codec:pp(Pkt)]),
- Pkt;
- {'$gen_event', Event} ->
- Event
+ {fail, El, Why} ->
+ ct:fail("recv failed: ~p->~n~s",
+ [El, xmpp:format_error(Why)]);
+ Event ->
+ Event
+ end.
+
+recv_iq(_Config) ->
+ receive #iq{} = IQ -> IQ end.
+
+recv_presence(_Config) ->
+ receive #presence{} = Pres -> Pres end.
+
+recv_message(_Config) ->
+ receive #message{} = Msg -> Msg end.
+
+decode_stream_element(NS, El) ->
+ decode(El, NS, []).
+
+format_element(El) ->
+ case erlang:function_exported(ct, log, 5) of
+ true -> ejabberd_web_admin:pretty_print_xml(El);
+ false -> io_lib:format("~p~n", [El])
end.
-fix_ns(#xmlel{name = Tag, attrs = Attrs} = El)
- when Tag == <<"stream:features">>; Tag == <<"stream:error">> ->
- NewAttrs = [{<<"xmlns">>, <<"http://etherx.jabber.org/streams">>}
- |lists:keydelete(<<"xmlns">>, 1, Attrs)],
- El#xmlel{attrs = NewAttrs};
-fix_ns(#xmlel{name = Tag, attrs = Attrs} = El)
- when Tag == <<"message">>; Tag == <<"iq">>; Tag == <<"presence">> ->
- NewAttrs = [{<<"xmlns">>, <<"jabber:client">>}
- |lists:keydelete(<<"xmlns">>, 1, Attrs)],
- El#xmlel{attrs = NewAttrs};
-fix_ns(El) ->
- El.
+decode(El, NS, Opts) ->
+ try
+ Pkt = xmpp:decode(El, NS, Opts),
+ ct:pal("RECV:~n~s~n~s",
+ [format_element(El), xmpp:pp(Pkt)]),
+ Pkt
+ catch _:{xmpp_codec, Why} ->
+ ct:pal("recv failed: ~p->~n~s",
+ [El, xmpp:format_error(Why)]),
+ erlang:error({xmpp_codec, Why})
+ end.
send_text(Config, Text) ->
ejabberd_socket:send(?config(socket, Config), Text).
@@ -289,18 +512,35 @@ send(State, Pkt) ->
_ ->
{undefined, Pkt}
end,
- El = xmpp_codec:encode(NewPkt),
- ct:pal("sent: ~p <-~n~s", [El, xmpp_codec:pp(NewPkt)]),
- ok = send_text(State, fxml:element_to_binary(El)),
+ El = xmpp:encode(NewPkt),
+ ct:pal("SENT:~n~s~n~s",
+ [format_element(El), xmpp:pp(NewPkt)]),
+ Data = case NewPkt of
+ #stream_start{} -> fxml:element_to_header(El);
+ _ -> fxml:element_to_binary(El)
+ end,
+ ok = send_text(State, Data),
NewID.
-send_recv(State, IQ) ->
+send_recv(State, #message{} = Msg) ->
+ ID = send(State, Msg),
+ receive #message{id = ID} = Result -> Result end;
+send_recv(State, #presence{} = Pres) ->
+ ID = send(State, Pres),
+ receive #presence{id = ID} = Result -> Result end;
+send_recv(State, #iq{} = IQ) ->
ID = send(State, IQ),
- #iq{id = ID} = recv().
+ receive #iq{id = ID} = Result -> Result end.
sasl_new(<<"PLAIN">>, User, Server, Password) ->
{<<User/binary, $@, Server/binary, 0, User/binary, 0, Password/binary>>,
fun (_) -> {error, <<"Invalid SASL challenge">>} end};
+sasl_new(<<"EXTERNAL">>, _User, _Server, _Password) ->
+ {<<"">>,
+ fun(_) -> ct:fail(sasl_challenge_is_not_expected) end};
+sasl_new(<<"ANONYMOUS">>, _User, _Server, _Password) ->
+ {<<"">>,
+ fun(_) -> ct:fail(sasl_challenge_is_not_expected) end};
sasl_new(<<"DIGEST-MD5">>, User, Server, Password) ->
{<<"">>,
fun (ServerIn) ->
@@ -395,6 +635,20 @@ muc_room_jid(Config) ->
Server = ?config(server, Config),
jid:make(<<"test">>, <<"conference.", Server/binary>>, <<>>).
+my_muc_jid(Config) ->
+ Nick = ?config(nick, Config),
+ RoomJID = muc_room_jid(Config),
+ jid:replace_resource(RoomJID, Nick).
+
+peer_muc_jid(Config) ->
+ PeerNick = ?config(peer_nick, Config),
+ RoomJID = muc_room_jid(Config),
+ jid:replace_resource(RoomJID, PeerNick).
+
+alt_room_jid(Config) ->
+ Server = ?config(server, Config),
+ jid:make(<<"alt">>, <<"conference.", Server/binary>>, <<>>).
+
mix_jid(Config) ->
Server = ?config(server, Config),
jid:make(<<>>, <<"mix.", Server/binary>>, <<>>).
@@ -404,9 +658,9 @@ mix_room_jid(Config) ->
jid:make(<<"test">>, <<"mix.", Server/binary>>, <<>>).
id() ->
- id(undefined).
+ id(<<>>).
-id(undefined) ->
+id(<<>>) ->
randoms:get_string();
id(ID) ->
ID.
@@ -415,6 +669,7 @@ get_features(Config) ->
get_features(Config, server_jid(Config)).
get_features(Config, To) ->
+ ct:comment("Getting features of ~s", [jid:to_string(To)]),
#iq{type = result, sub_els = [#disco_info{features = Features}]} =
send_recv(Config, #iq{type = get, sub_els = [#disco_info{}], to = To}),
Features.
@@ -430,16 +685,82 @@ set_opt(Opt, Val, Config) ->
[{Opt, Val}|lists:keydelete(Opt, 1, Config)].
wait_for_master(Config) ->
- put_event(Config, slave_ready),
- master_ready = get_event(Config).
+ put_event(Config, peer_ready),
+ case get_event(Config) of
+ peer_ready ->
+ ok;
+ Other ->
+ suite:match_failure(Other, peer_ready)
+ end.
wait_for_slave(Config) ->
- put_event(Config, master_ready),
- slave_ready = get_event(Config).
+ put_event(Config, peer_ready),
+ case get_event(Config) of
+ peer_ready ->
+ ok;
+ Other ->
+ suite:match_failure(Other, peer_ready)
+ end.
make_iq_result(#iq{from = From} = IQ) ->
IQ#iq{type = result, to = From, from = undefined, sub_els = []}.
+self_presence(Config, Type) ->
+ MyJID = my_jid(Config),
+ ct:comment("Sending self-presence"),
+ #presence{type = Type, from = MyJID} =
+ send_recv(Config, #presence{type = Type}).
+
+set_roster(Config, Subscription, Groups) ->
+ MyJID = my_jid(Config),
+ {U, S, _} = jid:tolower(MyJID),
+ PeerJID = ?config(peer, Config),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ PeerLJID = jid:tolower(PeerBareJID),
+ ct:comment("Adding ~s to roster with subscription '~s' in groups ~p",
+ [jid:to_string(PeerBareJID), Subscription, Groups]),
+ {atomic, _} = mod_roster:set_roster(#roster{usj = {U, S, PeerLJID},
+ us = {U, S},
+ jid = PeerLJID,
+ subscription = Subscription,
+ groups = Groups}),
+ Config.
+
+del_roster(Config) ->
+ del_roster(Config, ?config(peer, Config)).
+
+del_roster(Config, PeerJID) ->
+ MyJID = my_jid(Config),
+ {U, S, _} = jid:tolower(MyJID),
+ PeerBareJID = jid:remove_resource(PeerJID),
+ PeerLJID = jid:tolower(PeerBareJID),
+ ct:comment("Removing ~s from roster", [jid:to_string(PeerBareJID)]),
+ {atomic, _} = mod_roster:del_roster(U, S, PeerLJID),
+ Config.
+
+get_roster(Config) ->
+ {LUser, LServer, _} = jid:tolower(my_jid(Config)),
+ mod_roster:get_roster(LUser, LServer).
+
+receiver(NS, Owner) ->
+ MRef = erlang:monitor(process, Owner),
+ receiver(NS, Owner, MRef).
+
+receiver(NS, Owner, MRef) ->
+ receive
+ {'$gen_event', {xmlstreamelement, El}} ->
+ Owner ! decode_stream_element(NS, El),
+ receiver(NS, Owner, MRef);
+ {'$gen_event', {xmlstreamstart, Name, Attrs}} ->
+ Owner ! decode(#xmlel{name = Name, attrs = Attrs}, <<>>, []),
+ receiver(NS, Owner, MRef);
+ {'$gen_event', Event} ->
+ Owner ! Event,
+ receiver(NS, Owner, MRef);
+ {'DOWN', MRef, process, Owner, _} ->
+ ok
+ end.
+
%%%===================================================================
%%% Clients puts and gets events via this relay.
%%%===================================================================
@@ -456,6 +777,7 @@ event_relay() ->
event_relay(Events, Subscribers) ->
receive
{subscribe, From} ->
+ erlang:monitor(process, From),
From ! {ok, self()},
lists:foreach(
fun(Event) -> From ! {event, Event, self()}
@@ -469,7 +791,19 @@ event_relay(Events, Subscribers) ->
(_) ->
ok
end, Subscribers),
- event_relay([Event|Events], Subscribers)
+ event_relay([Event|Events], Subscribers);
+ {'DOWN', _MRef, process, Pid, _Info} ->
+ case lists:member(Pid, Subscribers) of
+ true ->
+ NewSubscribers = lists:delete(Pid, Subscribers),
+ lists:foreach(
+ fun(Subscriber) ->
+ Subscriber ! {event, peer_down, self()}
+ end, NewSubscribers),
+ event_relay(Events, NewSubscribers);
+ false ->
+ event_relay(Events, Subscribers)
+ end
end.
subscribe_to_events(Config) ->
@@ -494,3 +828,12 @@ get_event(Config) ->
{event, Event, Relay} ->
Event
end.
+
+flush(Config) ->
+ receive
+ {event, peer_down, _} -> flush(Config);
+ closed -> flush(Config);
+ Msg -> ct:fail({unexpected_msg, Msg})
+ after 0 ->
+ ok
+ end.
diff --git a/test/suite.hrl b/test/suite.hrl
index fb6b4f3ac..00239f8cf 100644
--- a/test/suite.hrl
+++ b/test/suite.hrl
@@ -5,12 +5,6 @@
-include("mod_proxy65.hrl").
-include("xmpp_codec.hrl").
--define(STREAM_HEADER,
- <<"<?xml version='1.0'?><stream:stream "
- "xmlns:stream='http://etherx.jabber.org/stream"
- "s' xmlns='jabber:client' to='~s' version='1.0"
- "'>">>).
-
-define(STREAM_TRAILER, <<"</stream:stream>">>).
-define(PUBSUB(Node), <<(?NS_PUBSUB)/binary, "#", Node>>).
@@ -19,7 +13,7 @@
-define(recv1(P1),
P1 = (fun() ->
- V = recv(),
+ V = recv(Config),
case V of
P1 -> V;
_ -> suite:match_failure([V], [??P1])
@@ -28,7 +22,7 @@
-define(recv2(P1, P2),
(fun() ->
- case {R1 = recv(), R2 = recv()} of
+ case {R1 = recv(Config), R2 = recv(Config)} of
{P1, P2} -> {R1, R2};
{P2, P1} -> {R2, R1};
{P1, V1} -> suite:match_failure([V1], [P2]);
@@ -41,7 +35,7 @@
-define(recv3(P1, P2, P3),
(fun() ->
- case R3 = recv() of
+ case R3 = recv(Config) of
P1 -> insert(R3, 1, ?recv2(P2, P3));
P2 -> insert(R3, 2, ?recv2(P1, P3));
P3 -> insert(R3, 3, ?recv2(P1, P2));
@@ -51,7 +45,7 @@
-define(recv4(P1, P2, P3, P4),
(fun() ->
- case R4 = recv() of
+ case R4 = recv(Config) of
P1 -> insert(R4, 1, ?recv3(P2, P3, P4));
P2 -> insert(R4, 2, ?recv3(P1, P3, P4));
P3 -> insert(R4, 3, ?recv3(P1, P2, P4));
@@ -62,7 +56,7 @@
-define(recv5(P1, P2, P3, P4, P5),
(fun() ->
- case R5 = recv() of
+ case R5 = recv(Config) of
P1 -> insert(R5, 1, ?recv4(P2, P3, P4, P5));
P2 -> insert(R5, 2, ?recv4(P1, P3, P4, P5));
P3 -> insert(R5, 3, ?recv4(P1, P2, P4, P5));
@@ -72,6 +66,14 @@
end
end)()).
+-define(match(Pattern, Result),
+ case Result of
+ Pattern ->
+ Pattern;
+ Mismatch ->
+ suite:match_failure([Mismatch], [??Pattern])
+ end).
+
-define(COMMON_VHOST, <<"localhost">>).
-define(MNESIA_VHOST, <<"mnesia.localhost">>).
-define(REDIS_VHOST, <<"redis.localhost">>).
@@ -81,6 +83,7 @@
-define(LDAP_VHOST, <<"ldap.localhost">>).
-define(EXTAUTH_VHOST, <<"extauth.localhost">>).
-define(RIAK_VHOST, <<"riak.localhost">>).
+-define(S2S_VHOST, <<"s2s.localhost">>).
insert(Val, N, Tuple) ->
L = tuple_to_list(Tuple),
diff --git a/test/vcard_tests.erl b/test/vcard_tests.erl
new file mode 100644
index 000000000..26cfdc92b
--- /dev/null
+++ b/test/vcard_tests.erl
@@ -0,0 +1,133 @@
+%%%-------------------------------------------------------------------
+%%% @author Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%% @copyright (C) 2016, Evgeny Khramtsov
+%%% @doc
+%%%
+%%% @end
+%%% Created : 16 Nov 2016 by Evgeny Khramtsov <ekhramtsov@process-one.net>
+%%%-------------------------------------------------------------------
+-module(vcard_tests).
+
+%% API
+-compile(export_all).
+-import(suite, [send_recv/2, disconnect/1, is_feature_advertised/2,
+ is_feature_advertised/3, server_jid/1,
+ my_jid/1, wait_for_slave/1, wait_for_master/1,
+ recv_presence/1, recv/1]).
+
+-include("suite.hrl").
+
+%%%===================================================================
+%%% API
+%%%===================================================================
+%%%===================================================================
+%%% Single user tests
+%%%===================================================================
+single_cases() ->
+ {vcard_single, [sequence],
+ [single_test(feature_enabled),
+ single_test(get_set),
+ single_test(service_vcard)]}.
+
+feature_enabled(Config) ->
+ BareMyJID = jid:remove_resource(my_jid(Config)),
+ true = is_feature_advertised(Config, ?NS_VCARD),
+ true = is_feature_advertised(Config, ?NS_VCARD, BareMyJID),
+ disconnect(Config).
+
+get_set(Config) ->
+ VCard =
+ #vcard_temp{fn = <<"Peter Saint-Andre">>,
+ n = #vcard_name{family = <<"Saint-Andre">>,
+ given = <<"Peter">>},
+ nickname = <<"stpeter">>,
+ bday = <<"1966-08-06">>,
+ adr = [#vcard_adr{work = true,
+ extadd = <<"Suite 600">>,
+ street = <<"1899 Wynkoop Street">>,
+ locality = <<"Denver">>,
+ region = <<"CO">>,
+ pcode = <<"80202">>,
+ ctry = <<"USA">>},
+ #vcard_adr{home = true,
+ locality = <<"Denver">>,
+ region = <<"CO">>,
+ pcode = <<"80209">>,
+ ctry = <<"USA">>}],
+ tel = [#vcard_tel{work = true,voice = true,
+ number = <<"303-308-3282">>},
+ #vcard_tel{home = true,voice = true,
+ number = <<"303-555-1212">>}],
+ email = [#vcard_email{internet = true,pref = true,
+ userid = <<"stpeter@jabber.org">>}],
+ jabberid = <<"stpeter@jabber.org">>,
+ title = <<"Executive Director">>,role = <<"Patron Saint">>,
+ org = #vcard_org{name = <<"XMPP Standards Foundation">>},
+ url = <<"http://www.xmpp.org/xsf/people/stpeter.shtml">>,
+ desc = <<"More information about me is located on my "
+ "personal website: http://www.saint-andre.com/">>},
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set, sub_els = [VCard]}),
+ %% TODO: check if VCard == VCard1.
+ #iq{type = result, sub_els = [_VCard1]} =
+ send_recv(Config, #iq{type = get, sub_els = [#vcard_temp{}]}),
+ disconnect(Config).
+
+service_vcard(Config) ->
+ JID = server_jid(Config),
+ ct:comment("Retreiving vCard from ~s", [jid:to_string(JID)]),
+ #iq{type = result, sub_els = [#vcard_temp{}]} =
+ send_recv(Config, #iq{type = get, to = JID, sub_els = [#vcard_temp{}]}),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Master-slave tests
+%%%===================================================================
+master_slave_cases() ->
+ {vcard_master_slave, [sequence], []}.
+ %%[master_slave_test(xupdate)]}.
+
+xupdate_master(Config) ->
+ Img = <<137, "PNG\r\n", 26, $\n>>,
+ ImgHash = p1_sha:sha(Img),
+ MyJID = my_jid(Config),
+ Peer = ?config(slave, Config),
+ wait_for_slave(Config),
+ #presence{from = MyJID, type = available} = send_recv(Config, #presence{}),
+ #presence{from = Peer, type = available} = recv_presence(Config),
+ VCard = #vcard_temp{photo = #vcard_photo{type = <<"image/png">>, binval = Img}},
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set, sub_els = [VCard]}),
+ #presence{from = MyJID, type = available,
+ sub_els = [#vcard_xupdate{hash = ImgHash}]} = recv_presence(Config),
+ #iq{type = result, sub_els = []} =
+ send_recv(Config, #iq{type = set, sub_els = [#vcard_temp{}]}),
+ ?recv2(#presence{from = MyJID, type = available,
+ sub_els = [#vcard_xupdate{hash = undefined}]},
+ #presence{from = Peer, type = unavailable}),
+ disconnect(Config).
+
+xupdate_slave(Config) ->
+ Img = <<137, "PNG\r\n", 26, $\n>>,
+ ImgHash = p1_sha:sha(Img),
+ MyJID = my_jid(Config),
+ Peer = ?config(master, Config),
+ #presence{from = MyJID, type = available} = send_recv(Config, #presence{}),
+ wait_for_master(Config),
+ #presence{from = Peer, type = available} = recv_presence(Config),
+ #presence{from = Peer, type = available,
+ sub_els = [#vcard_xupdate{hash = ImgHash}]} = recv_presence(Config),
+ #presence{from = Peer, type = available,
+ sub_els = [#vcard_xupdate{hash = undefined}]} = recv_presence(Config),
+ disconnect(Config).
+
+%%%===================================================================
+%%% Internal functions
+%%%===================================================================
+single_test(T) ->
+ list_to_atom("vcard_" ++ atom_to_list(T)).
+
+master_slave_test(T) ->
+ {list_to_atom("vcard_" ++ atom_to_list(T)), [parallel],
+ [list_to_atom("vcard_" ++ atom_to_list(T) ++ "_master"),
+ list_to_atom("vcard_" ++ atom_to_list(T) ++ "_slave")]}.