On 2016-02-12 01:09, Ahmed S. Darwish wrote:
The PA daemon currently uses a single SHM file for all the clients
sending and receiving commands over the low-latency srbchannel
mechanism.

To safely run PA daemon in system mode later using memfds, and to
provide the necessary ground work for sandboxing, create the
srbchannel SHM files on a per-client basis.

Signed-off-by: Ahmed S. Darwish <[email protected]>
---
  src/pulsecore/core.c            | 19 ++++++++++---------
  src/pulsecore/core.h            |  6 ++----
  src/pulsecore/protocol-native.c | 28 ++++++++++++++++++++++++----
  3 files changed, 36 insertions(+), 17 deletions(-)

[ For more details on the vacuuming issue, please check the
   message sent in direct reply to this patch. ]

If the srbchannel is the only thing present in that pool, then vacuuming does not even make sense?


diff --git a/src/pulsecore/core.c b/src/pulsecore/core.c
index b2df7de..1c3c3cd 100644
--- a/src/pulsecore/core.c
+++ b/src/pulsecore/core.c
@@ -126,11 +126,6 @@ pa_core* pa_core_new(pa_mainloop_api *m, bool shared, 
size_t shm_size) {
      c->shm_size = shm_size;
      pa_silence_cache_init(&c->silence_cache);

-    if (shared && !(c->rw_mempool = pa_mempool_new(shared, shm_size)))
-        pa_log_warn("Failed to allocate shared writable memory pool.");
-    if (c->rw_mempool)
-        pa_mempool_set_is_remote_writable(c->rw_mempool, true);
-
      c->exit_event = NULL;
      c->scache_auto_unload_event = NULL;

@@ -217,8 +212,6 @@ static void core_free(pa_object *o) {
      pa_assert(!c->default_sink);

      pa_silence_cache_done(&c->silence_cache);
-    if (c->rw_mempool)
-        pa_mempool_free(c->rw_mempool);
      pa_mempool_free(c->mempool);

      for (j = 0; j < PA_CORE_HOOK_MAX; j++)
@@ -285,8 +278,16 @@ void pa_core_maybe_vacuum(pa_core *c) {

      pa_mempool_vacuum(c->mempool);

-    if (c->rw_mempool)
-        pa_mempool_vacuum(c->rw_mempool);
+    /*
+     * FIXME: Vacuum the per-client rw (srbchannel) mempools
+     *
+     * We need to vacuum the per-client mempool owned by each connection
+     * at pa_native_protocol::connections. Nonetheless, libpulsecore
+     * cannot access any symbol from protocol-native.
+     *
+     * Vacuuming a per-client mempool on its streams' deletion and
+     * uncorking also proved to be problematic.
+     */
  }

  pa_time_event* pa_core_rttime_new(pa_core *c, pa_usec_t usec, 
pa_time_event_cb_t cb, void *userdata) {
diff --git a/src/pulsecore/core.h b/src/pulsecore/core.h
index 428689c..9f5c445 100644
--- a/src/pulsecore/core.h
+++ b/src/pulsecore/core.h
@@ -177,10 +177,8 @@ struct pa_core {
      PA_LLIST_HEAD(pa_subscription_event, subscription_event_queue);
      pa_subscription_event *subscription_event_last;

-    /* The mempool is used for data we write to, it's readonly for the client.
-       The rw_mempool is used for data writable by both server and client (and
-       can be NULL in some cases). */
-    pa_mempool *mempool, *rw_mempool;
+    /* The mempool is used for data we write to, it's readonly for the client. 
*/
+    pa_mempool *mempool;

      /* Shared memory size, as specified either by daemon configuration
       * or PA daemon defaults (~ 64 MiB). */
diff --git a/src/pulsecore/protocol-native.c b/src/pulsecore/protocol-native.c
index 145db04..c6b3ca4 100644
--- a/src/pulsecore/protocol-native.c
+++ b/src/pulsecore/protocol-native.c
@@ -173,6 +173,12 @@ struct pa_native_connection {
      bool is_local:1;
      uint32_t version;
      pa_client *client;
+    /* R/W mempool, one per client connection, for srbchannel transport.
+     * Both server and client can write to this shm area.
+     *
+     * Note: This will be NULL if our connection with the client does
+     * not support srbchannels */
+    pa_mempool *rw_mempool;
      pa_pstream *pstream;
      pa_pdispatch *pdispatch;
      pa_idxset *record_streams, *output_streams;
@@ -1348,6 +1354,12 @@ static void 
native_connection_unlink(pa_native_connection *c) {
      if (c->pstream)
          pa_pstream_unlink(c->pstream);

+    /* This mempool is used exclusively by srbchannels. Do not free it
+     * except after ending all srbchannel communications through the
+     * pstreams unlink above */
+    if (c->rw_mempool)
+        pa_mempool_free(c->rw_mempool);

A general convention for _unlink functions is that they should allow being called more than once. So all places which free the mempool should also set the variable to NULL.

+
      if (c->auth_timeout_event) {
          c->protocol->core->mainloop->time_free(c->auth_timeout_event);
          c->auth_timeout_event = NULL;
@@ -2606,15 +2618,17 @@ static void setup_srbchannel(pa_native_connection *c) {
          return;
      }

-    if (!c->protocol->core->rw_mempool) {
-        pa_log_debug("Disabling srbchannel, reason: No rw memory pool");

What if c->rw_mempool is already set at this point? Can that happen? If not, add an assertion; if it can (consider a malicious client), then handle it somehow :-)

+    if (!(c->rw_mempool = pa_mempool_new(true, c->protocol->core->shm_size))) {
+        pa_log_warn("Disabling srbchannel, reason: Failed to allocate shared "
+                    "writable memory pool.");
          return;
      }
+    pa_mempool_set_is_remote_writable(c->rw_mempool, true);

-    srb = pa_srbchannel_new(c->protocol->core->mainloop, 
c->protocol->core->rw_mempool);
+    srb = pa_srbchannel_new(c->protocol->core->mainloop, c->rw_mempool);
      if (!srb) {
          pa_log_debug("Failed to create srbchannel");
-        return;
+        goto free_rw_mempool;

If this is the only place you need goto, just instead do

        pa_mempool_free(c->rw_mempool);
        c->rw_mempool = NULL;
        return;

      }
      pa_log_debug("Enabling srbchannel...");
      pa_srbchannel_export(srb, &srbt);
@@ -2634,6 +2648,10 @@ static void setup_srbchannel(pa_native_connection *c) {
      pa_pstream_send_memblock(c->pstream, 0, 0, 0, &mc);

      c->srbpending = srb;
+    return;
+
+free_rw_mempool:
+    pa_mempool_free(c->rw_mempool);
  }

  static void command_enable_srbchannel(pa_pdispatch *pd, uint32_t command, 
uint32_t tag, pa_tagstruct *t, void *userdata) {
@@ -5125,6 +5143,8 @@ void pa_native_protocol_connect(pa_native_protocol *p, 
pa_iochannel *io, pa_nati
      c->client->send_event = client_send_event_cb;
      c->client->userdata = c;

+    c->rw_mempool = NULL;

A preferred way nowadays is to instead use pa_xnew0 when creating the struct initially.

+
      c->pstream = pa_pstream_new(p->core->mainloop, io, p->core->mempool);
      pa_pstream_set_receive_packet_callback(c->pstream, 
pstream_packet_callback, c);
      pa_pstream_set_receive_memblock_callback(c->pstream, 
pstream_memblock_callback, c);


Regards,


--
David Henningsson, Canonical Ltd.
https://launchpad.net/~diwic
_______________________________________________
pulseaudio-discuss mailing list
[email protected]
https://lists.freedesktop.org/mailman/listinfo/pulseaudio-discuss

Reply via email to