diff --git a/pycuda/curandom.py b/pycuda/curandom.py
index 324e5ce..2dc5d8c 100644
--- a/pycuda/curandom.py
+++ b/pycuda/curandom.py
@@ -7,7 +7,6 @@ import pycuda.gpuarray as array
 
 
 
-
 # {{{ MD5-based random number generation
 
 md5_code = """
@@ -244,6 +243,13 @@ def rand(shape, dtype=np.float32, stream=None):
 
 # {{{ CURAND wrapper
 
+import pycuda._curand as _curand
+get_curand_version = _curand.get_curand_version
+
+if get_curand_version() >= (3, 2, 0):
+    direction_vector_set = _curand.direction_vector_set
+    get_direction_vectors32 = _curand.get_direction_vectors32
+
 # {{{ Base class
 
 gen_template = """
@@ -260,19 +266,19 @@ random_source = """
 // Uses C++ features (templates); do not surround with extern C
 #include <curand_kernel.h>
 
-extern "C" 
+extern "C"
 {
 
 %(generators)s
 
-__global__ void skip_ahead(%(state_type)s *s, const int n, const int skip) 
+__global__ void skip_ahead(%(state_type)s *s, const int n, const int skip)
 {
   const int idx = blockIdx.x*blockDim.x+threadIdx.x;
   if (idx < n)
     skipahead(skip, &s[idx]);
 }
 
-__global__ void skip_ahead_array(%(state_type)s *s, const int n, const int *skip) 
+__global__ void skip_ahead_array(%(state_type)s *s, const int n, const int *skip)
 {
   const int idx = blockIdx.x*blockDim.x+threadIdx.x;
   if (idx < n)
@@ -302,7 +308,7 @@ class _RandomNumberGeneratorBase(object):
         ]
 
     def __init__(self, state_type, additional_source):
-        if drv.get_version() < (3, 2, 0):
+        if get_curand_version() < (3, 2, 0):
             raise EnvironmentError("Need at least CUDA 3.2")
 
         # Max 256 threads on ION during preparing
@@ -349,8 +355,8 @@ class _RandomNumberGeneratorBase(object):
                 if do_generate(out_type)]
 
         generator_sources = [
-                gen_template % { 
-                    "name":name, "out_type":out_type, "suffix": suffix, 
+                gen_template % {
+                    "name":name, "out_type":out_type, "suffix": suffix,
                     "state_type": state_type, }
                 for name, out_type, suffix in my_generators]
 
@@ -425,10 +431,19 @@ class _RandomNumberGeneratorBase(object):
 
 # {{{ XORWOW RNG
 
+def seed_getter_uniform(N):
+    result = pycuda.gpuarray.empty([N], np.int32)
+    value = np.random.randint(0, 2**31-1)
+    return result.fill(value)
+
+def seed_getter_unique(N):
+    result = np.random.randint(0, 2**31-1, N).astype(np.int32)
+    return pycuda.gpuarray.to_gpu(result)
+
 xorwow_random_source = """
 extern "C" {
 __global__ void prepare_with_seeds(curandState *s, const int n,
-  const int *seed, const int offset) 
+  const int *seed, const int offset)
 {
   const int id = blockIdx.x*blockDim.x+threadIdx.x;
   if (id < n)
@@ -438,58 +453,60 @@ __global__ void prepare_with_seeds(curandState *s, const int n,
 }
 """
 
-class XORWOWRandomNumberGenerator(_RandomNumberGeneratorBase):
-    has_box_muller = True
 
-    def __init__(self, seed_getter=None, offset=0):
-        """
-        :arg seed_getter: a function that, given an integer count, will yield an `int32` 
-          :class:`GPUArray` of seeds.
-        """
+if get_curand_version() >= (3, 2, 0):
+    class XORWOWRandomNumberGenerator(_RandomNumberGeneratorBase):
+        has_box_muller = True
 
-        super(XORWOWRandomNumberGenerator, self).__init__(
-            'curandStateXORWOW', xorwow_random_source)
+        def __init__(self, seed_getter=None, offset=0):
+            """
+            :arg seed_getter: a function that, given an integer count, will yield an `int32`
+              :class:`GPUArray` of seeds.
+            """
 
-        if seed_getter is None:
-            seed = array.to_gpu(
-                    np.asarray(
-                        np.random.random_integers(
-                            0, (1 << 31) - 1, self.generators_per_block), 
-                        dtype=np.int32))
-        else:
-            seed = seed_getter(self.generators_per_block)
+            super(XORWOWRandomNumberGenerator, self).__init__(
+                'curandStateXORWOW', xorwow_random_source)
 
-        if not (isinstance(seed, pycuda.gpuarray.GPUArray)
-                and seed.dtype == np.int32
-                and seed.size == self.generators_per_block):
-            raise TypeError("seed must be GPUArray of integers of right length")
+            if seed_getter is None:
+                seed = array.to_gpu(
+                        np.asarray(
+                            np.random.random_integers(
+                                0, (1 << 31) - 1, self.generators_per_block),
+                            dtype=np.int32))
+            else:
+                seed = seed_getter(self.generators_per_block)
 
-        p = self.module.get_function("prepare_with_seeds")
-        p.prepare("PiPi", block=(self.generators_per_block, 1, 1))
+            if not (isinstance(seed, pycuda.gpuarray.GPUArray)
+                    and seed.dtype == np.int32
+                    and seed.size == self.generators_per_block):
+                raise TypeError("seed must be GPUArray of integers of right length")
 
-        from pycuda.characterize import has_stack
-        has_stack = has_stack()
+            p = self.module.get_function("prepare_with_seeds")
+            p.prepare("PiPi", block=(self.generators_per_block, 1, 1))
 
-        if has_stack:
-            prev_stack_size = drv.Context.get_limit(drv.limit.STACK_SIZE)
+            from pycuda.characterize import has_stack
+            has_stack = has_stack()
 
-        try:
             if has_stack:
-                drv.Context.set_limit(drv.limit.STACK_SIZE, 1<<14) # 16k
+                prev_stack_size = drv.Context.get_limit(drv.limit.STACK_SIZE)
+
             try:
-                dev = drv.Context.get_device()
-                if dev.compute_capability() >= (2, 0):
-                    p.prepared_call((self.block_count, 1), self.state,
-                        self.block_count * self.generators_per_block, seed.gpudata, offset)
-                else:
-                    p.prepared_call((2 * self.block_count, 1), self.state,
-                        self.block_count * self.generators_per_block // 2, seed.gpudata, offset)
-            except drv.LaunchError:
-                raise ValueError("Initialisation failed. Decrease number of threads.")
-
-        finally:
-            if has_stack:
-                drv.Context.set_limit(drv.limit.STACK_SIZE, prev_stack_size)
+                if has_stack:
+                    drv.Context.set_limit(drv.limit.STACK_SIZE, 1<<14) # 16k
+                try:
+                    dev = drv.Context.get_device()
+                    if dev.compute_capability() >= (2, 0):
+                        p.prepared_call((self.block_count, 1), self.state,
+                            self.block_count * self.generators_per_block, seed.gpudata, offset)
+                    else:
+                        p.prepared_call((2 * self.block_count, 1), self.state,
+                            self.block_count * self.generators_per_block // 2, seed.gpudata, offset)
+                except drv.LaunchError:
+                    raise ValueError("Initialisation failed. Decrease number of threads.")
+
+            finally:
+                if has_stack:
+                    drv.Context.set_limit(drv.limit.STACK_SIZE, prev_stack_size)
 
 # }}}
 
@@ -497,58 +514,66 @@ class XORWOWRandomNumberGenerator(_RandomNumberGeneratorBase):
 
 sobol32_random_source = """
 extern "C" {
-__global__ void prepare(curandStateSobol32 *s, const int n, unsigned int *v,
-    const unsigned int o) 
+__global__ void prepare(curandStateSobol32 *s, const int n, curandDirectionVectors32_t *v,
+    const unsigned int o)
 {
   const int id = blockIdx.x*blockDim.x+threadIdx.x;
   if (id < n)
-    curand_init(v, o, &s[id]);
+    curand_init(v[id], o, &s[id]);
 }
 }
 """
 
-class Sobol32RandomNumberGenerator(_RandomNumberGeneratorBase):
-    """
-    Class surrounding CURAND kernels from CUDA 3.2.
-    It allows for generating quasi-random numbers with uniform
-    and normal probability function of type int, float, and double.
-    """
+if get_curand_version() >= (3, 2, 0):
+    class Sobol32RandomNumberGenerator(_RandomNumberGeneratorBase):
+        """
+        Class surrounding CURAND kernels from CUDA 3.2.
+        It allows for generating quasi-random numbers with uniform
+        and normal probability function of type int, float, and double.
+        """
 
-    has_box_muller = False
+        has_box_muller = False
 
-    def __init__(self, dir_vector, offset):
-        super(Sobol32RandomNumberGenerator, self).__init__('curandStateSobol32',
-            sobol32_random_source)
+        def __init__(self, dir_vector=None, offset=0):
+            super(Sobol32RandomNumberGenerator, self).__init__('curandStateSobol32',
+                sobol32_random_source)
 
-        raise NotImplementedError("not working yet")
+            if dir_vector is None:
+                dir_vector = generate_direction_vectors(
+                    self.block_count * self.generators_per_block)
 
-        p = self.module.get_function("prepare")
-        p.prepare("PiPi", block=(self.generators_per_block, 1, 1))
+            if not (isinstance(dir_vector, pycuda.gpuarray.GPUArray)
+                    and dir_vector.dtype == np.int32
+                    and dir_vector.shape == (self.block_count * self.generators_per_block, 32)):
+                raise TypeError("dir_vector must be GPUArray of integers of right shape")
 
-        from pycuda.characterize import has_stack
-        has_stack = has_stack()
+            p = self.module.get_function("prepare")
+            p.prepare("PiPi", block=(self.generators_per_block, 1, 1))
 
-        if has_stack:
-            prev_stack_size = drv.Context.get_limit(drv.limit.STACK_SIZE)
+            from pycuda.characterize import has_stack
+            has_stack = has_stack()
 
-        try:
             if has_stack:
-                drv.Context.set_limit(drv.limit.STACK_SIZE, 1<<14) # 16k
-            try:
+                prev_stack_size = drv.Context.get_limit(drv.limit.STACK_SIZE)
 
-                dev = drv.Context.get_device()
-                if dev.compute_capability() >= (2, 0):
-                    p.prepared_call((self.block_count, 1), self.state,
-                        self.block_count * self.generators_per_block, vector, offset)
-                else:
-                    p.prepared_call((2 * self.block_count, 1), self.state,
-                        self.block_count * self.generators_per_block // 2, vector, offset)
-            except drv.LaunchError:
-                raise ValueError("Initialisation failed. Decrease number of threads.")
-
-        finally:
-            if has_stack:
-                drv.Context.set_limit(drv.limit.STACK_SIZE, prev_stack_size)
+            try:
+                if has_stack:
+                    drv.Context.set_limit(drv.limit.STACK_SIZE, 1<<14) # 16k
+                try:
+
+                    dev = drv.Context.get_device()
+                    if dev.compute_capability() >= (2, 0):
+                        p.prepared_call((self.block_count, 1), self.state,
+                            self.block_count * self.generators_per_block, dir_vector.gpudata, offset)
+                    else:
+                        p.prepared_call((2 * self.block_count, 1), self.state,
+                            self.block_count * self.generators_per_block // 2, dir_vector.gpudata, offset)
+                except drv.LaunchError:
+                    raise ValueError("Initialisation failed. Decrease number of threads.")
+
+            finally:
+                if has_stack:
+                    drv.Context.set_limit(drv.limit.STACK_SIZE, prev_stack_size)
 
 # }}}
 
diff --git a/setup.py b/setup.py
index 517f127..aeb0b26 100644
--- a/setup.py
+++ b/setup.py
@@ -20,6 +20,7 @@ def get_config_schema():
         IncludeDir("CUDA", None),
 
         Switch("CUDA_ENABLE_GL", False, "Enable CUDA GL interoperability"),
+        Switch("CUDA_ENABLE_CURAND", True, "Enable CURAND library"),
 
         LibraryDir("CUDADRV", []),
         Libraries("CUDADRV", ["cuda"]),
@@ -245,6 +246,9 @@ def main():
         EXTRA_SOURCES.append("src/wrapper/wrap_cudagl.cpp")
         EXTRA_DEFINES["HAVE_GL"] = 1
 
+    if conf["CUDA_ENABLE_CURAND"]:
+        EXTRA_DEFINES["HAVE_CURAND"] = 1
+
     ver_dic = {}
     exec(compile(open("pycuda/__init__.py").read(), "pycuda/__init__.py", 'exec'), ver_dic)
 
@@ -316,7 +320,7 @@ def main():
             packages=["pycuda", "pycuda.gl", "pycuda.sparse"],
 
             install_requires=[
-                "pytools>=8",
+                "pytools>=11",
                 "py>=1.0.0b7",
                 "decorator>=3.2.0"
                 ],
@@ -339,6 +343,13 @@ def main():
                     ),
                 Extension("_pvt_struct",
                     ["src/wrapper/_pycuda_struct.c"],
+                    ),
+                Extension("_curand",
+                    ["src/wrapper/wrap_curand.cpp"],
+                    include_dirs=INCLUDE_DIRS + EXTRA_INCLUDE_DIRS,
+                    library_dirs=LIBRARY_DIRS + conf["CUDADRV_LIB_DIR"],
+                    libraries=LIBRARIES + ["curand"] + conf["CUDADRV_LIBNAME"],
+                    define_macros=list(EXTRA_DEFINES.items()),
                     )],
 
             data_files=[
diff --git a/src/cpp/curand.hpp b/src/cpp/curand.hpp
new file mode 100644
index 0000000..e025b0c
--- /dev/null
+++ b/src/cpp/curand.hpp
@@ -0,0 +1,34 @@
+#ifndef _AFJDFJSDFSD_PYCUDA_HEADER_SEEN_CURAND_HPP
+#define _AFJDFJSDFSD_PYCUDA_HEADER_SEEN_CURAND_HPP
+
+
+#if CUDAPP_CUDA_VERSION >= 3020
+#include <curand.h>
+#endif
+
+
+namespace pycuda { namespace curandom {
+
+  py::tuple py_curand_version()
+  {
+    int version = 0;
+#if CUDAPP_CUDA_VERSION >= 3020
+    curandGetVersion(&version);
+#endif
+    return py::make_tuple(
+        version / 1000,
+        (version % 1000)/10,
+        version % 10);
+  }
+
+#if CUDAPP_CUDA_VERSION >= 3020
+  void py_curand_get_direction_vectors32(curandDirectionVectors32_t *vectors[],
+      curandDirectionVectorSet_t set)
+// TODO: checking; cannot use CUDAPP_CALL_GUARDED because function returns CURAND enum
+  { curandGetDirectionVectors32(vectors, set); }
+#endif
+
+} }
+
+#endif
+
diff --git a/src/wrapper/wrap_curand.cpp b/src/wrapper/wrap_curand.cpp
new file mode 100644
index 0000000..71adf57
--- /dev/null
+++ b/src/wrapper/wrap_curand.cpp
@@ -0,0 +1,36 @@
+#include <cuda.hpp>
+#include <curand.hpp>
+
+#include "tools.hpp"
+#include "wrap_helpers.hpp"
+
+#if CUDAPP_CUDA_VERSION >= 3020
+#include <curand.h>
+#endif
+
+using namespace pycuda;
+using namespace pycuda::curandom;
+
+void pycuda_expose_curand()
+{
+  using py::arg;
+  using py::args;
+
+#if CUDAPP_CUDA_VERSION >= 3020
+  py::enum_<curandDirectionVectorSet_t>("direction_vector_set")
+    .value("VECTOR_32", CURAND_DIRECTION_VECTORS_32_JOEKUO6)
+  ;
+#endif
+
+  py::def("get_curand_version", py_curand_version);
+
+#if CUDAPP_CUDA_VERSION >= 3020
+  py::def("get_direction_vectors32", py_curand_get_direction_vectors32, (arg("vectors"), arg("set")));
+#endif
+}
+
+BOOST_PYTHON_MODULE(_curand)
+{
+  pycuda_expose_curand();
+}
+
