Module: Mesa Branch: main Commit: 8103be7faa4435365f72fcde025d9c1f4b37d0f8 URL: http://cgit.freedesktop.org/mesa/mesa/commit/?id=8103be7faa4435365f72fcde025d9c1f4b37d0f8
Author: LingMan <[email protected]> Date: Sun Nov 5 01:26:13 2023 +0100 rusticl: Directly pass a `&Device` to `Mem::map_image` and `Mem::map_buffer` Until now they took a `&Arc<Queue>` but only ever used it to access the `Device`. Makes it clearer what these methods actually require. Reviewed-by: Karol Herbst <[email protected]> Part-of: <https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/26050> --- src/gallium/frontends/rusticl/api/memory.rs | 4 +-- src/gallium/frontends/rusticl/core/memory.rs | 53 +++++++++++++++------------- 2 files changed, 30 insertions(+), 27 deletions(-) diff --git a/src/gallium/frontends/rusticl/api/memory.rs b/src/gallium/frontends/rusticl/api/memory.rs index 9fd77bfe806..16d79b32a1c 100644 --- a/src/gallium/frontends/rusticl/api/memory.rs +++ b/src/gallium/frontends/rusticl/api/memory.rs @@ -1659,7 +1659,7 @@ fn enqueue_map_buffer( return Err(CL_INVALID_CONTEXT); } - let ptr = b.map_buffer(&q, offset, size)?; + let ptr = b.map_buffer(q.device, offset, size)?; create_and_queue( q, CL_COMMAND_MAP_BUFFER, @@ -2139,7 +2139,7 @@ fn enqueue_map_image( }; let ptr = i.map_image( - &q, + q.device, &origin, &region, unsafe { image_row_pitch.as_mut().unwrap() }, diff --git a/src/gallium/frontends/rusticl/core/memory.rs b/src/gallium/frontends/rusticl/core/memory.rs index 8e17b9deb6d..49acefc2e4e 100644 --- a/src/gallium/frontends/rusticl/core/memory.rs +++ b/src/gallium/frontends/rusticl/core/memory.rs @@ -557,18 +557,18 @@ impl Mem { fn tx_raw_async( &self, - q: &Arc<Queue>, + dev: &Device, rw: RWFlags, ) -> CLResult<(PipeTransfer, Option<PipeResource>)> { let mut offset = 0; let b = self.to_parent(&mut offset); - let r = b.get_res()?.get(&q.device).unwrap(); + let r = b.get_res()?.get(dev).unwrap(); let size = self.size.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?; - let ctx = q.device.helper_ctx(); + let ctx = dev.helper_ctx(); assert!(self.is_buffer()); - let tx = if can_map_directly(q.device, r) { + let tx = if can_map_directly(dev, r) 
{ ctx.buffer_map_directly( r, offset.try_into().map_err(|_| CL_OUT_OF_HOST_MEMORY)?, @@ -582,8 +582,7 @@ impl Mem { if let Some(tx) = tx { Ok((tx, None)) } else { - let shadow = q - .device + let shadow = dev .screen() .resource_create_buffer(size as u32, ResourceType::Staging) .ok_or(CL_OUT_OF_RESOURCES)?; @@ -621,16 +620,16 @@ impl Mem { fn tx_image_raw_async( &self, - q: &Arc<Queue>, + dev: &Device, bx: &pipe_box, rw: RWFlags, ) -> CLResult<(PipeTransfer, Option<PipeResource>)> { assert!(!self.is_buffer()); - let r = self.get_res()?.get(q.device).unwrap(); - let ctx = q.device.helper_ctx(); + let r = self.get_res()?.get(dev).unwrap(); + let ctx = dev.helper_ctx(); - let tx = if can_map_directly(q.device, r) { + let tx = if can_map_directly(dev, r) { ctx.texture_map_directly(r, bx, rw) } else { None @@ -639,8 +638,7 @@ impl Mem { if let Some(tx) = tx { Ok((tx, None)) } else { - let shadow = q - .device + let shadow = dev .screen() .resource_create_texture( r.width(), @@ -1193,34 +1191,39 @@ impl Mem { /// the content behind the returned pointer is valid until unmapped. fn map<'a>( &self, - q: &Arc<Queue>, + dev: &'static Device, lock: &'a mut MutexGuard<Mappings>, rw: RWFlags, ) -> CLResult<&'a PipeTransfer> { - if let Entry::Vacant(e) = lock.tx.entry(q.device) { + if let Entry::Vacant(e) = lock.tx.entry(&dev) { let (tx, res) = if self.is_buffer() { - self.tx_raw_async(q, rw)? + self.tx_raw_async(dev, rw)? } else { let bx = self.image_desc.bx()?; - self.tx_image_raw_async(q, &bx, rw)? + self.tx_image_raw_async(dev, &bx, rw)? 
}; e.insert(MappingTransfer::new(tx, res)); } else { - lock.mark_pending(q.device); + lock.mark_pending(dev); } - Ok(&lock.tx.get_mut(&q.device).unwrap().tx) + Ok(&lock.tx.get_mut(dev).unwrap().tx) } - pub fn map_buffer(&self, q: &Arc<Queue>, offset: usize, _size: usize) -> CLResult<*mut c_void> { + pub fn map_buffer( + &self, + dev: &'static Device, + offset: usize, + _size: usize, + ) -> CLResult<*mut c_void> { assert!(self.is_buffer()); let mut lock = self.maps.lock().unwrap(); - let ptr = if self.has_user_shadow_buffer(q.device)? { + let ptr = if self.has_user_shadow_buffer(dev)? { self.host_ptr } else { - let tx = self.map(q, &mut lock, RWFlags::RW)?; + let tx = self.map(dev, &mut lock, RWFlags::RW)?; tx.ptr() }; @@ -1230,7 +1233,7 @@ impl Mem { pub fn map_image( &self, - q: &Arc<Queue>, + dev: &'static Device, origin: &CLVec<usize>, _region: &CLVec<usize>, row_pitch: &mut usize, @@ -1241,18 +1244,18 @@ impl Mem { let mut lock = self.maps.lock().unwrap(); // we might have a host_ptr shadow buffer or image created from buffer - let ptr = if self.has_user_shadow_buffer(q.device)? || self.is_parent_buffer() { + let ptr = if self.has_user_shadow_buffer(dev)? || self.is_parent_buffer() { *row_pitch = self.image_desc.image_row_pitch; *slice_pitch = self.image_desc.image_slice_pitch; if let Some(src) = &self.parent { - let tx = src.map(q, &mut lock, RWFlags::RW)?; + let tx = src.map(dev, &mut lock, RWFlags::RW)?; tx.ptr() } else { self.host_ptr } } else { - let tx = self.map(q, &mut lock, RWFlags::RW)?; + let tx = self.map(dev, &mut lock, RWFlags::RW)?; if self.image_desc.dims() > 1 { *row_pitch = tx.row_pitch() as usize;
