On Wed, 20 Dec 2023 15:12:15 +0000
Liviu Dudau <[email protected]> wrote:
> > +static int panthor_fw_load_section_entry(struct panthor_device *ptdev,
> > +					  const struct firmware *fw,
> > +					  struct panthor_fw_binary_iter *iter,
> > +					  u32 ehdr)
> > +{
> > +	struct panthor_fw_binary_section_entry_hdr hdr;
> > +	struct panthor_fw_section *section;
> > +	u32 section_size;
> > +	u32 name_len;
> > +	int ret;
> > +
> > +	ret = panthor_fw_binary_iter_read(ptdev, iter, &hdr, sizeof(hdr));
> > +	if (ret)
> > +		return ret;
> > +
> > +	if (hdr.data.end < hdr.data.start) {
> > +		drm_err(&ptdev->base, "Firmware corrupted, data.end < data.start (0x%x < 0x%x)\n",
> > +			hdr.data.end, hdr.data.start);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (hdr.va.end < hdr.va.start) {
> > +		drm_err(&ptdev->base, "Firmware corrupted, hdr.va.end < hdr.va.start (0x%x < 0x%x)\n",
> > +			hdr.va.end, hdr.va.start);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (hdr.data.end > fw->size) {
> > +		drm_err(&ptdev->base, "Firmware corrupted, file truncated? data_end=0x%x > fw size=0x%zx\n",
> > +			hdr.data.end, fw->size);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if ((hdr.va.start & ~PAGE_MASK) != 0 ||
> > +	    (hdr.va.end & ~PAGE_MASK) != 0) {
> > +		drm_err(&ptdev->base, "Firmware corrupted, virtual addresses not page aligned: 0x%x-0x%x\n",
> > +			hdr.va.start, hdr.va.end);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (hdr.flags & ~CSF_FW_BINARY_IFACE_ENTRY_RD_SUPPORTED_FLAGS) {
> > +		drm_err(&ptdev->base, "Firmware contains interface with unsupported flags (0x%x)\n",
> > +			hdr.flags);
> > +		return -EINVAL;
> > +	}
> > +
> > +	if (hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_PROT) {
> > +		drm_warn(&ptdev->base,
> > +			 "Firmware protected mode entry not be supported, ignoring");
> > +		return 0;
> > +	}
> > +
> > +	if (hdr.va.start == CSF_MCU_SHARED_REGION_START &&
> > +	    !(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED)) {
> > +		drm_err(&ptdev->base,
> > +			"Interface at 0x%llx must be shared", CSF_MCU_SHARED_REGION_START);
> > +		return -EINVAL;
> > +	}
> > +
> > +	name_len = iter->size - iter->offset;
> > +
> > +	section = drmm_kzalloc(&ptdev->base, sizeof(*section), GFP_KERNEL);
> > +	if (!section)
> > +		return -ENOMEM;
> > +
> > +	list_add_tail(&section->node, &ptdev->fw->sections);
> > +	section->flags = hdr.flags;
> > +	section->data.size = hdr.data.end - hdr.data.start;
> > +
> > +	if (section->data.size > 0) {
> > +		void *data = drmm_kmalloc(&ptdev->base, section->data.size, GFP_KERNEL);
> > +
> > +		if (!data)
> > +			return -ENOMEM;
> > +
> > +		memcpy(data, fw->data + hdr.data.start, section->data.size);
> > +		section->data.buf = data;
> > +	}
> > +
> > +	if (name_len > 0) {
> > +		char *name = drmm_kmalloc(&ptdev->base, name_len + 1, GFP_KERNEL);
> > +
> > +		if (!name)
> > +			return -ENOMEM;
> > +
> > +		memcpy(name, iter->data + iter->offset, name_len);
> > +		name[name_len] = '\0';
> > +		section->name = name;
> > +	}
> > +
> > +	section_size = hdr.va.end - hdr.va.start;
> > +	if (section_size) {
> > +		u32 cache_mode = hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_MASK;
> > +		struct panthor_gem_object *bo;
> > +		u32 vm_map_flags = 0;
> > +		struct sg_table *sgt;
> > +		u64 va = hdr.va.start;
> > +
> > +		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_WR))
> > +			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_READONLY;
> > +
> > +		if (!(hdr.flags & CSF_FW_BINARY_IFACE_ENTRY_RD_EX))
> > +			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_NOEXEC;
> > +
> > +		/* TODO: CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_*_COHERENT are mapped to
> > +		 * non-cacheable for now. We might want to introduce a new
> > +		 * IOMMU_xxx flag (or abuse IOMMU_MMIO, which maps to device
> > +		 * memory and is currently not used by our driver) for
> > +		 * AS_MEMATTR_AARCH64_SHARED memory, so we can take benefit
> > +		 * of IO-coherent systems.
> > +		 */
> > +		if (cache_mode != CSF_FW_BINARY_IFACE_ENTRY_RD_CACHE_MODE_CACHED)
> > +			vm_map_flags |= DRM_PANTHOR_VM_BIND_OP_MAP_UNCACHED;
> > +
> > +		section->mem = panthor_kernel_bo_create(ptdev, panthor_fw_vm(ptdev),
> > +							section_size,
> > +							DRM_PANTHOR_BO_NO_MMAP,
> > +							vm_map_flags, va);
> > +		if (IS_ERR(section->mem))
> > +			return PTR_ERR(section->mem);
> > +
> > +		if (drm_WARN_ON(&ptdev->base, section->mem->va_node.start != hdr.va.start))
> > +			return -EINVAL;
> > +
> > +		if (section->flags & CSF_FW_BINARY_IFACE_ENTRY_RD_SHARED) {
> > +			ret = panthor_kernel_bo_vmap(section->mem);
> > +			if (ret)
> > +				return ret;
> > +		}
> > +
> > +		panthor_fw_init_section_mem(ptdev, section);
> > +
> > +		bo = to_panthor_bo(section->mem->obj);
> > +		sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
> > +		if (IS_ERR(sgt))
> > +			return PTR_ERR(section->mem);
>
> I think here we should return PTR_ERR(sgt).
Will fix.
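I.e. that branch becomes (untested, just applying your suggestion):

	sgt = drm_gem_shmem_get_pages_sgt(&bo->base);
	if (IS_ERR(sgt))
		return PTR_ERR(sgt);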
>
> In general I agree with Chris that the list_add_tail() call should be delayed
> until all of the above allocations and preparations have succeeded.
If you don't mind, I'd rather patch panthor_fw_unplug() so it can deal
with partially initialized mem sections than add an error path to
panthor_fw_load_section_entry().
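
Roughly what I have in mind for the unplug side (untested sketch, helper
names/signatures approximated, so treat it as pseudo-code):

	list_for_each_entry(section, &ptdev->fw->sections, node) {
		/* Sections that failed half-way through
		 * panthor_fw_load_section_entry() can be left with a NULL
		 * or ERR_PTR ->mem, so just skip the unmap/free for those.
		 */
		if (IS_ERR_OR_NULL(section->mem))
			continue;

		panthor_kernel_bo_destroy(panthor_fw_vm(ptdev), section->mem);
	}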