/*
 * Copyright (c) Hisilicon Technologies Co., Ltd. 2018-2019. All rights reserved.
 * Description: Gralloc
 * Author: Hisilicon
 * Created: 2019.11.07
 */

#include "gralloc_ion.h"
#include <errno.h>
#include <string.h>
#include <unistd.h>
#include <pthread.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/dma-buf.h>
#include <ion/ion.h>
#include <log/log.h>
#include "securec.h"
#ifdef GRALLOC_NEED_SMMU_MAP
#include "ion_ext.h"
#endif

static pthread_mutex_t g_ion_handle_lock = PTHREAD_MUTEX_INITIALIZER;

static unsigned int gralloc_ion_get_heapid(unsigned long long usage)
{
#ifdef GRALLOC_NEED_SMMU_MAP
    GRALLOC_IGNORE(usage);
    return ION_HEAP_SYSTEM_MASK;
#else
    if ((usage & GRALLOC_USAGE_PROTECTED) != 0) {
        return ION_HEAP_SEC_SMMU_MASK;
    }
    return ION_HEAP_SMMU_MASK;
#endif
}

static unsigned int gralloc_ion_get_flags(unsigned long long usage)
{
    if (((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN) ||
        ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC1_PRODUCER_USAGE_CPU_READ_OFTEN)) {
#ifdef GRALLOC_NEED_SMMU_MAP
        return ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
#else
        return ION_FLAG_CACHED;
#endif
    }
    return 0; /* no ION flags: the CPU rarely reads this buffer */
}

int gralloc_ion_device_open(private_module_t *m)
{
    if (m == nullptr) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

    pthread_mutex_lock(&g_ion_handle_lock);
    if (m->client < 0) {
        m->client = ion_open();
        if (m->client < 0) {
            GRALLOC_ERROR_INFO();
            pthread_mutex_unlock(&g_ion_handle_lock);
            return GRALLOC_FAILURE;
        }
    }
    pthread_mutex_unlock(&g_ion_handle_lock);
    return GRALLOC_SUCCESS;
}

int gralloc_ion_device_close(struct hw_device_t *device)
{
    int ret = 0;
    gralloc1_device_t *dev = reinterpret_cast<gralloc1_device_t *>(device);
    if (dev == nullptr) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

    private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
    if (m == nullptr) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

    pthread_mutex_lock(&g_ion_handle_lock);
    if (m->client > 0) {
        ret = ion_close(m->client);
        if (ret < 0) {
            GRALLOC_ERROR_INFO();
            pthread_mutex_unlock(&g_ion_handle_lock);
            return GRALLOC_FAILURE;
        }
        m->client = -1;
    }
    pthread_mutex_unlock(&g_ion_handle_lock);
    return GRALLOC_SUCCESS;
}

int gralloc_ion_allocate(const private_module_t *m, const buffer_descriptor_t *descriptors,
                         private_internal_handle_t *handle)
{
    int ret;
    unsigned int flags;
    unsigned int heaps;
    unsigned long long usage;
    int fd = -1;
    size_t size;
#ifdef GRALLOC_NEED_SMMU_MAP
    ion_user_handle_t hnd = -1;
#endif
    char errbuf[256] = {0}; /* 256 is enough for err string */

    if (m == nullptr || handle == nullptr || descriptors == nullptr) {
        return GRALLOC_FAILURE;
    }

    usage = descriptors->producer_usage | descriptors->consumer_usage;
    size = descriptors->size;
    heaps = gralloc_ion_get_heapid(usage);
    flags = gralloc_ion_get_flags(usage);

#ifdef GRALLOC_NEED_SMMU_MAP
    ret = ion_alloc(m->client, size, 0, heaps, flags, &hnd);
#else
    ret = ion_alloc_fd(m->client, size, 0, heaps, flags, &fd);
#endif
    if (ret < 0) {
        ALOGE("gralloc ion_alloc fail errno:%d %s", errno, strerror_r(errno, errbuf, sizeof(errbuf)));
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

#ifdef GRALLOC_NEED_SMMU_MAP
    ret = ion_share(m->client, hnd, &fd);
    if (ret < 0) {
        GRALLOC_ERROR_INFO();
        ion_free(m->client, hnd);
        if (fd != -1) {
            close(fd);
        }
        handle->hnd = -1;
        handle->fd = -1;
        handle->size = 0;
        return GRALLOC_FAILURE;
    }
    handle->hnd = hnd;
#endif
    handle->size = size;
    handle->fd = fd;
    return GRALLOC_SUCCESS;
}
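/*
 * Release a buffer produced by gralloc_ion_allocate(): under
 * GRALLOC_NEED_SMMU_MAP the ION user handle is freed first, then the shared
 * fd is closed and the handle fields are reset to their "unallocated" values.
 */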
void gralloc_ion_free(const private_module_t *m, private_internal_handle_t *handle)
{
    if (m == nullptr || handle == nullptr) {
        GRALLOC_ERROR_INFO();
        return;
    }

    int fd = handle->fd;
#ifdef GRALLOC_NEED_SMMU_MAP
    int ret = 0;
    ion_user_handle_t hnd = handle->hnd;
    if (hnd != -1) {
        ret = ion_free(m->client, hnd);
        if (ret < 0) {
            GRALLOC_ERROR_INFO();
            return;
        }
        handle->hnd = -1;
    } else {
        GRALLOC_ERROR_INFO();
        return;
    }
#else
    GRALLOC_IGNORE(m);
#endif
    if (fd != -1) {
        close(fd);
    }
    handle->fd = -1;
    handle->size = 0;
}

int gralloc_ion_map(const private_module_t *m, private_internal_handle_t *handle)
{
    char errbuf[256] = {0}; /* 256 is enough for err string */
    if (m == nullptr || handle == nullptr) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

    int fd = handle->fd;
    void *base = nullptr;
#ifdef GRALLOC_NEED_SMMU_MAP
    unsigned int addr = (unsigned int)-1;
    ion_user_handle_t hnd = -1;
    size_t size = 0;
#else
    size_t size = handle->size;
    GRALLOC_IGNORE(m);
#endif
    unsigned long long usage = handle->consumer_usage | handle->producer_usage;

#ifdef GRALLOC_NEED_SMMU_MAP
    int ret;
    ret = ion_import(m->client, fd, &hnd);
    if (ret != 0) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }
    ret = ion_map_iommu(m->client, hnd, &addr, &size, (usage & GRALLOC_USAGE_PROTECTED) ? 1 : 0);
    if (ret != 0) {
        ALOGE("gralloc ion_map_iommu fail errno:%d %s", errno, strerror_r(errno, errbuf, sizeof(errbuf)));
        ion_free(m->client, hnd); /* drop the handle imported above so it does not leak */
        return GRALLOC_FAILURE;
    }
    handle->hnd = hnd;
    handle->addr = addr;
    handle->size = size;
#endif
    if (size == 0) { /* size_t is unsigned, so zero is the only invalid value */
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

    if ((handle->flags & private_internal_handle_t::PRIV_FLAGS_USES_ION) && !(usage & GRALLOC_USAGE_PROTECTED)) {
        base = mmap(nullptr, size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (base == MAP_FAILED) {
            ALOGE("gralloc mmap fail errno:%d %s", errno, strerror_r(errno, errbuf, sizeof(errbuf)));
            GRALLOC_ERROR_INFO();
            return GRALLOC_FAILURE;
        }
        handle->base = base;
    }
    return GRALLOC_SUCCESS;
}

int gralloc_ion_unmap(const private_module_t *m, private_internal_handle_t *handle)
{
    if (m == nullptr || handle == nullptr) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }

#ifdef GRALLOC_NEED_SMMU_MAP
    int ret;
    ion_user_handle_t hnd = handle->hnd;
#else
    GRALLOC_IGNORE(m);
#endif
    unsigned long long usage = handle->consumer_usage | handle->producer_usage;

    if ((handle->flags & private_internal_handle_t::PRIV_FLAGS_USES_ION) &&
        !(usage & GRALLOC_USAGE_PROTECTED) && handle->base != nullptr) {
        munmap(handle->base, handle->size);
        handle->base = nullptr; /* keep the "unmapped" sentinel consistent with the nullptr checks */
    }

#ifdef GRALLOC_NEED_SMMU_MAP
    if (handle->addr != (unsigned int)-1) {
        ret = ion_unmap_iommu(m->client, hnd, (usage & GRALLOC_USAGE_PROTECTED) ? 1 : 0);
        if (ret < 0) {
            GRALLOC_ERROR_INFO();
            return GRALLOC_FAILURE;
        }
        ret = ion_free(m->client, hnd);
        if (ret < 0) {
            GRALLOC_ERROR_INFO();
            return GRALLOC_FAILURE;
        }
    }
    handle->hnd = -1;
    handle->addr = (unsigned int)-1;
    handle->size = 0;
#endif
    return GRALLOC_SUCCESS;
}

int gralloc_ion_sync(const private_module_t *m, const private_internal_handle_t *handle)
{
    if (m != nullptr && handle != nullptr) {
        if ((handle->flags & private_internal_handle_t::PRIV_FLAGS_USES_ION) &&
            !(handle->flags & private_internal_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP)) {
#ifdef GRALLOC_NEED_SMMU_MAP
            ion_sync_fd(m->client, handle->fd);
#endif
        }
    }
    return GRALLOC_SUCCESS;
}
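/*
 * Keep the CPU cache coherent with device access via DMA_BUF_IOCTL_SYNC.
 * Call with start == true before CPU access (START + READ invalidates the
 * cache) and start == false afterwards (END + WRITE flushes it). The sync
 * direction is derived from the handle's SW_READ/SW_WRITE usage bits.
 */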
int gralloc_dmabuf_sync(const private_internal_handle_t *handle, bool start)
{
    struct dma_buf_sync sync;
    int ret, retry;

    /* flush master memory */
    if (handle == nullptr) {
        GRALLOC_ERROR_INFO();
        return GRALLOC_FAILURE;
    }
    if ((handle->flags & private_internal_handle_t::PRIV_FLAGS_USES_ION) &&
        !(handle->flags & private_internal_handle_t::PRIV_FLAGS_USES_ION_DMA_HEAP)) {
        unsigned long long usage = handle->producer_usage | handle->consumer_usage;
        bool cpu_read = (usage & GRALLOC_USAGE_SW_READ_MASK) != 0;
        bool cpu_write = (usage & GRALLOC_USAGE_SW_WRITE_MASK) != 0;

        (void)memset_s(&sync, sizeof(struct dma_buf_sync), 0, sizeof(struct dma_buf_sync));
        /* START + READ invalidates the cache; END + WRITE flushes it. */
        if (start) {
            sync.flags |= DMA_BUF_SYNC_START;
        } else {
            sync.flags |= DMA_BUF_SYNC_END;
        }
        if (cpu_read) {
            sync.flags |= DMA_BUF_SYNC_READ;
        }
        if (cpu_write) {
            sync.flags |= DMA_BUF_SYNC_WRITE;
        }

        retry = 5; /* try 5 times */
        do {
            ret = ioctl(handle->fd, DMA_BUF_IOCTL_SYNC, &sync);
            retry--;
        } while (ret == -1 && (errno == EAGAIN || errno == EINTR) && retry > 0);
        /* the libc ioctl() wrapper returns -1 and reports EAGAIN/EINTR via errno */
        if (ret < 0) {
            GRALLOC_ERROR_INFO();
            return GRALLOC_FAILURE;
        }
    }
    return GRALLOC_SUCCESS;
}
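/*
 * Illustrative call sequence (a sketch, not compiled into this file): how a
 * caller might drive the functions above for a CPU-accessible buffer. 'm' and
 * 'desc' are assumed to be a valid private_module_t (with an open ion client)
 * and a populated buffer_descriptor_t; the flag/usage setup mirrors what
 * gralloc_ion_map() checks before mapping.
 *
 *   private_internal_handle_t h = {};
 *   h.flags = private_internal_handle_t::PRIV_FLAGS_USES_ION;
 *   h.producer_usage = desc->producer_usage;
 *   h.consumer_usage = desc->consumer_usage;
 *   if (gralloc_ion_allocate(m, desc, &h) == GRALLOC_SUCCESS &&
 *       gralloc_ion_map(m, &h) == GRALLOC_SUCCESS) {
 *       gralloc_dmabuf_sync(&h, true);   // invalidate before CPU access
 *       // ... read/write pixels through h.base ...
 *       gralloc_dmabuf_sync(&h, false);  // flush after CPU access
 *       gralloc_ion_unmap(m, &h);
 *   }
 *   gralloc_ion_free(m, &h);
 */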