  • [Repost] How VFIO calls iommu_map through VFIO_IOMMU_MAP_DMA

    Reposted from https://blog.csdn.net/tiantao2012/article/details/69944664

    Devices handed over to VFIO are exposed to user programs in the form of a vfio_group. The userspace flow looks roughly like this:

    container = open("/dev/vfio/vfio", O_RDWR);
    group = open("/dev/vfio/26", O_RDWR);
    ioctl(group, VFIO_GROUP_SET_CONTAINER, &container);
    ioctl(container, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
    ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
    

    Clearly this conceptual space involves two notions, container and group: vfio_container is the access context, and vfio_group is VFIO's representation of an iommu_group.
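
    To make the last ioctl above concrete, here is a minimal userspace sketch of how struct vfio_iommu_type1_dma_map is filled in before calling VFIO_IOMMU_MAP_DMA. The buffer size (1 MB) and the IOVA (0) are arbitrary example values, and error handling is mostly omitted.

/* Sketch only: map 1 MB of anonymous memory at IOVA 0 for DMA. */
#include <linux/vfio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int map_dma_example(int container)
{
    struct vfio_iommu_type1_dma_map dma_map;
    void *buf;

    buf = mmap(NULL, 1024 * 1024, PROT_READ | PROT_WRITE,
               MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (buf == MAP_FAILED)
        return -1;

    memset(&dma_map, 0, sizeof(dma_map));
    dma_map.argsz = sizeof(dma_map);
    dma_map.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE;
    dma_map.vaddr = (unsigned long)buf;   /* process virtual address */
    dma_map.iova  = 0;                    /* device (IO virtual) address */
    dma_map.size  = 1024 * 1024;

    /* This lands in vfio_iommu_type1_ioctl(VFIO_IOMMU_MAP_DMA) below */
    return ioctl(container, VFIO_IOMMU_MAP_DMA, &dma_map);
}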

    [vfio_iommu_type1.c]

static long vfio_iommu_type1_ioctl(void *iommu_data,
                   unsigned int cmd, unsigned long arg)
{
    struct vfio_iommu *iommu = iommu_data;
    unsigned long minsz;

    if (cmd == VFIO_CHECK_EXTENSION) {
        switch (arg) {
        case VFIO_TYPE1_IOMMU:
        case VFIO_TYPE1v2_IOMMU:
        case VFIO_TYPE1_NESTING_IOMMU:
            return 1;
        case VFIO_DMA_CC_IOMMU:
            if (!iommu)
                return 0;
            return vfio_domains_have_iommu_cache(iommu);
        default:
            return 0;
        }
    } else if (cmd == VFIO_IOMMU_GET_INFO) {
        struct vfio_iommu_type1_info info;
        struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
        unsigned long capsz;
        int ret;

        minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

        /* For backward compatibility, cannot require this */
        capsz = offsetofend(struct vfio_iommu_type1_info, cap_offset);

        if (copy_from_user(&info, (void __user *)arg, minsz))
            return -EFAULT;

        if (info.argsz < minsz)
            return -EINVAL;

        if (info.argsz >= capsz) {
            minsz = capsz;
            info.cap_offset = 0; /* output, no-recopy necessary */
        }

        info.flags = VFIO_IOMMU_INFO_PGSIZES;

        info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

        ret = vfio_iommu_iova_build_caps(iommu, &caps);
        if (ret)
            return ret;

        if (caps.size) {
            info.flags |= VFIO_IOMMU_INFO_CAPS;

            if (info.argsz < sizeof(info) + caps.size) {
                info.argsz = sizeof(info) + caps.size;
            } else {
                vfio_info_cap_shift(&caps, sizeof(info));
                if (copy_to_user((void __user *)arg +
                        sizeof(info), caps.buf,
                        caps.size)) {
                    kfree(caps.buf);
                    return -EFAULT;
                }
                info.cap_offset = sizeof(info);
            }

            kfree(caps.buf);
        }

        return copy_to_user((void __user *)arg, &info, minsz) ?
            -EFAULT : 0;

    } else if (cmd == VFIO_IOMMU_MAP_DMA) {
        struct vfio_iommu_type1_dma_map map;
        uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
                VFIO_DMA_MAP_FLAG_WRITE;

        minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

        if (copy_from_user(&map, (void __user *)arg, minsz))
            return -EFAULT;

        if (map.argsz < minsz || map.flags & ~mask)
            return -EINVAL;

        return vfio_dma_do_map(iommu, &map);

    } else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
        struct vfio_iommu_type1_dma_unmap unmap;
        long ret;

        minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

        if (copy_from_user(&unmap, (void __user *)arg, minsz))
            return -EFAULT;

        if (unmap.argsz < minsz || unmap.flags)
            return -EINVAL;

        ret = vfio_dma_do_unmap(iommu, &unmap);
        if (ret)
            return ret;

        return copy_to_user((void __user *)arg, &unmap, minsz) ?
            -EFAULT : 0;
    }

    return -ENOTTY;
}
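
    The minsz/argsz handling above is the VFIO backward-compatibility convention: user space declares how large its structure is, and the kernel only copies back the fields both sides understand. A hedged userspace sketch of VFIO_IOMMU_GET_INFO, just to show that handshake:

/* Sketch: query the IOMMU page-size bitmap through the container fd.
 * Older kernels that predate cap_offset still work, because only the
 * bytes covered by argsz/minsz are exchanged. */
#include <linux/vfio.h>
#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>

static void show_pgsizes(int container)
{
    struct vfio_iommu_type1_info info;

    memset(&info, 0, sizeof(info));
    info.argsz = sizeof(info);

    if (ioctl(container, VFIO_IOMMU_GET_INFO, &info))
        return;

    if (info.flags & VFIO_IOMMU_INFO_PGSIZES)
        printf("supported IOVA page sizes: 0x%llx\n",
               (unsigned long long)info.iova_pgsizes);
}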

    vfio_dma

struct vfio_dma {
    struct rb_node        node;
    dma_addr_t        iova;        /* Device address */
    unsigned long        vaddr;        /* Process virtual addr */
    size_t            size;        /* Map size (bytes) */
    int            prot;        /* IOMMU_READ/WRITE */
    bool            iommu_mapped;
    bool            lock_cap;    /* capable(CAP_IPC_LOCK) */
    struct task_struct    *task;
    struct rb_root        pfn_list;    /* Ex-user pinned pfn list */
};

    vfio_dma_do_map

static int vfio_dma_do_map(struct vfio_iommu *iommu,
               struct vfio_iommu_type1_dma_map *map)
{
    dma_addr_t iova = map->iova;
    unsigned long vaddr = map->vaddr;
    size_t size = map->size;
    int ret = 0, prot = 0;
    uint64_t mask;
    struct vfio_dma *dma;
    ......
    if (vfio_find_dma(iommu, iova, size)) {
        ret = -EEXIST;
        goto out_unlock;
    }
    ......
    if (!vfio_iommu_iova_dma_valid(iommu, iova, iova + size - 1)) {
        ret = -EINVAL;
        goto out_unlock;
    }
    ......
    iommu->dma_avail--;
    dma->iova = iova;
    dma->vaddr = vaddr;
    dma->prot = prot;
    ......
    /* Insert zero-sized and grow as we map chunks of it */
    vfio_link_dma(iommu, dma);

    /* Don't pin and map if container doesn't contain IOMMU capable domain */
    if (!IS_IOMMU_CAP_DOMAIN_IN_CONTAINER(iommu))
        dma->size = size;
    else
        ret = vfio_pin_map_dma(iommu, dma, size);
    ......
}
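
    vfio_find_dma(), used above to reject overlapping requests, is an interval lookup in the red-black tree that vfio_dma.node links each mapping into. Roughly (a trimmed sketch of the helper in vfio_iommu_type1.c):

/* Walk the rb-tree of existing vfio_dma ranges (keyed by iova) and
 * return any entry that overlaps [start, start + size). */
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
                                      dma_addr_t start, size_t size)
{
    struct rb_node *node = iommu->dma_list.rb_node;

    while (node) {
        struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

        if (start + size <= dma->iova)
            node = node->rb_left;        /* request lies entirely below */
        else if (start >= dma->iova + dma->size)
            node = node->rb_right;       /* request lies entirely above */
        else
            return dma;                  /* overlap */
    }

    return NULL;
}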

    vfio_iommu_iova_dma_valid()

/*
 * Check dma map request is within a valid iova range
 */
static bool vfio_iommu_iova_dma_valid(struct vfio_iommu *iommu,
                      dma_addr_t start, dma_addr_t end)
{
    struct list_head *iova = &iommu->iova_list;
    struct vfio_iova *node;

    list_for_each_entry(node, iova, list) {
        if (start >= node->start && end <= node->end)
            return true;
    }

    /*
     * Check for list_empty() as well since a container with
     * a single mdev device will have an empty list.
     */
    return list_empty(iova);
}

    vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma, size_t map_size)  // maps map_size bytes

    ---------vfio_iommu_map(iommu, iova + dma->size, pfn, npage,dma->prot)

    ------------------iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT, npage << PAGE_SHIFT, prot | d->prot)

    --------------------------__iommu_map(domain, iova, paddr, size, prot, GFP_KERNEL)
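
    Before the chain reaches __iommu_map, vfio_pin_map_dma() pins the user memory and hands each physically contiguous run of pages to vfio_iommu_map(), which is where the iommu_map() call in the chain comes from. A trimmed, slightly simplified sketch (the argument lists are abridged, and the unwind/rollback paths of the real function are omitted); the full __iommu_map listing follows it:

/* Sketch only: pin user pages chunk by chunk and map each contiguous chunk. */
static int vfio_pin_map_dma(struct vfio_iommu *iommu, struct vfio_dma *dma,
                            size_t map_size)
{
    dma_addr_t iova = dma->iova;
    unsigned long vaddr = dma->vaddr;
    unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT; /* locked-memory limit in pages */
    size_t size = map_size;
    unsigned long pfn;
    long npage;
    int ret = 0;

    while (size) {
        /* Pin a physically contiguous run of user pages starting at
         * vaddr + dma->size; pfn/npage describe the pinned run. */
        npage = vfio_pin_pages_remote(dma, vaddr + dma->size,
                                      size >> PAGE_SHIFT, &pfn, limit);
        if (npage <= 0)
            return (int)npage;

        /* Map the run into every IOMMU domain in the container */
        ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, dma->prot);
        if (ret)
            return ret;

        size -= npage << PAGE_SHIFT;
        dma->size += npage << PAGE_SHIFT;   /* grow the zero-sized entry */
    }

    return ret;
}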

int __iommu_map(struct iommu_domain *domain, unsigned long iova,
          phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
    const struct iommu_ops *ops = domain->ops;
    unsigned long orig_iova = iova;
    unsigned int min_pagesz;
    size_t orig_size = size;
    phys_addr_t orig_paddr = paddr;
    int ret = 0;

    if (unlikely(ops->map == NULL ||
             domain->pgsize_bitmap == 0UL))
        return -ENODEV;

    if (unlikely(!(domain->type & __IOMMU_DOMAIN_PAGING)))
        return -EINVAL;

    /* find out the minimum page size supported */
    min_pagesz = 1 << __ffs(domain->pgsize_bitmap);

    /*
     * both the virtual address and the physical one, as well as
     * the size of the mapping, must be aligned (at least) to the
     * size of the smallest page supported by the hardware
     */
    if (!IS_ALIGNED(iova | paddr | size, min_pagesz)) {
        pr_err("unaligned: iova 0x%lx pa %pa size 0x%zx min_pagesz 0x%x\n",
               iova, &paddr, size, min_pagesz);
        return -EINVAL;
    }

    pr_debug("map: iova 0x%lx pa %pa size 0x%zx\n", iova, &paddr, size);

    while (size) {
        size_t pgsize = iommu_pgsize(domain, iova | paddr, size);

        pr_debug("mapping: iova 0x%lx pa %pa pgsize 0x%zx\n",
             iova, &paddr, pgsize);
        ret = ops->map(domain, iova, paddr, pgsize, prot, gfp);

        if (ret)
            break;

        iova += pgsize;
        paddr += pgsize;
        size -= pgsize;
    }

    if (ops->iotlb_sync_map)
        ops->iotlb_sync_map(domain);

    /* unroll mapping in case something went wrong */
    if (ret)
        iommu_unmap(domain, orig_iova, orig_size - size);
    else
        trace_map(orig_iova, orig_paddr, orig_size);

    return ret;
}
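
    iommu_pgsize(), called in the loop above, picks the largest hardware-supported page size that both fits in the remaining size and respects the alignment of iova | paddr, so a large, well-aligned mapping can be built from huge pages instead of 4 KB entries. Roughly (a sketch of the helper in drivers/iommu/iommu.c of this era):

/* Choose the biggest page size from domain->pgsize_bitmap that is no
 * larger than 'size' and matches the alignment of addr_merge (iova | paddr). */
static size_t iommu_pgsize(struct iommu_domain *domain,
                           unsigned long addr_merge, size_t size)
{
    unsigned int pgsize_idx;
    size_t pgsize;

    /* Max page size that still fits into 'size' */
    pgsize_idx = __fls(size);

    /* Also limited by the alignment of the addresses */
    if (likely(addr_merge)) {
        unsigned int align_pgsize_idx = __ffs(addr_merge);

        pgsize_idx = min(pgsize_idx, align_pgsize_idx);
    }

    /* Build a mask of acceptable page sizes ... */
    pgsize = (1UL << (pgsize_idx + 1)) - 1;

    /* ... and throw away the ones the hardware doesn't support */
    pgsize &= domain->pgsize_bitmap;

    /* Pick the biggest remaining page size */
    pgsize_idx = __fls(pgsize);
    return 1UL << pgsize_idx;
}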
struct iommu_ops {
    bool (*capable)(enum iommu_cap);

    /* Domain allocation and freeing by the iommu driver */
    struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
    void (*domain_free)(struct iommu_domain *);

    int (*attach_dev)(struct iommu_domain *domain, struct device *dev);
    void (*detach_dev)(struct iommu_domain *domain, struct device *dev);
    /* map a physically contiguous memory region to an iommu domain */
    int (*map)(struct iommu_domain *domain, unsigned long iova,
           phys_addr_t paddr, size_t size, int prot, gfp_t gfp);
    size_t (*unmap)(struct iommu_domain *domain, unsigned long iova,
             size_t size, struct iommu_iotlb_gather *iotlb_gather);
    void (*flush_iotlb_all)(struct iommu_domain *domain);
    void (*iotlb_sync_map)(struct iommu_domain *domain);
    void (*iotlb_sync)(struct iommu_domain *domain,
               struct iommu_iotlb_gather *iotlb_gather);
    phys_addr_t (*iova_to_phys)(struct iommu_domain *domain, dma_addr_t iova);
    int (*add_device)(struct device *dev);
    void (*remove_device)(struct device *dev);
    struct iommu_group *(*device_group)(struct device *dev);
    int (*domain_get_attr)(struct iommu_domain *domain,
                   enum iommu_attr attr, void *data);
    int (*domain_set_attr)(struct iommu_domain *domain,
                   enum iommu_attr attr, void *data);

    /* Request/Free a list of reserved regions for a device */
    void (*get_resv_regions)(struct device *dev, struct list_head *list);
    void (*put_resv_regions)(struct device *dev, struct list_head *list);
    void (*apply_resv_region)(struct device *dev, struct iommu_domain *domain,
                  struct iommu_resv_region *region);

    /* Window handling functions */
    int (*domain_window_enable)(struct iommu_domain *domain, u32 wnd_nr,
                    phys_addr_t paddr, u64 size, int prot);
    void (*domain_window_disable)(struct iommu_domain *domain, u32 wnd_nr);

    int (*of_xlate)(struct device *dev, struct of_phandle_args *args);
    bool (*is_attach_deferred)(struct iommu_domain *domain, struct device *dev);

    /* Per device IOMMU features */
    bool (*dev_has_feat)(struct device *dev, enum iommu_dev_features f);
    bool (*dev_feat_enabled)(struct device *dev, enum iommu_dev_features f);
    int (*dev_enable_feat)(struct device *dev, enum iommu_dev_features f);
    int (*dev_disable_feat)(struct device *dev, enum iommu_dev_features f);

    /* Aux-domain specific attach/detach entries */
    int (*aux_attach_dev)(struct iommu_domain *domain, struct device *dev);
    void (*aux_detach_dev)(struct iommu_domain *domain, struct device *dev);
    int (*aux_get_pasid)(struct iommu_domain *domain, struct device *dev);

    struct iommu_sva *(*sva_bind)(struct device *dev, struct mm_struct *mm,
                      void *drvdata);
    void (*sva_unbind)(struct iommu_sva *handle);
    int (*sva_get_pasid)(struct iommu_sva *handle);

    int (*page_response)(struct device *dev, struct iommu_fault_event *evt,
                 struct iommu_page_response *msg);
    int (*cache_invalidate)(struct iommu_domain *domain, struct device *dev,
                struct iommu_cache_invalidate_info *inv_info);
    int (*sva_bind_gpasid)(struct iommu_domain *domain, struct device *dev,
                   struct iommu_gpasid_bind_data *data);
    int (*sva_unbind_gpasid)(struct device *dev, int pasid);

    unsigned long pgsize_bitmap;
    struct module *owner;
};

    For arm-smmu-v3.c:

    arm_smmu_device_probe(struct platform_device *pdev)

    -------ret = arm_smmu_device_hw_probe(smmu);

    ------.....

    ------iommu_device_sysfs_add(&smmu->iommu, dev, NULL, "smmu3.%pa", &ioaddr);

    ------iommu_device_set_ops(&smmu->iommu, &arm_smmu_ops);  // arm_smmu_ops is registered here, after the SMMU hardware has been probed

static struct iommu_ops arm_smmu_ops = {
    .capable        = arm_smmu_capable,
    .domain_alloc        = arm_smmu_domain_alloc,
    .domain_free        = arm_smmu_domain_free,
    .attach_dev        = arm_smmu_attach_dev,
    .map            = arm_smmu_map,
    .unmap            = arm_smmu_unmap,
    .flush_iotlb_all    = arm_smmu_flush_iotlb_all,
    .iotlb_sync        = arm_smmu_iotlb_sync,
    .iova_to_phys        = arm_smmu_iova_to_phys,
    .add_device        = arm_smmu_add_device,
    .remove_device        = arm_smmu_remove_device,
    .device_group        = arm_smmu_device_group,
    .domain_get_attr    = arm_smmu_domain_get_attr,
    .domain_set_attr    = arm_smmu_domain_set_attr,
    .of_xlate        = arm_smmu_of_xlate,
    .get_resv_regions    = arm_smmu_get_resv_regions,
    .put_resv_regions    = generic_iommu_put_resv_regions,
    .pgsize_bitmap        = -1UL, /* Restricted during device attach */
};

    arm_smmu_map()

static int arm_smmu_map(struct iommu_domain *domain, unsigned long iova,
            phys_addr_t paddr, size_t size, int prot, gfp_t gfp)
{
    struct io_pgtable_ops *ops = to_smmu_domain(domain)->pgtbl_ops; /* to_smmu_domain() converts to arm_smmu_domain */

    if (!ops)
        return -ENODEV;

    return ops->map(ops, iova, paddr, size, prot);
}
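
    to_smmu_domain() here is simply the container_of() conversion from the generic iommu_domain back to the driver's arm_smmu_domain:

static struct arm_smmu_domain *to_smmu_domain(struct iommu_domain *dom)
{
    return container_of(dom, struct arm_smmu_domain, domain);
}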

    pgtbl_ops is set up in the following function:

    arm_smmu_domain_finalise()

static int arm_smmu_domain_finalise(struct iommu_domain *domain,
                    struct arm_smmu_master *master)
{
    ......
    switch (smmu_domain->stage) {
    case ARM_SMMU_DOMAIN_S1:
        ias = (smmu->features & ARM_SMMU_FEAT_VAX) ? 52 : 48; /* ARM_SMMU_FEAT_VAX = SMMU_IDR5[11:10], Virtual Address eXtend */
        ias = min_t(unsigned long, ias, VA_BITS);
        oas = smmu->ias;
        fmt = ARM_64_LPAE_S1;
        finalise_stage_fn = arm_smmu_domain_finalise_s1;
        break;
    case ARM_SMMU_DOMAIN_NESTED:
    case ARM_SMMU_DOMAIN_S2:
        ias = smmu->ias;
        oas = smmu->oas;
        fmt = ARM_64_LPAE_S2;
        finalise_stage_fn = arm_smmu_domain_finalise_s2;
        break;
    default:
        return -EINVAL;
    }

    pgtbl_cfg = (struct io_pgtable_cfg) {
        .pgsize_bitmap    = smmu->pgsize_bitmap,
        .ias        = ias,
        .oas        = oas,
        .coherent_walk    = smmu->features & ARM_SMMU_FEAT_COHERENCY,
        .tlb        = &arm_smmu_flush_ops,
        .iommu_dev    = smmu->dev,
    };

    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);
    ......
    ret = finalise_stage_fn(smmu_domain, master, &pgtbl_cfg); /* arm_smmu_domain_finalise_s1 or _s2 */

    smmu_domain->pgtbl_ops = pgtbl_ops;
}

    pgtbl_ops = alloc_io_pgtable_ops(fmt, &pgtbl_cfg, smmu_domain);

struct io_pgtable_ops *alloc_io_pgtable_ops(enum io_pgtable_fmt fmt,
                        struct io_pgtable_cfg *cfg,
                        void *cookie)
{
    struct io_pgtable *iop;
    const struct io_pgtable_init_fns *fns;

    if (fmt >= IO_PGTABLE_NUM_FMTS)
        return NULL;

    fns = io_pgtable_init_table[fmt]; /* for ARM_SMMU_DOMAIN_S1 the format is ARM_64_LPAE_S1, i.e. io_pgtable_arm_64_lpae_s1_init_fns is used */
    /*
     * io_pgtable_init_table[IO_PGTABLE_NUM_FMTS] = {
     * #ifdef CONFIG_IOMMU_IO_PGTABLE_LPAE
     *  [ARM_32_LPAE_S1] = &io_pgtable_arm_32_lpae_s1_init_fns,
     *  [ARM_32_LPAE_S2] = &io_pgtable_arm_32_lpae_s2_init_fns,
     *  [ARM_64_LPAE_S1] = &io_pgtable_arm_64_lpae_s1_init_fns,
     *  [ARM_64_LPAE_S2] = &io_pgtable_arm_64_lpae_s2_init_fns,
     *  [ARM_MALI_LPAE] = &io_pgtable_arm_mali_lpae_init_fns,
     * #endif
     * #ifdef CONFIG_IOMMU_IO_PGTABLE_ARMV7S
     *  [ARM_V7S] = &io_pgtable_arm_v7s_init_fns,
     * #endif
     * };
     *
     * struct io_pgtable_init_fns io_pgtable_arm_64_lpae_s1_init_fns = {
     *  .alloc = arm_64_lpae_alloc_pgtable_s1,
     *  .free = arm_lpae_free_pgtable,
     * };
     */
    if (!fns)
        return NULL;

    iop = fns->alloc(cfg, cookie); /* the alloc callback here is arm_64_lpae_alloc_pgtable_s1 */
    if (!iop)
        return NULL;

    iop->fmt = fmt;
    iop->cookie = cookie;
    iop->cfg = *cfg;

    return &iop->ops;
}

    Next, follow arm_64_lpae_alloc_pgtable_s1 to find where iop->ops comes from:

    ------------------data = arm_lpae_alloc_pgtable(cfg);

static struct arm_lpae_io_pgtable *
arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
{
    struct arm_lpae_io_pgtable *data;
    int levels, va_bits, pg_shift;

    arm_lpae_restrict_pgsizes(cfg);

    if (!(cfg->pgsize_bitmap & (SZ_4K | SZ_16K | SZ_64K)))
        return NULL;

    if (cfg->ias > ARM_LPAE_MAX_ADDR_BITS)  /* is this where the 52-bit limit comes in? */
        return NULL;

    if (cfg->oas > ARM_LPAE_MAX_ADDR_BITS)
        return NULL;

    if (!selftest_running && cfg->iommu_dev->dma_pfn_offset) {
        dev_err(cfg->iommu_dev, "Cannot accommodate DMA offset for IOMMU page tables\n");
        return NULL;
    }

    data = kmalloc(sizeof(*data), GFP_KERNEL);
    if (!data)
        return NULL;

    pg_shift = __ffs(cfg->pgsize_bitmap);
    data->bits_per_level = pg_shift - ilog2(sizeof(arm_lpae_iopte));

    va_bits = cfg->ias - pg_shift;
    levels = DIV_ROUND_UP(va_bits, data->bits_per_level);
    data->start_level = ARM_LPAE_MAX_LEVELS - levels;

    /* Calculate the actual size of our pgd (without concatenation) */
    data->pgd_bits = va_bits - (data->bits_per_level * (levels - 1));

    data->iop.ops = (struct io_pgtable_ops) { /* this is the pgtbl_ops used above; the ops->map called in arm_smmu_map() lives here */
        .map        = arm_lpae_map,
        .unmap        = arm_lpae_unmap,
        .iova_to_phys    = arm_lpae_iova_to_phys,
    };

    return data;
}
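
    As a worked example of the geometry computed above, assuming pgsize_bitmap selects a 4 KB granule and ias = 48 (a common stage-1 configuration):

/*
 *   pg_shift        = __ffs(SZ_4K)                 = 12
 *   bits_per_level  = 12 - ilog2(sizeof(u64))      = 12 - 3 = 9
 *   va_bits         = 48 - 12                      = 36
 *   levels          = DIV_ROUND_UP(36, 9)          = 4
 *   start_level     = ARM_LPAE_MAX_LEVELS(4) - 4   = 0
 *   pgd_bits        = 36 - 9 * (4 - 1)             = 9
 *
 * i.e. a classic 4-level table with 512 entries (9 bits of index) per level.
 */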

    Now let's look at this map function:

static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
            phys_addr_t paddr, size_t size, int iommu_prot)
{
    struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
    struct io_pgtable_cfg *cfg = &data->iop.cfg;
    arm_lpae_iopte *ptep = data->pgd;
    int ret, lvl = data->start_level;
    arm_lpae_iopte prot;
    long iaext = (long)iova >> cfg->ias;

    /* If no access, then nothing to do */
    if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
        return 0;

    if (WARN_ON(!size || (size & cfg->pgsize_bitmap) != size))
        return -EINVAL;

    if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)
        iaext = ~iaext;
    if (WARN_ON(iaext || paddr >> cfg->oas))
        return -ERANGE;

    prot = arm_lpae_prot_to_pte(data, iommu_prot);
    ret = __arm_lpae_map(data, iova, paddr, size, prot, lvl, ptep);
    /*
     * Synchronise all PTE updates for the new mapping before there's
     * a chance for anything to kick off a table walk for the new iova.
     */
    wmb();

    return ret;
}

    I haven't fully worked through __arm_lpae_map; it appears to walk the page-table levels one by one. https://blog.csdn.net/tiantao2012/article/details/59105775 explains it a bit.

static int __arm_lpae_map(struct arm_lpae_io_pgtable *data, unsigned long iova,
              phys_addr_t paddr, size_t size, arm_lpae_iopte prot,
              int lvl, arm_lpae_iopte *ptep)
{
    arm_lpae_iopte *cptep, pte;
    size_t block_size = ARM_LPAE_BLOCK_SIZE(lvl, data);
    size_t tblsz = ARM_LPAE_GRANULE(data);
    struct io_pgtable_cfg *cfg = &data->iop.cfg;

    /* Find our entry at the current level */
    ptep += ARM_LPAE_LVL_IDX(iova, lvl, data);

    /* If we can install a leaf entry at this level, then do so */
    if (size == block_size)
        return arm_lpae_init_pte(data, iova, paddr, prot, lvl, ptep);
    /*
     * __arm_lpae_map first computes block_size via ARM_LPAE_BLOCK_SIZE(lvl, data).
     * If the size to map equals block_size, arm_lpae_init_pte is called to
     * install the pte. SMMU mappings use pgd/pmd/pte-style levels just like
     * the CPU MMU, but no more than four levels.
     */

    /* We can't allocate tables at the final level */
    if (WARN_ON(lvl >= ARM_LPAE_MAX_LEVELS - 1))
        return -EINVAL;

    /* Grab a pointer to the next level */
    pte = READ_ONCE(*ptep);
    if (!pte) { /* the arm_lpae_iopte, i.e. the value in *ptep, is empty */
        cptep = __arm_lpae_alloc_pages(tblsz, GFP_ATOMIC, cfg);
        if (!cptep)
            return -ENOMEM;

        pte = arm_lpae_install_table(cptep, ptep, 0, cfg);
        if (pte)
            __arm_lpae_free_pages(cptep, tblsz, cfg);
    } else if (!cfg->coherent_walk && !(pte & ARM_LPAE_PTE_SW_SYNC)) {
        __arm_lpae_sync_pte(ptep, cfg);
    }

    if (pte && !iopte_leaf(pte, lvl, data->iop.fmt)) {
        cptep = iopte_deref(pte, data);
    } else if (pte) {
        /* We require an unmap first */
        WARN_ON(!selftest_running);
        return -EEXIST;
    }

    /* Rinse, repeat */
    return __arm_lpae_map(data, iova, paddr, size, prot, lvl + 1, cptep);
}
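
    To see the level-by-level walk concretely: with a 4 KB granule each recursion step consumes 9 bits of the IOVA, and ARM_LPAE_LVL_IDX simply extracts the index bits for the current level. An illustration for the 4-level / 4 KB case described in the worked example above:

/* Illustration only (4 KB granule, 48-bit IOVA, start_level = 0):
 * each level indexes 512 entries with 9 bits of the IOVA, and the recursion
 * stops at the level whose block size equals the size being mapped.
 *
 *   level 0 index = iova bits [47:39]   -> block size 512 GB
 *   level 1 index = iova bits [38:30]   -> block size 1 GB
 *   level 2 index = iova bits [29:21]   -> block size 2 MB
 *   level 3 index = iova bits [20:12]   -> block size 4 KB (leaf PTE)
 *
 * So mapping a single 4 KB page into an empty table allocates up to three
 * intermediate table pages before arm_lpae_init_pte() writes the leaf entry.
 */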
  • Original post: https://www.cnblogs.com/yi-mu-xi/p/12336330.html