Lines Matching +full:partitions +full:- +full:table +full:- +full:offset

1 // SPDX-License-Identifier: GPL-2.0-only
4 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
25 * with a table of contents data structure (@smem_header) at the beginning of
37 * These partitions are referenced from an optional partition table
39 * partition table entries (@smem_ptable_entry) list the involved processors
44 * two regions are cached and non-cached memory respectively. Each region
48 * Items in the non-cached region are allocated from the start of the partition
50 * is hence the region between the cached and non-cached offsets. The header of
59 * be held - currently lock number 3 of the sfpb or tcsr is used for this on all
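
The layout described above can be sketched with a small native-endian mock (illustrative only, not the driver's real struct: the actual header uses little-endian fields plus magic and host ids):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

/* Uncached items grow up from the header, cached items grow down from
 * the end of the partition; the gap between the two watermarks is the
 * remaining free space. */
struct mock_partition_header {
	uint32_t size;                 /* total partition size in bytes */
	uint32_t offset_free_uncached; /* first free byte of the uncached heap */
	uint32_t offset_free_cached;   /* first free byte below the cached heap */
};

static uint32_t mock_free_bytes(const struct mock_partition_header *h)
{
	assert(h->offset_free_uncached <= h->offset_free_cached);
	assert(h->offset_free_cached <= h->size);
	return h->offset_free_cached - h->offset_free_uncached;
}

int main(void)
{
	struct mock_partition_header h = {
		.size = 4096,
		.offset_free_uncached = 256,  /* uncached heap grew up to 256 */
		.offset_free_cached = 3840,   /* cached heap grew down to 3840 */
	};
	printf("free: %u bytes\n", (unsigned)mock_free_bytes(&h)); /* 3584 */
	return 0;
}
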
92 * struct smem_proc_comm - proc_comm communication struct (legacy)
104 * struct smem_global_entry - entry to reference smem items on the heap
106 * @offset: offset to the allocated space
113 __le32 offset; member
120 * struct smem_header - header found in beginning of primary smem region
140 * struct smem_ptable_entry - one entry in the @smem_ptable list
141 * @offset: offset, within the main shared memory region, of the partition
150 __le32 offset; member
160 * struct smem_ptable - partition table for the private partitions
162 * @version: version of the partition table
163 * @num_entries: number of partitions in the table
165 * @entry: list of @smem_ptable_entry for the @num_entries partitions
178 * struct smem_partition_header - header of the partitions
183 * @offset_free_uncached: offset to the first free byte of uncached memory in
185 * @offset_free_cached: offset to the first free byte of cached memory in this
200 * struct smem_partition - describes smem partition
216 * struct smem_private_entry - header of each item in the private partition
235 * struct smem_info - smem region info located after the table of contents
253 * struct smem_region - representation of a chunk of memory used for smem
265 * struct qcom_smem - device data for the smem device
268 * @ptable: virtual base of partition table
270 * @partitions: list of partitions of current processor/host
285 struct smem_partition partitions[SMEM_HOST_COUNT]; member
296 return p + le32_to_cpu(phdr->offset_free_uncached); in phdr_to_last_uncached_entry()
306 return p + le32_to_cpu(phdr->size) - ALIGN(sizeof(*e), cacheline); in phdr_to_first_cached_entry()
314 return p + le32_to_cpu(phdr->offset_free_cached); in phdr_to_last_cached_entry()
330 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr) + in uncached_entry_next()
331 le32_to_cpu(e->size); in uncached_entry_next()
339 return p - le32_to_cpu(e->size) - ALIGN(sizeof(*e), cacheline); in cached_entry_next()
346 return p + sizeof(*e) + le16_to_cpu(e->padding_hdr); in uncached_entry_to_item()
353 return p - le32_to_cpu(e->size); in cached_entry_to_item()
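
The traversal helpers above are plain pointer arithmetic. A self-contained mock of the uncached walk (native-endian and without the canary, unlike the real __le16/__le32 entries):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Simplified take on struct smem_private_entry. */
struct mock_entry {
	uint16_t item;        /* item id */
	uint32_t size;        /* payload size, including tail padding */
	uint16_t padding_hdr; /* padding between header and payload */
};

/* Mirrors uncached_entry_next(): skip header, header padding, payload. */
static void *mock_uncached_next(void *p)
{
	struct mock_entry e;

	memcpy(&e, p, sizeof(e));
	return (unsigned char *)p + sizeof(e) + e.padding_hdr + e.size;
}

int main(void)
{
	unsigned char buf[256] = { 0 };
	struct mock_entry first  = { .item = 1, .size = 16, .padding_hdr = 0 };
	struct mock_entry second = { .item = 2, .size = 8,  .padding_hdr = 0 };
	struct mock_entry out;

	memcpy(buf, &first, sizeof(first));
	memcpy(mock_uncached_next(buf), &second, sizeof(second));

	memcpy(&out, mock_uncached_next(buf), sizeof(out));
	printf("second item id: %u\n", (unsigned)out.item); /* prints 2 */
	return 0;
}
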
366 * qcom_smem_bust_hwspin_lock_by_host() - bust the smem hwspinlock for a host
382 return -EINVAL; in qcom_smem_bust_hwspin_lock_by_host()
384 return hwspin_lock_bust(__smem->hwlock, SMEM_HOST_ID_TO_HWSPINLOCK_ID(host)); in qcom_smem_bust_hwspin_lock_by_host()
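
A sketch of how a caller might use this export; typically a remoteproc driver busts the lock after the remote processor crashed while holding it (the host id is platform dependent):

#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>

static void example_bust_smem_lock(unsigned int crashed_host)
{
	if (qcom_smem_bust_hwspin_lock_by_host(crashed_host))
		pr_warn("could not bust smem lock for host %u\n", crashed_host);
}
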
389 * qcom_smem_is_available() - Check if SMEM is available
410 phdr = (struct smem_partition_header __force *)part->virt_base; in qcom_smem_alloc_private()
411 p_end = (void *)phdr + part->size; in qcom_smem_alloc_private()
418 return -EINVAL; in qcom_smem_alloc_private()
421 if (hdr->canary != SMEM_PRIVATE_CANARY) in qcom_smem_alloc_private()
423 if (le16_to_cpu(hdr->item) == item) in qcom_smem_alloc_private()
424 return -EEXIST; in qcom_smem_alloc_private()
430 return -EINVAL; in qcom_smem_alloc_private()
435 dev_err(smem->dev, "Out of memory\n"); in qcom_smem_alloc_private()
436 return -ENOSPC; in qcom_smem_alloc_private()
439 hdr->canary = SMEM_PRIVATE_CANARY; in qcom_smem_alloc_private()
440 hdr->item = cpu_to_le16(item); in qcom_smem_alloc_private()
441 hdr->size = cpu_to_le32(ALIGN(size, 8)); in qcom_smem_alloc_private()
442 hdr->padding_data = cpu_to_le16(le32_to_cpu(hdr->size) - size); in qcom_smem_alloc_private()
443 hdr->padding_hdr = 0; in qcom_smem_alloc_private()
446 * Ensure the header is written before we advance the free offset, so in qcom_smem_alloc_private()
451 le32_add_cpu(&phdr->offset_free_uncached, alloc_size); in qcom_smem_alloc_private()
455 dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", in qcom_smem_alloc_private()
456 le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); in qcom_smem_alloc_private()
458 return -EINVAL; in qcom_smem_alloc_private()
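
The allocation path boils down to a bump allocator over the uncached heap. A minimal native-endian sketch of the same idea (the real function additionally writes the canary-bearing entry header and issues wmb() before advancing the watermark, so a racing reader never sees the new offset ahead of a fully written header):

#include <stddef.h>
#include <stdint.h>

struct mock_header {
	uint32_t offset_free_uncached; /* low watermark, grows up */
	uint32_t offset_free_cached;   /* high watermark, grows down */
};

static void *mock_alloc_uncached(struct mock_header *h, unsigned char *base,
				 size_t hdr_size, uint32_t size)
{
	uint32_t aligned = (size + 7) & ~(uint32_t)7; /* ALIGN(size, 8) */
	uint32_t need = hdr_size + aligned;
	unsigned char *p;

	if (h->offset_free_uncached + need > h->offset_free_cached)
		return NULL; /* would collide with the cached heap */

	p = base + h->offset_free_uncached;
	/* ... write the entry header at p here ... */
	h->offset_free_uncached += need; /* publish last */
	return p + hdr_size;
}
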
468 header = smem->regions[0].virt_base; in qcom_smem_alloc_global()
469 entry = &header->toc[item]; in qcom_smem_alloc_global()
470 if (entry->allocated) in qcom_smem_alloc_global()
471 return -EEXIST; in qcom_smem_alloc_global()
474 if (WARN_ON(size > le32_to_cpu(header->available))) in qcom_smem_alloc_global()
475 return -ENOMEM; in qcom_smem_alloc_global()
477 entry->offset = header->free_offset; in qcom_smem_alloc_global()
478 entry->size = cpu_to_le32(size); in qcom_smem_alloc_global()
486 entry->allocated = cpu_to_le32(1); in qcom_smem_alloc_global()
488 le32_add_cpu(&header->free_offset, size); in qcom_smem_alloc_global()
489 le32_add_cpu(&header->available, -size); in qcom_smem_alloc_global()
495 * qcom_smem_alloc() - allocate space for a smem item
496 * @host: remote processor id, or -1
510 return -EPROBE_DEFER; in qcom_smem_alloc()
513 dev_err(__smem->dev, in qcom_smem_alloc()
515 return -EINVAL; in qcom_smem_alloc()
518 if (WARN_ON(item >= __smem->item_count)) in qcom_smem_alloc()
519 return -EINVAL; in qcom_smem_alloc()
521 ret = hwspin_lock_timeout_irqsave(__smem->hwlock, in qcom_smem_alloc()
527 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_alloc()
528 part = &__smem->partitions[host]; in qcom_smem_alloc()
530 } else if (__smem->global_partition.virt_base) { in qcom_smem_alloc()
531 part = &__smem->global_partition; in qcom_smem_alloc()
537 hwspin_unlock_irqrestore(__smem->hwlock, &flags); in qcom_smem_alloc()
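
Driver-side usage of the exported allocator, hedged: the item id and host below are made up for illustration, and -EEXIST simply means another caller allocated the item first:

#include <linux/errno.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_SMEM_ITEM	602	/* hypothetical item id */
#define EXAMPLE_REMOTE_HOST	1	/* hypothetical remote host */

static int example_smem_alloc(void)
{
	int ret;

	ret = qcom_smem_alloc(EXAMPLE_REMOTE_HOST, EXAMPLE_SMEM_ITEM, 64);
	if (ret == -EEXIST)	/* already allocated: fine for most users */
		ret = 0;
	return ret;		/* may be -EPROBE_DEFER early in boot */
}
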
555 header = smem->regions[0].virt_base; in qcom_smem_get_global()
556 entry = &header->toc[item]; in qcom_smem_get_global()
557 if (!entry->allocated) in qcom_smem_get_global()
558 return ERR_PTR(-ENXIO); in qcom_smem_get_global()
560 aux_base = le32_to_cpu(entry->aux_base) & AUX_BASE_MASK; in qcom_smem_get_global()
562 for (i = 0; i < smem->num_regions; i++) { in qcom_smem_get_global()
563 region = &smem->regions[i]; in qcom_smem_get_global()
565 if ((u32)region->aux_base == aux_base || !aux_base) { in qcom_smem_get_global()
566 e_size = le32_to_cpu(entry->size); in qcom_smem_get_global()
567 entry_offset = le32_to_cpu(entry->offset); in qcom_smem_get_global()
569 if (WARN_ON(e_size + entry_offset > region->size)) in qcom_smem_get_global()
570 return ERR_PTR(-EINVAL); in qcom_smem_get_global()
575 return region->virt_base + entry_offset; in qcom_smem_get_global()
579 return ERR_PTR(-ENOENT); in qcom_smem_get_global()
593 phdr = (struct smem_partition_header __force *)part->virt_base; in qcom_smem_get_private()
594 p_end = (void *)phdr + part->size; in qcom_smem_get_private()
600 if (e->canary != SMEM_PRIVATE_CANARY) in qcom_smem_get_private()
603 if (le16_to_cpu(e->item) == item) { in qcom_smem_get_private()
605 e_size = le32_to_cpu(e->size); in qcom_smem_get_private()
606 padding_data = le16_to_cpu(e->padding_data); in qcom_smem_get_private()
608 if (WARN_ON(e_size > part->size || padding_data > e_size)) in qcom_smem_get_private()
609 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
611 *size = e_size - padding_data; in qcom_smem_get_private()
616 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
625 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
629 e = phdr_to_first_cached_entry(phdr, part->cacheline); in qcom_smem_get_private()
633 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
636 if (e->canary != SMEM_PRIVATE_CANARY) in qcom_smem_get_private()
639 if (le16_to_cpu(e->item) == item) { in qcom_smem_get_private()
641 e_size = le32_to_cpu(e->size); in qcom_smem_get_private()
642 padding_data = le16_to_cpu(e->padding_data); in qcom_smem_get_private()
644 if (WARN_ON(e_size > part->size || padding_data > e_size)) in qcom_smem_get_private()
645 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
647 *size = e_size - padding_data; in qcom_smem_get_private()
652 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
657 e = cached_entry_next(e, part->cacheline); in qcom_smem_get_private()
661 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
663 return ERR_PTR(-ENOENT); in qcom_smem_get_private()
666 dev_err(smem->dev, "Found invalid canary in hosts %hu:%hu partition\n", in qcom_smem_get_private()
667 le16_to_cpu(phdr->host0), le16_to_cpu(phdr->host1)); in qcom_smem_get_private()
669 return ERR_PTR(-EINVAL); in qcom_smem_get_private()
673 * qcom_smem_get() - resolve the pointer and size of a smem item
674 * @host: the remote processor, or -1
684 void *ptr = ERR_PTR(-EPROBE_DEFER); in qcom_smem_get()
689 if (WARN_ON(item >= __smem->item_count)) in qcom_smem_get()
690 return ERR_PTR(-EINVAL); in qcom_smem_get()
692 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_get()
693 part = &__smem->partitions[host]; in qcom_smem_get()
695 } else if (__smem->global_partition.virt_base) { in qcom_smem_get()
696 part = &__smem->global_partition; in qcom_smem_get()
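
And the matching lookup, reusing the made-up ids from the allocation sketch; note the returned size excludes the allocator's tail padding, per the padding_data handling above:

#include <linux/err.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_SMEM_ITEM	602	/* same hypothetical ids as above */
#define EXAMPLE_REMOTE_HOST	1

static void *example_smem_get(size_t *size)
{
	void *ptr = qcom_smem_get(EXAMPLE_REMOTE_HOST, EXAMPLE_SMEM_ITEM, size);

	if (IS_ERR(ptr))	/* e.g. -ENOENT if never allocated */
		return NULL;
	return ptr;
}
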
707 * qcom_smem_get_free_space() - retrieve amount of free space in a partition
708 * @host: the remote processor identifying a partition, or -1
721 return -EPROBE_DEFER; in qcom_smem_get_free_space()
723 if (host < SMEM_HOST_COUNT && __smem->partitions[host].virt_base) { in qcom_smem_get_free_space()
724 part = &__smem->partitions[host]; in qcom_smem_get_free_space()
725 phdr = part->virt_base; in qcom_smem_get_free_space()
726 ret = le32_to_cpu(phdr->offset_free_cached) - in qcom_smem_get_free_space()
727 le32_to_cpu(phdr->offset_free_uncached); in qcom_smem_get_free_space()
729 if (ret > le32_to_cpu(part->size)) in qcom_smem_get_free_space()
730 return -EINVAL; in qcom_smem_get_free_space()
731 } else if (__smem->global_partition.virt_base) { in qcom_smem_get_free_space()
732 part = &__smem->global_partition; in qcom_smem_get_free_space()
733 phdr = part->virt_base; in qcom_smem_get_free_space()
734 ret = le32_to_cpu(phdr->offset_free_cached) - in qcom_smem_get_free_space()
735 le32_to_cpu(phdr->offset_free_uncached); in qcom_smem_get_free_space()
737 if (ret > le32_to_cpu(part->size)) in qcom_smem_get_free_space()
738 return -EINVAL; in qcom_smem_get_free_space()
740 header = __smem->regions[0].virt_base; in qcom_smem_get_free_space()
741 ret = le32_to_cpu(header->available); in qcom_smem_get_free_space()
743 if (ret > __smem->regions[0].size) in qcom_smem_get_free_space()
744 return -EINVAL; in qcom_smem_get_free_space()
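
A short usage sketch; with no partition dedicated to the host, the global partition and finally the global heap are consulted, as the fallbacks above show:

#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>

static void example_report_free(unsigned int host)
{
	int free_bytes = qcom_smem_get_free_space(host);

	if (free_bytes >= 0)
		pr_info("smem: %d bytes free for host %u\n", free_bytes, host);
}
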
757 * qcom_smem_virt_to_phys() - return the physical address associated
767 u64 offset; in qcom_smem_virt_to_phys() local
771 part = &__smem->partitions[i]; in qcom_smem_virt_to_phys()
773 if (addr_in_range(part->virt_base, part->size, p)) { in qcom_smem_virt_to_phys()
774 offset = p - part->virt_base; in qcom_smem_virt_to_phys()
776 return (phys_addr_t)part->phys_base + offset; in qcom_smem_virt_to_phys()
780 part = &__smem->global_partition; in qcom_smem_virt_to_phys()
782 if (addr_in_range(part->virt_base, part->size, p)) { in qcom_smem_virt_to_phys()
783 offset = p - part->virt_base; in qcom_smem_virt_to_phys()
785 return (phys_addr_t)part->phys_base + offset; in qcom_smem_virt_to_phys()
788 for (i = 0; i < __smem->num_regions; i++) { in qcom_smem_virt_to_phys()
789 area = &__smem->regions[i]; in qcom_smem_virt_to_phys()
791 if (addr_in_range(area->virt_base, area->size, p)) { in qcom_smem_virt_to_phys()
792 offset = p - area->virt_base; in qcom_smem_virt_to_phys()
794 return (phys_addr_t)area->aux_base + offset; in qcom_smem_virt_to_phys()
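
A hedged usage sketch, e.g. for handing a remote processor the physical location of an item (same hypothetical ids as in the earlier examples):

#include <linux/err.h>
#include <linux/soc/qcom/smem.h>

#define EXAMPLE_SMEM_ITEM	602
#define EXAMPLE_REMOTE_HOST	1

static int example_item_phys(phys_addr_t *phys)
{
	size_t size;
	void *ptr = qcom_smem_get(EXAMPLE_REMOTE_HOST, EXAMPLE_SMEM_ITEM, &size);

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	*phys = qcom_smem_virt_to_phys(ptr);
	return 0;
}
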
803 * qcom_smem_get_soc_id() - return the SoC ID
818 *id = __le32_to_cpu(info->id); in qcom_smem_get_soc_id()
825 * qcom_smem_get_feature_code() - return the feature code
842 if (__le32_to_cpu(info->fmt) < SOCINFO_VERSION(0, 16)) in qcom_smem_get_feature_code()
843 return -EOPNOTSUPP; in qcom_smem_get_feature_code()
845 raw_code = __le32_to_cpu(info->feature_code); in qcom_smem_get_feature_code()
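
Both helpers are straightforward to consume; note qcom_smem_get_feature_code() returns -EOPNOTSUPP on socinfo formats older than 0.16, as the check above shows:

#include <linux/printk.h>
#include <linux/soc/qcom/smem.h>

static void example_socinfo(void)
{
	u32 id, code;

	if (!qcom_smem_get_soc_id(&id))
		pr_info("SoC id: %u\n", id);
	if (!qcom_smem_get_feature_code(&code))
		pr_info("feature code: %#x\n", code);
}
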
862 header = smem->regions[0].virt_base; in qcom_smem_get_sbl_version()
863 versions = header->version; in qcom_smem_get_sbl_version()
873 ptable = smem->ptable; in qcom_smem_get_ptable()
874 if (memcmp(ptable->magic, SMEM_PTABLE_MAGIC, sizeof(ptable->magic))) in qcom_smem_get_ptable()
875 return ERR_PTR(-ENOENT); in qcom_smem_get_ptable()
877 version = le32_to_cpu(ptable->version); in qcom_smem_get_ptable()
879 dev_err(smem->dev, in qcom_smem_get_ptable()
881 return ERR_PTR(-EINVAL); in qcom_smem_get_ptable()
895 info = (struct smem_info *)&ptable->entry[ptable->num_entries]; in qcom_smem_get_item_count()
896 if (memcmp(info->magic, SMEM_INFO_MAGIC, sizeof(info->magic))) in qcom_smem_get_item_count()
899 return le16_to_cpu(info->num_items); in qcom_smem_get_item_count()
904 * table entry is supplied. Returns a pointer to its header if
915 phys_addr = smem->regions[0].aux_base + le32_to_cpu(entry->offset); in qcom_smem_partition_header()
916 header = devm_ioremap_wc(smem->dev, phys_addr, le32_to_cpu(entry->size)); in qcom_smem_partition_header()
921 if (memcmp(header->magic, SMEM_PART_MAGIC, sizeof(header->magic))) { in qcom_smem_partition_header()
922 dev_err(smem->dev, "bad partition magic %4ph\n", header->magic); in qcom_smem_partition_header()
926 if (host0 != le16_to_cpu(header->host0)) { in qcom_smem_partition_header()
927 dev_err(smem->dev, "bad host0 (%hu != %hu)\n", in qcom_smem_partition_header()
928 host0, le16_to_cpu(header->host0)); in qcom_smem_partition_header()
931 if (host1 != le16_to_cpu(header->host1)) { in qcom_smem_partition_header()
932 dev_err(smem->dev, "bad host1 (%hu != %hu)\n", in qcom_smem_partition_header()
933 host1, le16_to_cpu(header->host1)); in qcom_smem_partition_header()
937 size = le32_to_cpu(header->size); in qcom_smem_partition_header()
938 if (size != le32_to_cpu(entry->size)) { in qcom_smem_partition_header()
939 dev_err(smem->dev, "bad partition size (%u != %u)\n", in qcom_smem_partition_header()
940 size, le32_to_cpu(entry->size)); in qcom_smem_partition_header()
944 if (le32_to_cpu(header->offset_free_uncached) > size) { in qcom_smem_partition_header()
945 dev_err(smem->dev, "bad partition free uncached (%u > %u)\n", in qcom_smem_partition_header()
946 le32_to_cpu(header->offset_free_uncached), size); in qcom_smem_partition_header()
961 if (smem->global_partition.virt_base) { in qcom_smem_set_global_partition()
962 dev_err(smem->dev, "Already found the global partition\n"); in qcom_smem_set_global_partition()
963 return -EINVAL; in qcom_smem_set_global_partition()
970 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { in qcom_smem_set_global_partition()
971 entry = &ptable->entry[i]; in qcom_smem_set_global_partition()
972 if (!le32_to_cpu(entry->offset)) in qcom_smem_set_global_partition()
974 if (!le32_to_cpu(entry->size)) in qcom_smem_set_global_partition()
977 if (le16_to_cpu(entry->host0) != SMEM_GLOBAL_HOST) in qcom_smem_set_global_partition()
980 if (le16_to_cpu(entry->host1) == SMEM_GLOBAL_HOST) { in qcom_smem_set_global_partition()
987 dev_err(smem->dev, "Missing entry for global partition\n"); in qcom_smem_set_global_partition()
988 return -EINVAL; in qcom_smem_set_global_partition()
994 return -EINVAL; in qcom_smem_set_global_partition()
996 smem->global_partition.virt_base = (void __iomem *)header; in qcom_smem_set_global_partition()
997 smem->global_partition.phys_base = smem->regions[0].aux_base + in qcom_smem_set_global_partition()
998 le32_to_cpu(entry->offset); in qcom_smem_set_global_partition()
999 smem->global_partition.size = le32_to_cpu(entry->size); in qcom_smem_set_global_partition()
1000 smem->global_partition.cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_set_global_partition()
1019 for (i = 0; i < le32_to_cpu(ptable->num_entries); i++) { in qcom_smem_enumerate_partitions()
1020 entry = &ptable->entry[i]; in qcom_smem_enumerate_partitions()
1021 if (!le32_to_cpu(entry->offset)) in qcom_smem_enumerate_partitions()
1023 if (!le32_to_cpu(entry->size)) in qcom_smem_enumerate_partitions()
1026 host0 = le16_to_cpu(entry->host0); in qcom_smem_enumerate_partitions()
1027 host1 = le16_to_cpu(entry->host1); in qcom_smem_enumerate_partitions()
1036 dev_err(smem->dev, "bad host %u\n", remote_host); in qcom_smem_enumerate_partitions()
1037 return -EINVAL; in qcom_smem_enumerate_partitions()
1040 if (smem->partitions[remote_host].virt_base) { in qcom_smem_enumerate_partitions()
1041 dev_err(smem->dev, "duplicate host %u\n", remote_host); in qcom_smem_enumerate_partitions()
1042 return -EINVAL; in qcom_smem_enumerate_partitions()
1047 return -EINVAL; in qcom_smem_enumerate_partitions()
1049 smem->partitions[remote_host].virt_base = (void __iomem *)header; in qcom_smem_enumerate_partitions()
1050 smem->partitions[remote_host].phys_base = smem->regions[0].aux_base + in qcom_smem_enumerate_partitions()
1051 le32_to_cpu(entry->offset); in qcom_smem_enumerate_partitions()
1052 smem->partitions[remote_host].size = le32_to_cpu(entry->size); in qcom_smem_enumerate_partitions()
1053 smem->partitions[remote_host].cacheline = le32_to_cpu(entry->cacheline); in qcom_smem_enumerate_partitions()
1064 region->virt_base = devm_ioremap_wc(smem->dev, region->aux_base, SZ_4K); in qcom_smem_map_toc()
1065 ptable_start = region->aux_base + region->size - SZ_4K; in qcom_smem_map_toc()
1067 smem->ptable = devm_ioremap_wc(smem->dev, ptable_start, SZ_4K); in qcom_smem_map_toc()
1069 if (!region->virt_base || !smem->ptable) in qcom_smem_map_toc()
1070 return -ENOMEM; in qcom_smem_map_toc()
1079 phys_addr = smem->regions[0].aux_base; in qcom_smem_map_global()
1081 smem->regions[0].size = size; in qcom_smem_map_global()
1082 smem->regions[0].virt_base = devm_ioremap_wc(smem->dev, phys_addr, size); in qcom_smem_map_global()
1084 if (!smem->regions[0].virt_base) in qcom_smem_map_global()
1085 return -ENOMEM; in qcom_smem_map_global()
1093 struct device *dev = smem->dev; in qcom_smem_resolve_mem()
1098 np = of_parse_phandle(dev->of_node, name, 0); in qcom_smem_resolve_mem()
1101 return -EINVAL; in qcom_smem_resolve_mem()
1109 region->aux_base = r.start; in qcom_smem_resolve_mem()
1110 region->size = resource_size(&r); in qcom_smem_resolve_mem()
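
qcom_smem_resolve_mem() follows the stock phandle-to-resource pattern; a standalone sketch of the same steps:

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/of.h>
#include <linux/of_address.h>

static int example_resolve_mem(struct device *dev, const char *prop,
			       phys_addr_t *base, size_t *size)
{
	struct device_node *np;
	struct resource r;
	int ret;

	/* e.g. prop = "memory-region" or "qcom,rpm-msg-ram" */
	np = of_parse_phandle(dev->of_node, prop, 0);
	if (!np)
		return -EINVAL;

	ret = of_address_to_resource(np, 0, &r);
	of_node_put(np);
	if (ret)
		return ret;

	*base = r.start;
	*size = resource_size(&r);
	return 0;
}
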
1129 if (of_property_present(pdev->dev.of_node, "qcom,rpm-msg-ram")) in qcom_smem_probe()
1132 smem = devm_kzalloc(&pdev->dev, struct_size(smem, regions, num_regions), in qcom_smem_probe()
1135 return -ENOMEM; in qcom_smem_probe()
1137 smem->dev = &pdev->dev; in qcom_smem_probe()
1138 smem->num_regions = num_regions; in qcom_smem_probe()
1140 rmem = of_reserved_mem_lookup(pdev->dev.of_node); in qcom_smem_probe()
1142 smem->regions[0].aux_base = rmem->base; in qcom_smem_probe()
1143 smem->regions[0].size = rmem->size; in qcom_smem_probe()
1146 * Fall back to the memory-region reference, if we're not a in qcom_smem_probe()
1147 * reserved-memory node. in qcom_smem_probe()
1149 ret = qcom_smem_resolve_mem(smem, "memory-region", &smem->regions[0]); in qcom_smem_probe()
1155 ret = qcom_smem_resolve_mem(smem, "qcom,rpm-msg-ram", &smem->regions[1]); in qcom_smem_probe()
1161 ret = qcom_smem_map_toc(smem, &smem->regions[0]); in qcom_smem_probe()
1166 smem->regions[i].virt_base = devm_ioremap_wc(&pdev->dev, in qcom_smem_probe()
1167 smem->regions[i].aux_base, in qcom_smem_probe()
1168 smem->regions[i].size); in qcom_smem_probe()
1169 if (!smem->regions[i].virt_base) { in qcom_smem_probe()
1170 dev_err(&pdev->dev, "failed to remap %pa\n", &smem->regions[i].aux_base); in qcom_smem_probe()
1171 return -ENOMEM; in qcom_smem_probe()
1175 header = smem->regions[0].virt_base; in qcom_smem_probe()
1176 if (le32_to_cpu(header->initialized) != 1 || in qcom_smem_probe()
1177 le32_to_cpu(header->reserved)) { in qcom_smem_probe()
1178 dev_err(&pdev->dev, "SMEM is not initialized by SBL\n"); in qcom_smem_probe()
1179 return -EINVAL; in qcom_smem_probe()
1182 hwlock_id = of_hwspin_lock_get_id(pdev->dev.of_node, 0); in qcom_smem_probe()
1184 if (hwlock_id != -EPROBE_DEFER) in qcom_smem_probe()
1185 dev_err(&pdev->dev, "failed to retrieve hwlock\n"); in qcom_smem_probe()
1189 smem->hwlock = hwspin_lock_request_specific(hwlock_id); in qcom_smem_probe()
1190 if (!smem->hwlock) in qcom_smem_probe()
1191 return -ENXIO; in qcom_smem_probe()
1193 ret = hwspin_lock_timeout_irqsave(smem->hwlock, HWSPINLOCK_TIMEOUT, &flags); in qcom_smem_probe()
1196 size = readl_relaxed(&header->available) + readl_relaxed(&header->free_offset); in qcom_smem_probe()
1197 hwspin_unlock_irqrestore(smem->hwlock, &flags); in qcom_smem_probe()
1205 devm_iounmap(smem->dev, smem->regions[0].virt_base); in qcom_smem_probe()
1211 smem->item_count = qcom_smem_get_item_count(smem); in qcom_smem_probe()
1215 smem->item_count = SMEM_ITEM_COUNT; in qcom_smem_probe()
1218 dev_err(&pdev->dev, "Unsupported SMEM version 0x%x\n", version); in qcom_smem_probe()
1219 return -EINVAL; in qcom_smem_probe()
1224 if (ret < 0 && ret != -ENOENT) in qcom_smem_probe()
1229 smem->socinfo = platform_device_register_data(&pdev->dev, "qcom-socinfo", in qcom_smem_probe()
1232 if (IS_ERR(smem->socinfo)) in qcom_smem_probe()
1233 dev_dbg(&pdev->dev, "failed to register socinfo device\n"); in qcom_smem_probe()
1240 platform_device_unregister(__smem->socinfo); in qcom_smem_remove()
1242 hwspin_lock_free(__smem->hwlock); in qcom_smem_remove()
1256 .name = "qcom-smem",