Lines Matching +full:pre +full:- +full:verified
1 // SPDX-License-Identifier: GPL-2.0-or-later
3 * Copyright (C) International Business Machines Corp., 2000-2004
27 * Overall design --
34 * where <name> is constructed from a null-terminated ascii string
35 * (1 ... 255 bytes in the name) and <value> is arbitrary 8 bit data
36 * (1 ... 65535 bytes). The in-memory format is
39 * +-------+--------+--------+----------------+-------------------+
40 * | Flags | Name   | Value  | Name String \0 | Data . . . .      |
41 * |       | Length | Length |                |                   |
42 * +-------+--------+--------+----------------+-------------------+
47 * +------------+-------------------+--------------------+-----
48 * | Overall EA | First FEA Element | Second FEA Element | .....
49 * | List Size  |                   |                    |
50 * +------------+-------------------+--------------------+-----
52 * On-disk:
55 * written directly. An EA list may be in-lined in the inode if there is sufficient room available.
77 * Mapping of on-disk attribute names: for on-disk attribute names with an unknown prefix (not "system.", "user.", "security.", or "trusted."), the name is prepended with "os2."
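/*
 * Editorial sketch (not part of this file): the layout described above maps
 * onto structures roughly like the ones below.  The authoritative
 * definitions live in the JFS headers (jfs_xattr.h); the "_sketch" names and
 * comments here are illustrative assumptions, not the kernel's own code.
 */
#include <linux/types.h>

struct jfs_ea_sketch {
	u8 flag;		/* EA flag byte */
	u8 namelen;		/* length of the name string */
	__le16 valuelen;	/* length of the value data */
	char name[];		/* NUL-terminated name, then value bytes */
};

struct jfs_ea_list_sketch {
	__le32 size;		/* overall list size in bytes */
	/* followed by a packed sequence of EA entries as above */
};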
97 if (is_known_namespace(ea->name)) in name_size()
98 return ea->namelen; in name_size()
100 return ea->namelen + XATTR_OS2_PREFIX_LEN; in name_size()
105 int len = ea->namelen; in copy_name()
107 if (!is_known_namespace(ea->name)) { in copy_name()
112 memcpy(buffer, ea->name, ea->namelen); in copy_name()
113 buffer[ea->namelen] = 0; in copy_name()
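/*
 * Worked example (editorial): an on-disk EA named "FOO" with no recognized
 * namespace prefix is reported to user space as "os2.FOO"; that is why
 * name_size() above adds XATTR_OS2_PREFIX_LEN and copy_name() prepends the
 * prefix before copying the original name.
 */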
126 * PRE CONDITIONS:
127 * Already verified that the specified EA is small enough to fit inline
130 * ip - Inode pointer
131 * ealist - EA list pointer
132 * size - size of ealist in bytes
133 * ea - dxd_t structure to be filled in with necessary EA information
141 * RETURNS: 0 for successful copy to inline area; -EPERM if area not available
149 * Make sure we have an EA -- the NULL EA list is valid, but you can't copy it! in ea_write_inline()
153 assert(size <= sizeof (ji->i_inline_ea)); in ea_write_inline()
159 if (!(ji->mode2 & INLINEEA) && !(ji->ea.flag & DXD_INLINE)) in ea_write_inline()
160 return -EPERM; in ea_write_inline()
165 memcpy(ji->i_inline_ea, ealist, size); in ea_write_inline()
166 ea->flag = DXD_INLINE; in ea_write_inline()
167 ji->mode2 &= ~INLINEEA; in ea_write_inline()
169 ea->flag = 0; in ea_write_inline()
175 if (ji->ea.flag & DXD_INLINE) in ea_write_inline()
176 ji->mode2 |= INLINEEA; in ea_write_inline()
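/*
 * Editorial note on the logic above: the INLINEEA bit in mode2 appears to
 * record whether the inode's inline EA area is free for reuse -- it is
 * cleared when the inline area is consumed by ea_write_inline() and set
 * again when an EA that previously lived inline is released or moved out.
 */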
187 * PRE CONDITIONS: EA has been verified
190 * ip - Inode pointer
191 * ealist - EA list pointer
192 * size - size of ealist in bytes
193 * ea - dxd_t structure to be filled in appropriately with where the EA was copied in ea_write()
204 struct super_block *sb = ip->i_sb; in ea_write()
216 * Quick check to see if this is an in-linable EA. Short EAs in ea_write()
217 * and empty EAs are all in-linable, provided the space exists. in ea_write()
219 if (!ealist || size <= sizeof (ji->i_inline_ea)) { in ea_write()
225 nblocks = (size + (sb->s_blocksize - 1)) >> sb->s_blocksize_bits; in ea_write()
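/*
 * Editorial note: the computation above rounds the EA list size up to whole
 * filesystem blocks.  For example, with a 4096-byte block size
 * (s_blocksize_bits == 12), size == 5000 gives (5000 + 4095) >> 12 == 2
 * blocks.
 */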
246 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_write()
253 ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits)) in ea_write()
254 << sb->s_blocksize_bits; in ea_write()
257 rc = -EIO; in ea_write()
261 memcpy(mp->data, cp, nb); in ea_write()
265 * forced writes like this one. --hch in ea_write()
272 * the write failed -- this means that the buffer in ea_write()
284 nbytes -= nb; in ea_write()
287 ea->flag = DXD_EXTENT; in ea_write()
288 DXDsize(ea, le32_to_cpu(ealist->size)); in ea_write()
293 if (ji->ea.flag & DXD_INLINE) in ea_write()
294 ji->mode2 |= INLINEEA; in ea_write()
312 * ip - Inode pointer
313 * ealist - Pointer to buffer to fill in with EA
320 int ea_size = sizeDXD(&ji->ea); in ea_read_inline()
323 ealist->size = 0; in ea_read_inline()
328 if ((sizeDXD(&ji->ea) > sizeof (ji->i_inline_ea))) in ea_read_inline()
329 return -EIO; in ea_read_inline()
330 if (le32_to_cpu(((struct jfs_ea_list *) &ji->i_inline_ea)->size) in ea_read_inline()
332 return -EIO; in ea_read_inline()
334 memcpy(ealist, ji->i_inline_ea, ea_size); in ea_read_inline()
344 * ip - Inode pointer
345 * ealist - Pointer to buffer to fill in with EA
353 struct super_block *sb = ip->i_sb; in ea_read()
364 /* quick check for in-line EA */ in ea_read()
365 if (ji->ea.flag & DXD_INLINE) in ea_read()
368 nbytes = sizeDXD(&ji->ea); in ea_read()
371 return -EIO; in ea_read()
378 nblocks = lengthDXD(&ji->ea) << sbi->l2nbperpage; in ea_read()
379 blkno = addressDXD(&ji->ea) << sbi->l2nbperpage; in ea_read()
386 for (i = 0; i < nblocks; i += sbi->nbperpage) { in ea_read()
393 ((((nb + sb->s_blocksize - 1)) >> sb->s_blocksize_bits)) in ea_read()
394 << sb->s_blocksize_bits; in ea_read()
397 return -EIO; in ea_read()
399 memcpy(cp, mp->data, nb); in ea_read()
403 nbytes -= nb; in ea_read()
420 * inode - Inode pointer
421 * ea_buf - Structure to be populated with ealist and its metadata
422 * min_size - minimum size of buffer to be returned in ea_get()
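/*
 * Editorial sketch of the ea_buffer that ea_get() fills in (the real
 * definition lives in the JFS sources; field comments here are assumptions).
 * The flag word combines EA_INLINE / EA_EXTENT / EA_MALLOC / EA_NEW to say
 * where the EA list currently lives and whether it was freshly allocated.
 */
struct ea_buffer_sketch {
	int flag;			/* EA_INLINE | EA_EXTENT | EA_MALLOC | EA_NEW */
	int max_size;			/* bytes available at xattr */
	dxd_t new_ea;			/* descriptor for a newly allocated extent */
	struct metapage *mp;		/* metapage backing an extent-based buffer */
	struct jfs_ea_list *xattr;	/* the EA list itself */
};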
429 struct super_block *sb = inode->i_sb; in ea_get()
431 int ea_size = sizeDXD(&ji->ea); in ea_get()
437 memset(&ea_buf->new_ea, 0, sizeof(ea_buf->new_ea)); in ea_get()
440 if (ji->ea.flag == 0) in ea_get()
445 ea_buf->flag = 0; in ea_get()
446 ea_buf->max_size = 0; in ea_get()
447 ea_buf->xattr = NULL; in ea_get()
450 if ((min_size <= sizeof (ji->i_inline_ea)) && in ea_get()
451 (ji->mode2 & INLINEEA)) { in ea_get()
452 ea_buf->flag = EA_INLINE | EA_NEW; in ea_get()
453 ea_buf->max_size = sizeof (ji->i_inline_ea); in ea_get()
454 ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea; in ea_get()
455 DXDlength(&ea_buf->new_ea, 0); in ea_get()
456 DXDaddress(&ea_buf->new_ea, 0); in ea_get()
457 ea_buf->new_ea.flag = DXD_INLINE; in ea_get()
458 DXDsize(&ea_buf->new_ea, min_size); in ea_get()
462 } else if (ji->ea.flag & DXD_INLINE) { in ea_get()
463 if (min_size <= sizeof (ji->i_inline_ea)) { in ea_get()
464 ea_buf->flag = EA_INLINE; in ea_get()
465 ea_buf->max_size = sizeof (ji->i_inline_ea); in ea_get()
466 ea_buf->xattr = (struct jfs_ea_list *) ji->i_inline_ea; in ea_get()
471 if (!(ji->ea.flag & DXD_EXTENT)) { in ea_get()
473 return -EIO; in ea_get()
475 current_blocks = (ea_size + sb->s_blocksize - 1) >> in ea_get()
476 sb->s_blocksize_bits; in ea_get()
486 ea_buf->max_size = (size + sb->s_blocksize - 1) & in ea_get()
487 ~(sb->s_blocksize - 1); in ea_get()
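/*
 * Editorial note: (size + s_blocksize - 1) & ~(s_blocksize - 1) rounds size
 * up to the next multiple of the block size, e.g. 5000 becomes 8192 with
 * 4096-byte blocks.
 */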
489 ea_buf->xattr = kmalloc(ea_buf->max_size, GFP_KERNEL); in ea_get()
490 if (ea_buf->xattr == NULL) in ea_get()
491 return -ENOMEM; in ea_get()
493 ea_buf->flag = EA_MALLOC; in ea_get()
498 if ((rc = ea_read(inode, ea_buf->xattr))) { in ea_get()
499 kfree(ea_buf->xattr); in ea_get()
500 ea_buf->xattr = NULL; in ea_get()
505 blocks_needed = (min_size + sb->s_blocksize - 1) >> in ea_get()
506 sb->s_blocksize_bits; in ea_get()
512 return -EDQUOT; in ea_get()
521 DXDlength(&ea_buf->new_ea, blocks_needed); in ea_get()
522 DXDaddress(&ea_buf->new_ea, blkno); in ea_get()
523 ea_buf->new_ea.flag = DXD_EXTENT; in ea_get()
524 DXDsize(&ea_buf->new_ea, min_size); in ea_get()
526 ea_buf->flag = EA_EXTENT | EA_NEW; in ea_get()
528 ea_buf->mp = get_metapage(inode, blkno, in ea_get()
529 blocks_needed << sb->s_blocksize_bits, in ea_get()
531 if (ea_buf->mp == NULL) { in ea_get()
533 rc = -EIO; in ea_get()
536 ea_buf->xattr = ea_buf->mp->data; in ea_get()
537 ea_buf->max_size = (min_size + sb->s_blocksize - 1) & in ea_get()
538 ~(sb->s_blocksize - 1); in ea_get()
541 if ((rc = ea_read(inode, ea_buf->xattr))) { in ea_get()
542 discard_metapage(ea_buf->mp); in ea_get()
548 ea_buf->flag = EA_EXTENT; in ea_get()
549 ea_buf->mp = read_metapage(inode, addressDXD(&ji->ea), in ea_get()
550 lengthDXD(&ji->ea) << sb->s_blocksize_bits, in ea_get()
552 if (ea_buf->mp == NULL) { in ea_get()
553 rc = -EIO; in ea_get()
556 ea_buf->xattr = ea_buf->mp->data; in ea_get()
557 ea_buf->max_size = (ea_size + sb->s_blocksize - 1) & in ea_get()
558 ~(sb->s_blocksize - 1); in ea_get()
561 if (EALIST_SIZE(ea_buf->xattr) != ea_size) { in ea_get()
562 int size = min_t(int, EALIST_SIZE(ea_buf->xattr), ea_size); in ea_get()
566 ea_buf->xattr, size, 1); in ea_get()
568 rc = -EIO; in ea_get()
584 if (ea_buf->flag & EA_MALLOC) in ea_release()
585 kfree(ea_buf->xattr); in ea_release()
586 else if (ea_buf->flag & EA_EXTENT) { in ea_release()
587 assert(ea_buf->mp); in ea_release()
588 release_metapage(ea_buf->mp); in ea_release()
590 if (ea_buf->flag & EA_NEW) in ea_release()
591 dbFree(inode, addressDXD(&ea_buf->new_ea), in ea_release()
592 lengthDXD(&ea_buf->new_ea)); in ea_release()
606 } else if (ea_buf->flag & EA_INLINE) { in ea_put()
607 assert(new_size <= sizeof (ji->i_inline_ea)); in ea_put()
608 ji->mode2 &= ~INLINEEA; in ea_put()
609 ea_buf->new_ea.flag = DXD_INLINE; in ea_put()
610 DXDsize(&ea_buf->new_ea, new_size); in ea_put()
611 DXDaddress(&ea_buf->new_ea, 0); in ea_put()
612 DXDlength(&ea_buf->new_ea, 0); in ea_put()
613 } else if (ea_buf->flag & EA_MALLOC) { in ea_put()
614 rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea); in ea_put()
615 kfree(ea_buf->xattr); in ea_put()
616 } else if (ea_buf->flag & EA_NEW) { in ea_put()
618 flush_metapage(ea_buf->mp); in ea_put()
620 /* ->xattr must point to original ea's metapage */ in ea_put()
621 rc = ea_write(inode, ea_buf->xattr, new_size, &ea_buf->new_ea); in ea_put()
622 discard_metapage(ea_buf->mp); in ea_put()
629 if (ji->ea.flag & DXD_EXTENT) { in ea_put()
630 invalidate_dxd_metapages(inode, ji->ea); in ea_put()
631 old_blocks = lengthDXD(&ji->ea); in ea_put()
635 txEA(tid, inode, &ji->ea, &ea_buf->new_ea); in ea_put()
636 if (ea_buf->new_ea.flag & DXD_EXTENT) { in ea_put()
637 new_blocks = lengthDXD(&ea_buf->new_ea); in ea_put()
638 if (ji->ea.flag & DXD_INLINE) in ea_put()
639 ji->mode2 |= INLINEEA; in ea_put()
641 ji->ea = ea_buf->new_ea; in ea_put()
643 txEA(tid, inode, &ji->ea, NULL); in ea_put()
644 if (ji->ea.flag & DXD_INLINE) in ea_put()
645 ji->mode2 |= INLINEEA; in ea_put()
646 ji->ea.flag = 0; in ea_put()
647 ji->ea.size = 0; in ea_put()
673 down_write(&JFS_IP(inode)->xattr_sem); in __jfs_setxattr()
688 if ((namelen == ea->namelen) && in __jfs_setxattr()
689 (memcmp(name, ea->name, namelen) == 0)) { in __jfs_setxattr()
692 rc = -EEXIST; in __jfs_setxattr()
705 rc = -ENODATA; in __jfs_setxattr()
733 length = (char *) END_EALIST(ealist) - (char *) next_ea; in __jfs_setxattr()
736 xattr_size -= old_ea_size; in __jfs_setxattr()
746 * The size of EA value is limited by on-disk format up to in __jfs_setxattr()
749 * we can pre-check the value size against USHRT_MAX, and in __jfs_setxattr()
750 * return -E2BIG in this case, which is consistent with the VFS setxattr interface. in __jfs_setxattr()
754 rc = -E2BIG; in __jfs_setxattr()
759 ea->flag = 0; in __jfs_setxattr()
760 ea->namelen = namelen; in __jfs_setxattr()
761 ea->valuelen = (cpu_to_le16(value_len)); in __jfs_setxattr()
762 memcpy(ea->name, name, namelen); in __jfs_setxattr()
763 ea->name[namelen] = 0; in __jfs_setxattr()
765 memcpy(&ea->name[namelen + 1], value, value_len); in __jfs_setxattr()
769 /* DEBUG - If we did this right, these numbers match */ in __jfs_setxattr()
775 rc = -EINVAL; in __jfs_setxattr()
785 ealist->size = cpu_to_le32(new_size); in __jfs_setxattr()
793 up_write(&JFS_IP(inode)->xattr_sem); in __jfs_setxattr()
809 down_read(&JFS_IP(inode)->xattr_sem); in __jfs_getxattr()
828 size = -EUCLEAN; in __jfs_getxattr()
832 if ((namelen == ea->namelen) && in __jfs_getxattr()
833 memcmp(name, ea->name, namelen) == 0) { in __jfs_getxattr()
835 size = le16_to_cpu(ea->valuelen); in __jfs_getxattr()
839 size = -ERANGE; in __jfs_getxattr()
842 value = ((char *) &ea->name) + ea->namelen + 1; in __jfs_getxattr()
848 size = -ENODATA; in __jfs_getxattr()
852 up_read(&JFS_IP(inode)->xattr_sem); in __jfs_getxattr()
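/*
 * Editorial sketch of the user-space side of the size handling above: callers
 * commonly probe with a zero-length buffer to learn the value size, then call
 * again with a large enough buffer; a buffer that is too small comes back as
 * -ERANGE, matching the branch above.  read_xattr() below is illustrative,
 * not part of JFS.
 */
#include <sys/xattr.h>
#include <stdlib.h>

static char *read_xattr(const char *path, const char *name, ssize_t *len)
{
	ssize_t sz = getxattr(path, name, NULL, 0);	/* size-only query */
	char *buf;

	if (sz < 0)
		return NULL;
	buf = malloc(sz);
	if (buf && (*len = getxattr(path, name, buf, sz)) < 0) {
		free(buf);
		buf = NULL;
	}
	return buf;
}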
862 return (strncmp(ea->name, XATTR_TRUSTED_PREFIX, in can_list()
877 down_read(&JFS_IP(inode)->xattr_sem); in jfs_listxattr()
895 size = -EUCLEAN; in jfs_listxattr()
907 size = -ERANGE; in jfs_listxattr()
923 up_read(&JFS_IP(inode)->xattr_sem); in jfs_listxattr()
934 tid = txBegin(inode->i_sb, 0); in __jfs_xattr_set()
935 mutex_lock(&ji->commit_mutex); in __jfs_xattr_set()
940 mutex_unlock(&ji->commit_mutex); in __jfs_xattr_set()
968 return -EOPNOTSUPP; in jfs_xattr_get_os2()
979 return -EOPNOTSUPP; in jfs_xattr_set_os2()
1025 for (xattr = xattr_array; xattr->name != NULL; xattr++) { in jfs_initxattrs()
1027 strlen(xattr->name) + 1, GFP_NOFS); in jfs_initxattrs()
1029 err = -ENOMEM; in jfs_initxattrs()
1033 strcpy(name + XATTR_SECURITY_PREFIX_LEN, xattr->name); in jfs_initxattrs()
1036 xattr->value, xattr->value_len, 0); in jfs_initxattrs()
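/*
 * Example (editorial): for a security module supplying an xattr suffix of
 * "selinux", the loop above builds the full name "security.selinux"
 * (XATTR_SECURITY_PREFIX followed by xattr->name) before storing the value.
 */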