Contributors: 18
Author Tokens Token Proportion Commits Commit Proportion
Linus Torvalds (pre-git) 1980 63.10% 18 33.33%
Evgeniy Dushistov 377 12.01% 7 12.96%
Linus Torvalds 325 10.36% 5 9.26%
Al Viro 188 5.99% 7 12.96%
Mark Fortescue 89 2.84% 1 1.85%
Dave Jones 45 1.43% 1 1.85%
Matthew Wilcox 38 1.21% 2 3.70%
Arnd Bergmann 30 0.96% 1 1.85%
Alex Kiernan 26 0.83% 1 1.85%
Gustavo A. R. Silva 14 0.45% 1 1.85%
Nicholas Piggin 12 0.38% 1 1.85%
Christoph Hellwig 5 0.16% 3 5.56%
Andrew Morton 4 0.13% 1 1.85%
Roel Kluin 1 0.03% 1 1.85%
Akinobu Mita 1 0.03% 1 1.85%
Greg Kroah-Hartman 1 0.03% 1 1.85%
Steven Cole 1 0.03% 1 1.85%
Mariusz Kozlowski 1 0.03% 1 1.85%
Total 3138 54

/* SPDX-License-Identifier: GPL-2.0 */
/*
 *  linux/fs/ufs/util.h
 *
 * Copyright (C) 1998 
 * Daniel Pirkl <daniel.pirkl@email.cz>
 * Charles University, Faculty of Mathematics and Physics
 */

#include <linux/buffer_head.h>
#include <linux/fs.h>
#include "swab.h"

/*
 * Helpers for fetching the ufs_buffer_head embedded in the
 * per-cylinder-group and per-superblock private info structures
 */
static inline struct ufs_buffer_head *UCPI_UBH(struct ufs_cg_private_info *cpi)
{
	return &cpi->c_ubh;
}
static inline struct ufs_buffer_head *USPI_UBH(struct ufs_sb_private_info *spi)
{
	return &spi->s_ubh;
}



/*
 * Helpers for accessing on-disk fields whose location depends on the
 * UFS flavour (SunOS, Sun, Sunx86, 4.4BSD)
 */
static inline s32
ufs_get_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT)
			return fs32_to_cpu(sb, usb1->fs_u0.fs_sun.fs_state);
		fallthrough;	/* to UFS_ST_SUN */
	case UFS_ST_SUN:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sun.fs_state);
	case UFS_ST_SUNx86:
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sunx86.fs_state);
	case UFS_ST_44BSD:
	default:
		return fs32_to_cpu(sb, usb3->fs_un2.fs_44.fs_state);
	}
}

static inline void
ufs_set_fs_state(struct super_block *sb, struct ufs_super_block_first *usb1,
		 struct ufs_super_block_third *usb3, s32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
		if (fs32_to_cpu(sb, usb3->fs_postblformat) == UFS_42POSTBLFMT) {
			usb1->fs_u0.fs_sun.fs_state = cpu_to_fs32(sb, value);
			break;
		}
		fallthrough;	/* to UFS_ST_SUN */
	case UFS_ST_SUN:
		usb3->fs_un2.fs_sun.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_SUNx86:
		usb1->fs_u1.fs_sunx86.fs_state = cpu_to_fs32(sb, value);
		break;
	case UFS_ST_44BSD:
		usb3->fs_un2.fs_44.fs_state = cpu_to_fs32(sb, value);
		break;
	}
}
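
/*
 * Example (illustrative): superblock write-out can record a clean state
 * with these helpers; the Sun convention stores UFS_FSOK minus the
 * superblock timestamp:
 *
 *	ufs_set_fs_state(sb, usb1, usb3,
 *			 UFS_FSOK - fs32_to_cpu(sb, usb1->fs_time));
 */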

static inline u32
ufs_get_fs_npsect(struct super_block *sb, struct ufs_super_block_first *usb1,
		  struct ufs_super_block_third *usb3)
{
	if ((UFS_SB(sb)->s_flags & UFS_ST_MASK) == UFS_ST_SUNx86)
		return fs32_to_cpu(sb, usb3->fs_un2.fs_sunx86.fs_npsect);
	else
		return fs32_to_cpu(sb, usb1->fs_u1.fs_sun.fs_npsect);
}

static inline u64
ufs_get_fs_qbmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

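	/*
	 * The on-disk value is a BSD "quad": an array of two 32-bit
	 * words.  Assemble both halves into a 64-bit temporary first,
	 * then byte-swap it as a single 64-bit quantity.
	 */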
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qbmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qbmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qbmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qbmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u64
ufs_get_fs_qfmask(struct super_block *sb, struct ufs_super_block_third *usb3)
{
	__fs64 tmp;

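	/* same two-word "quad" layout as fs_qbmask above */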
	switch (UFS_SB(sb)->s_flags & UFS_ST_MASK) {
	case UFS_ST_SUNOS:
	case UFS_ST_SUN:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sun.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sun.fs_qfmask[1];
		break;
	case UFS_ST_SUNx86:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_sunx86.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_sunx86.fs_qfmask[1];
		break;
	case UFS_ST_44BSD:
		((__fs32 *)&tmp)[0] = usb3->fs_un2.fs_44.fs_qfmask[0];
		((__fs32 *)&tmp)[1] = usb3->fs_un2.fs_44.fs_qfmask[1];
		break;
	}

	return fs64_to_cpu(sb, tmp);
}

static inline u16
ufs_get_de_namlen(struct super_block *sb, struct ufs_dir_entry *de)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		return fs16_to_cpu(sb, de->d_u.d_namlen);
	else
		return de->d_u.d_44.d_namlen; /* XXX this seems wrong */
}

static inline void
ufs_set_de_namlen(struct super_block *sb, struct ufs_dir_entry *de, u16 value)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) == UFS_DE_OLD)
		de->d_u.d_namlen = cpu_to_fs16(sb, value);
	else
		de->d_u.d_44.d_namlen = value; /* XXX this seems wrong */
}

static inline void
ufs_set_de_type(struct super_block *sb, struct ufs_dir_entry *de, int mode)
{
	if ((UFS_SB(sb)->s_flags & UFS_DE_MASK) != UFS_DE_44BSD)
		return;

	/*
	 * TODO turn this into a table lookup
	 */
	switch (mode & S_IFMT) {
	case S_IFSOCK:
		de->d_u.d_44.d_type = DT_SOCK;
		break;
	case S_IFLNK:
		de->d_u.d_44.d_type = DT_LNK;
		break;
	case S_IFREG:
		de->d_u.d_44.d_type = DT_REG;
		break;
	case S_IFBLK:
		de->d_u.d_44.d_type = DT_BLK;
		break;
	case S_IFDIR:
		de->d_u.d_44.d_type = DT_DIR;
		break;
	case S_IFCHR:
		de->d_u.d_44.d_type = DT_CHR;
		break;
	case S_IFIFO:
		de->d_u.d_44.d_type = DT_FIFO;
		break;
	default:
		de->d_u.d_44.d_type = DT_UNKNOWN;
	}
}
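
/*
 * A minimal sketch of the table lookup the TODO above suggests, modeled
 * on the pattern other filesystems use (illustrative only; the name
 * "ufs_type_by_mode" is hypothetical):
 *
 *	static const unsigned char ufs_type_by_mode[S_IFMT >> 12] = {
 *		[S_IFREG >> 12]  = DT_REG,  [S_IFDIR >> 12]  = DT_DIR,
 *		[S_IFCHR >> 12]  = DT_CHR,  [S_IFBLK >> 12]  = DT_BLK,
 *		[S_IFIFO >> 12]  = DT_FIFO, [S_IFSOCK >> 12] = DT_SOCK,
 *		[S_IFLNK >> 12]  = DT_LNK,
 *	};
 *	de->d_u.d_44.d_type = ufs_type_by_mode[(mode & S_IFMT) >> 12];
 *
 * Entries left out default to zero, which is DT_UNKNOWN.
 */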

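/*
 * UID/GID encoding: old UFS kept 16-bit IDs in ui_suid/ui_sgid.  The EFT
 * variant stores a 32-bit ID elsewhere in the inode and sets the 16-bit
 * field to 0xFFFF as an overflow marker; 4.4BSD always uses the 32-bit
 * fields, with the setters mirroring a truncated copy into the old slots.
 */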
static inline u32
ufs_get_inode_uid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_uid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_suid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_uid);
		fallthrough;
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_suid);
	}
}

static inline void
ufs_set_inode_uid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_uid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_uid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		fallthrough;
	default:
		inode->ui_u1.oldids.ui_suid = cpu_to_fs16(sb, value);
		break;
	}
}

static inline u32
ufs_get_inode_gid(struct super_block *sb, struct ufs_inode *inode)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		return fs32_to_cpu(sb, inode->ui_u3.ui_44.ui_gid);
	case UFS_UID_EFT:
		if (inode->ui_u1.oldids.ui_sgid == 0xFFFF)
			return fs32_to_cpu(sb, inode->ui_u3.ui_sun.ui_gid);
		fallthrough;
	default:
		return fs16_to_cpu(sb, inode->ui_u1.oldids.ui_sgid);
	}
}

static inline void
ufs_set_inode_gid(struct super_block *sb, struct ufs_inode *inode, u32 value)
{
	switch (UFS_SB(sb)->s_flags & UFS_UID_MASK) {
	case UFS_UID_44BSD:
		inode->ui_u3.ui_44.ui_gid = cpu_to_fs32(sb, value);
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	case UFS_UID_EFT:
		inode->ui_u3.ui_sun.ui_gid = cpu_to_fs32(sb, value);
		if (value > 0xFFFF)
			value = 0xFFFF;
		fallthrough;
	default:
		inode->ui_u1.oldids.ui_sgid = cpu_to_fs16(sb, value);
		break;
	}
}

dev_t ufs_get_inode_dev(struct super_block *, struct ufs_inode_info *);
void ufs_set_inode_dev(struct super_block *, struct ufs_inode_info *, dev_t);
int ufs_prepare_chunk(struct folio *folio, loff_t pos, unsigned len);

/*
 * These functions manipulate ufs buffers.
 * Note that ubh_bread(), like several macros below, expands to code that
 * expects a variable named "uspi" in the calling scope.
 */
#define ubh_bread(sb,fragment,size) _ubh_bread_(uspi,sb,fragment,size)  
extern struct ufs_buffer_head * _ubh_bread_(struct ufs_sb_private_info *, struct super_block *, u64 , u64);
extern struct ufs_buffer_head * ubh_bread_uspi(struct ufs_sb_private_info *, struct super_block *, u64, u64);
extern void ubh_brelse (struct ufs_buffer_head *);
extern void ubh_brelse_uspi (struct ufs_sb_private_info *);
extern void ubh_mark_buffer_dirty (struct ufs_buffer_head *);
extern void ubh_sync_block(struct ufs_buffer_head *);
extern void ubh_bforget (struct ufs_buffer_head *);
extern int  ubh_buffer_dirty (struct ufs_buffer_head *);
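
/*
 * Typical usage (illustrative; ubh_bread() expects a local "uspi"):
 *
 *	struct ufs_buffer_head *ubh = ubh_bread(sb, fragment, size);
 *	if (!ubh)
 *		return -EIO;
 *	...modify the buffer contents...
 *	ubh_mark_buffer_dirty(ubh);
 *	ubh_brelse(ubh);
 */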

/* These functions work with page-cache folios */
struct folio *ufs_get_locked_folio(struct address_space *mapping, pgoff_t index);
static inline void ufs_put_locked_folio(struct folio *folio)
{
	folio_unlock(folio);
	folio_put(folio);
}

/*
 * Macros and an inline helper for locating important structures inside
 * the buffers held by ufs_sb_private_info
 */

static inline void *get_usb_offset(struct ufs_sb_private_info *uspi,
				   unsigned int offset)
{
	unsigned int index;

	index = offset >> uspi->s_fshift;	/* which fragment-sized buffer */
	offset &= ~uspi->s_fmask;		/* byte offset within that buffer */
	return uspi->s_ubh.bh[index]->b_data + offset;
}

#define ubh_get_usb_first(uspi) \
	((struct ufs_super_block_first *)get_usb_offset((uspi), 0))

#define ubh_get_usb_second(uspi) \
	((struct ufs_super_block_second *)get_usb_offset((uspi), UFS_SECTOR_SIZE))

#define ubh_get_usb_third(uspi)	\
	((struct ufs_super_block_third *)get_usb_offset((uspi), 2*UFS_SECTOR_SIZE))


#define ubh_get_ucg(ubh) \
	((struct ufs_cylinder_group *)((ubh)->bh[0]->b_data))


/*
 * Extract byte from ufs_buffer_head
 * Extract the bits for a block from a map inside ufs_buffer_head
 */
#define ubh_get_addr8(ubh,begin) \
	((u8*)(ubh)->bh[(begin) >> uspi->s_fshift]->b_data + \
	((begin) & ~uspi->s_fmask))

#define ubh_get_addr16(ubh,begin) \
	(((__fs16*)((ubh)->bh[(begin) >> (uspi->s_fshift-1)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>1) - 1)))

#define ubh_get_addr32(ubh,begin) \
	(((__fs32*)((ubh)->bh[(begin) >> (uspi->s_fshift-2)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>2) - 1)))

#define ubh_get_addr64(ubh,begin) \
	(((__fs64*)((ubh)->bh[(begin) >> (uspi->s_fshift-3)]->b_data)) + \
	((begin) & ((uspi->s_fsize>>3) - 1)))

#define ubh_get_addr ubh_get_addr8
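
/*
 * In the macros above, "begin" is an offset in units of the access size
 * (bytes for addr8, 16-bit words for addr16, and so on): the high bits
 * pick the fragment-sized buffer_head, the low bits index into it.  All
 * of them expand to code using a "uspi" variable from the caller's scope.
 */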

static inline void *ubh_get_data_ptr(struct ufs_sb_private_info *uspi,
				     struct ufs_buffer_head *ubh,
				     u64 blk)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return ubh_get_addr64(ubh, blk);
	else
		return ubh_get_addr32(ubh, blk);
}

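/*
 * Extract the s_fpb bits that describe one block (one bit per fragment)
 * from the bitmap starting at byte offset "begin".
 */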
#define ubh_blkmap(ubh,begin,bit) \
	((*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) >> ((bit) & 7)) & (0xff >> (UFS_MAXFRAG - uspi->s_fpb)))

static inline u64
ufs_freefrags(struct ufs_sb_private_info *uspi)
{
	return ufs_blkstofrags(uspi->cs_total.cs_nbfree) +
		uspi->cs_total.cs_nffree;
}

/*
 * Macros to access cylinder group array structures
 */
#define ubh_cg_blktot(ucpi,cylno) \
	(*((__fs32*)ubh_get_addr(UCPI_UBH(ucpi), (ucpi)->c_btotoff + ((cylno) << 2))))

#define ubh_cg_blks(ucpi,cylno,rpos) \
	(*((__fs16*)ubh_get_addr(UCPI_UBH(ucpi), \
	(ucpi)->c_boff + (((cylno) * uspi->s_nrpos + (rpos)) << 1 ))))

/*
 * Bitmap operations
 * These functions work like classical bitmap operations.
 * The difference is that we don't have the whole bitmap
 * in one contiguous chunk of memory, but in several buffers.
 * The parameters of each function are super_block, ufs_buffer_head and
 * position of the beginning of the bitmap.
 */
#define ubh_setbit(ubh,begin,bit) \
	(*ubh_get_addr(ubh, (begin) + ((bit) >> 3)) |= (1 << ((bit) & 7)))

#define ubh_clrbit(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) &= ~(1 << ((bit) & 7)))

#define ubh_isset(ubh,begin,bit) \
	(*ubh_get_addr (ubh, (begin) + ((bit) >> 3)) & (1 << ((bit) & 7)))

#define ubh_isclr(ubh,begin,bit) (!ubh_isset(ubh,begin,bit))
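
/*
 * Example (illustrative): test and set the free bit of fragment "frag"
 * inside a cylinder group's fragment map (a local "uspi" must be in
 * scope, as ubh_get_addr() expands to use it):
 *
 *	if (ubh_isclr(UCPI_UBH(ucpi), ucpi->c_freeoff, frag))
 *		ubh_setbit(UCPI_UBH(ucpi), ucpi->c_freeoff, frag);
 */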

#define ubh_find_first_zero_bit(ubh,begin,size) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,0)

#define ubh_find_next_zero_bit(ubh,begin,size,offset) _ubh_find_next_zero_bit_(uspi,ubh,begin,size,offset)
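/*
 * Like find_next_zero_bit_le(), but over a bitmap split across the
 * fragment-sized buffers of a ufs_buffer_head: "begin" is the byte
 * offset where the bitmap starts, "size" its length in bits, and
 * "offset" the first bit to check.
 */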
static inline unsigned _ubh_find_next_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned size, unsigned offset)
{
	unsigned base, count, pos;

	size -= offset;
	begin <<= 3;
	offset += begin;
	base = offset >> uspi->s_bpfshift;
	offset &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int, size + offset, uspi->s_bpf);
		size -= count - offset;
		pos = find_next_zero_bit_le(ubh->bh[base]->b_data, count, offset);
		if (pos < count || !size)
			break;
		base++;
		offset = 0;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
} 	

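/*
 * Search the byte-array bitmap backwards, from bit "size" down to (but
 * not including) bit "offset"; returns the index of the highest zero bit
 * found, or "offset" if all bits in that range are set.
 */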
static inline unsigned find_last_zero_bit (unsigned char * bitmap,
	unsigned size, unsigned offset)
{
	unsigned bit, i;
	unsigned char * mapp;
	unsigned char map;

	mapp = bitmap + (size >> 3);
	map = *mapp--;
	bit = 1 << (size & 7);
	for (i = size; i > offset; i--) {
		if ((map & bit) == 0)
			break;
		if ((i & 7) != 0) {
			bit >>= 1;
		} else {
			map = *mapp--;
			bit = 1 << 7;
		}
	}
	return i;
}

#define ubh_find_last_zero_bit(ubh,begin,size,offset) _ubh_find_last_zero_bit_(uspi,ubh,begin,size,offset)
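/*
 * Backwards counterpart of the above.  Beware the parameter mapping: the
 * macro's "size" argument becomes the "start" parameter (the top of the
 * scanned range) and "offset" becomes "end" (the bottom).
 */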
static inline unsigned _ubh_find_last_zero_bit_(
	struct ufs_sb_private_info * uspi, struct ufs_buffer_head * ubh,
	unsigned begin, unsigned start, unsigned end)
{
	unsigned base, count, pos, size;

	size = start - end;
	begin <<= 3;
	start += begin;
	base = start >> uspi->s_bpfshift;
	start &= uspi->s_bpfmask;
	for (;;) {
		count = min_t(unsigned int,
			    size + (uspi->s_bpf - start), uspi->s_bpf)
			- (uspi->s_bpf - start);
		size -= count;
		pos = find_last_zero_bit (ubh->bh[base]->b_data,
			start, start - count);
		if (pos > start - count || !size)
			break;
		base--;
		start = uspi->s_bpf;
	}
	return (base << uspi->s_bpfshift) + pos - begin;
} 	

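/*
 * A block consists of s_fpb fragments and thus occupies s_fpb consecutive
 * bits in the free-fragment map; the helpers below test, clear and set
 * all of a block's bits in one go.
 */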
static inline int ubh_isblockset(struct ufs_sb_private_info *uspi,
	struct ufs_cg_private_info *ucpi, unsigned int frag)
{
	struct ufs_buffer_head *ubh = UCPI_UBH(ucpi);
	u8 *p = ubh_get_addr(ubh, ucpi->c_freeoff + (frag >> 3));
	u8 mask;

	switch (uspi->s_fpb) {
	case 8:
		return *p == 0xff;
	case 4:
		mask = 0x0f << (frag & 4);
		return (*p & mask) == mask;
	case 2:
		mask = 0x03 << (frag & 6);
		return (*p & mask) == mask;
	case 1:
		mask = 0x01 << (frag & 7);
		return (*p & mask) == mask;
	}
	return 0;	
}

static inline void ubh_clrblock(struct ufs_sb_private_info *uspi,
	struct ufs_cg_private_info *ucpi, unsigned int frag)
{
	struct ufs_buffer_head *ubh = UCPI_UBH(ucpi);
	u8 *p = ubh_get_addr(ubh, ucpi->c_freeoff + (frag >> 3));

	switch (uspi->s_fpb) {
	case 8:
		*p = 0x00;
		return;
	case 4:
		*p &= ~(0x0f << (frag & 4));
		return;
	case 2:
		*p &= ~(0x03 << (frag & 6));
		return;
	case 1:
		*p &= ~(0x01 << (frag & 7));
		return;
	}
}

static inline void ubh_setblock(struct ufs_sb_private_info * uspi,
	struct ufs_cg_private_info *ucpi, unsigned int frag)
{
	struct ufs_buffer_head *ubh = UCPI_UBH(ucpi);
	u8 *p = ubh_get_addr(ubh, ucpi->c_freeoff + (frag >> 3));

	switch (uspi->s_fpb) {
	case 8:
		*p = 0xff;
		return;
	case 4:
		*p |= 0x0f << (frag & 4);
		return;
	case 2:
		*p |= 0x03 << (frag & 6);
		return;
	case 1:
		*p |= 0x01 << (frag & 7);
		return;
	}
}

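/*
 * For every run of free fragments in "blockmap" (one bit per fragment of
 * a block, set meaning free), bump the fragment-size histogram entry
 * fraglist[run length] by "cnt"; a completely free block is not counted.
 */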
static inline void ufs_fragacct (struct super_block * sb, unsigned blockmap,
	__fs32 * fraglist, int cnt)
{
	struct ufs_sb_private_info * uspi;
	unsigned fragsize, pos;
	
	uspi = UFS_SB(sb)->s_uspi;
	
	fragsize = 0;
	for (pos = 0; pos < uspi->s_fpb; pos++) {
		if (blockmap & (1 << pos)) {
			fragsize++;
		}
		else if (fragsize > 0) {
			fs32_add(sb, &fraglist[fragsize], cnt);
			fragsize = 0;
		}
	}
	if (fragsize > 0 && fragsize < uspi->s_fpb)
		fs32_add(sb, &fraglist[fragsize], cnt);
}

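/*
 * UFS1 stores block pointers on disk as 32-bit values, UFS2 as 64-bit
 * ones; the "data ptr" helpers below hide the difference behind void
 * pointers.
 */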
static inline void *ufs_get_direct_data_ptr(struct ufs_sb_private_info *uspi,
					    struct ufs_inode_info *ufsi,
					    unsigned blk)
{
	BUG_ON(blk > UFS_TIND_BLOCK);
	return uspi->fs_magic == UFS2_MAGIC ?
		(void *)&ufsi->i_u1.u2_i_data[blk] :
		(void *)&ufsi->i_u1.i_data[blk];
}

static inline u64 ufs_data_ptr_to_cpu(struct super_block *sb, void *p)
{
	return UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC ?
		fs64_to_cpu(sb, *(__fs64 *)p) :
		fs32_to_cpu(sb, *(__fs32 *)p);
}

static inline void ufs_cpu_to_data_ptr(struct super_block *sb, void *p, u64 val)
{
	if (UFS_SB(sb)->s_uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = cpu_to_fs64(sb, val);
	else
		*(__fs32 *)p = cpu_to_fs32(sb, val);
}

static inline void ufs_data_ptr_clear(struct ufs_sb_private_info *uspi,
				      void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		*(__fs64 *)p = 0;
	else
		*(__fs32 *)p = 0;
}

static inline int ufs_is_data_ptr_zero(struct ufs_sb_private_info *uspi,
				       void *p)
{
	if (uspi->fs_magic == UFS2_MAGIC)
		return *(__fs64 *)p == 0;
	else
		return *(__fs32 *)p == 0;
}

static inline __fs32 ufs_get_seconds(struct super_block *sbp)
{
	time64_t now = ktime_get_real_seconds();

	/* Signed 32-bit interpretation wraps around in 2038, which
	 * affects ufs1 inode timestamps but not ufs2, which uses
	 * 64-bit timestamps. For the superblock and block groups,
	 * assume unsigned 32-bit stamps, which are good until y2106.
	 * Wrap around rather than clamp here so that dirty file
	 * system detection via the superblock stamp keeps working.
	 */
	return cpu_to_fs32(sbp, lower_32_bits(now));
}