Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit 77e4635a authored by Nathan Scott, committed by Tim Shimmin
Browse files

[XFS] Add a greedy allocation interface, allocating within a min/max size range.

SGI-PV: 955302
SGI-Modid: xfs-linux-melb:xfs-kern:26803a

Signed-off-by: Nathan Scott <nathans@sgi.com>
Signed-off-by: Tim Shimmin <tes@sgi.com>
parent 572d95f4
Loading
Loading
Loading
Loading
+16 −0
Original line number Diff line number Diff line
@@ -68,6 +68,22 @@ kmem_zalloc(size_t size, unsigned int __nocast flags)
	return ptr;
}

/*
 * Greedy zeroed allocation: start at maxsize and halve the request on
 * each failure.  Once the request would drop to (or below) minsize,
 * clamp it to minsize and switch to KM_SLEEP so the final attempt
 * blocks until it succeeds.  The size actually obtained is returned
 * through *size; the caller must free with that size.
 */
void *
kmem_zalloc_greedy(size_t *size, size_t minsize, size_t maxsize,
		   unsigned int __nocast flags)
{
	void	*ptr;

	for (;;) {
		ptr = kmem_zalloc(maxsize, flags);
		if (ptr)
			break;
		/* Back off toward minsize; guarantee the last try succeeds. */
		if ((maxsize >>= 1) <= minsize) {
			maxsize = minsize;
			flags = KM_SLEEP;
		}
	}
	*size = maxsize;
	return ptr;
}

void
kmem_free(void *ptr, size_t size)
{
+2 −1
Original line number Diff line number Diff line
@@ -55,8 +55,9 @@ kmem_flags_convert(unsigned int __nocast flags)
}

extern void *kmem_alloc(size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void *kmem_zalloc(size_t, unsigned int __nocast);
extern void *kmem_zalloc_greedy(size_t *, size_t, size_t, unsigned int __nocast);
extern void *kmem_realloc(void *, size_t, size_t, unsigned int __nocast);
extern void  kmem_free(void *, size_t);

/*
+6 −7
Original line number Diff line number Diff line
@@ -112,17 +112,16 @@ xfs_Gqm_init(void)
{
	xfs_dqhash_t	*udqhash, *gdqhash;
	xfs_qm_t	*xqm;
	uint		i, hsize, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
	uint		i, hsize;

	/*
	 * Initialize the dquot hash tables.
	 */
	hsize = XFS_QM_HASHSIZE_HIGH;
	while (!(udqhash = kmem_zalloc(hsize * sizeof(*udqhash), flags))) {
		if ((hsize >>= 1) <= XFS_QM_HASHSIZE_LOW)
			flags = KM_SLEEP;
	}
	gdqhash = kmem_zalloc(hsize * sizeof(*gdqhash), KM_SLEEP | KM_LARGE);
	udqhash = kmem_zalloc_greedy(&hsize,
				     XFS_QM_HASHSIZE_LOW, XFS_QM_HASHSIZE_HIGH,
				     KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	gdqhash = kmem_zalloc(hsize, KM_SLEEP | KM_LARGE);
	hsize /= sizeof(xfs_dqhash_t);
	ndquot = hsize << 8;

	xqm = kmem_zalloc(sizeof(xfs_qm_t), KM_SLEEP);
+8 −9
Original line number Diff line number Diff line
@@ -50,7 +50,7 @@ void
xfs_ihash_init(xfs_mount_t *mp)
{
	__uint64_t	icount;
	uint		i, flags = KM_SLEEP | KM_MAYFAIL | KM_LARGE;
	uint		i;

	if (!mp->m_ihsize) {
		icount = mp->m_maxicount ? mp->m_maxicount :
@@ -61,15 +61,14 @@ xfs_ihash_init(xfs_mount_t *mp)
					(64 * NBPP) / sizeof(xfs_ihash_t));
	}

	while (!(mp->m_ihash = (xfs_ihash_t *)kmem_zalloc(mp->m_ihsize *
						sizeof(xfs_ihash_t), flags))) {
		if ((mp->m_ihsize >>= 1) <= NBPP)
			flags = KM_SLEEP;
	}
	for (i = 0; i < mp->m_ihsize; i++) {
	mp->m_ihash = kmem_zalloc_greedy(&mp->m_ihsize,
					 NBPC * sizeof(xfs_ihash_t),
					 mp->m_ihsize * sizeof(xfs_ihash_t),
					 KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	mp->m_ihsize /= sizeof(xfs_ihash_t);
	for (i = 0; i < mp->m_ihsize; i++)
		rwlock_init(&(mp->m_ihash[i].ih_lock));
}
}

/*
 * Free up structures allocated by xfs_ihash_init, at unmount time.
+2 −14
Original line number Diff line number Diff line
@@ -326,7 +326,6 @@ xfs_bulkstat(
	int			i;	/* loop index */
	int			icount;	/* count of inodes good in irbuf */
	int			irbsize; /* size of irec buffer in bytes */
	unsigned int		kmflags; /* flags for allocating irec buffer */
	xfs_ino_t		ino;	/* inode number (filesystem) */
	xfs_inobt_rec_incore_t	*irbp;	/* current irec buffer pointer */
	xfs_inobt_rec_incore_t	*irbuf;	/* start of irec buffer */
@@ -371,19 +370,8 @@ xfs_bulkstat(
		(XFS_INODE_CLUSTER_SIZE(mp) >> mp->m_sb.sb_inodelog);
	nimask = ~(nicluster - 1);
	nbcluster = nicluster >> mp->m_sb.sb_inopblog;
	/*
	 * Allocate a local buffer for inode cluster btree records.
	 * This caps our maximum readahead window (so don't be stingy)
	 * but we must handle the case where we can't get a contiguous
	 * multi-page buffer, so we drop back toward pagesize; the end
	 * case we ensure succeeds, via appropriate allocation flags.
	 */
	irbsize = NBPP * 4;
	kmflags = KM_SLEEP | KM_MAYFAIL;
	while (!(irbuf = kmem_alloc(irbsize, kmflags))) {
		if ((irbsize >>= 1) <= NBPP)
			kmflags = KM_SLEEP;
	}
	irbuf = kmem_zalloc_greedy(&irbsize, NBPC, NBPC * 4,
				   KM_SLEEP | KM_MAYFAIL | KM_LARGE);
	nirbuf = irbsize / sizeof(*irbuf);

	/*