Donate to e Foundation | Murena handsets with /e/OS | Own a part of Murena! Learn more

Commit f1e3cfff authored by Nitin Gupta, committed by Greg Kroah-Hartman
Browse files

Staging: zram: Rename ramzswap to zram in code



Automated renames in code:
 - rzs* -> zram*
 - RZS* -> ZRAM*
 - ramzswap* -> zram*

Manual changes:
 - Edited comments/messages mentioning "swap"

Signed-off-by: Nitin Gupta <ngupta@vflare.org>
Acked-by: Pekka Enberg <penberg@cs.helsinki.fi>
Signed-off-by: Greg Kroah-Hartman <gregkh@suse.de>
parent 16a4bfb9
Loading
Loading
Loading
Loading
+215 −216

File changed.

Preview size limit exceeded, changes collapsed.

+31 −35
Original line number Diff line number Diff line
/*
 * Compressed RAM based swap device
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
@@ -12,8 +12,8 @@
 * Project home: http://compcache.googlecode.com
 */

#ifndef _RAMZSWAP_DRV_H_
#define _RAMZSWAP_DRV_H_
#ifndef _ZRAM_DRV_H_
#define _ZRAM_DRV_H_

#include <linux/spinlock.h>
#include <linux/mutex.h>
@@ -41,7 +41,7 @@ struct zobj_header {

/*-- Configurable parameters */

/* Default ramzswap disk size: 25% of total RAM */
/* Default zram disk size: 25% of total RAM */
static const unsigned default_disksize_perc_ram = 25;

/*
@@ -63,23 +63,20 @@ static const unsigned max_zpage_size = PAGE_SIZE / 4 * 3;
#define SECTORS_PER_PAGE_SHIFT	(PAGE_SHIFT - SECTOR_SHIFT)
#define SECTORS_PER_PAGE	(1 << SECTORS_PER_PAGE_SHIFT)

/* Flags for ramzswap pages (table[page_no].flags) */
enum rzs_pageflags {
/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
	/* Page is stored uncompressed */
	RZS_UNCOMPRESSED,
	ZRAM_UNCOMPRESSED,

	/* Page consists entirely of zeros */
	RZS_ZERO,
	ZRAM_ZERO,

	__NR_RZS_PAGEFLAGS,
	__NR_ZRAM_PAGEFLAGS,
};

/*-- Data structures */

/*
 * Allocated for each swap slot, indexed by page no.
 * These table entries must fit exactly in a page.
 */
/* Allocated for each disk page */
struct table {
	struct page *page;
	u16 offset;
@@ -87,17 +84,17 @@ struct table {
	u8 flags;
} __attribute__((aligned(4)));

struct ramzswap_stats {
struct zram_stats {
	/* basic stats */
	size_t compr_size;	/* compressed size of pages stored -
				 * needed to enforce memlimit */
	/* more stats */
#if defined(CONFIG_RAMZSWAP_STATS)
#if defined(CONFIG_ZRAM_STATS)
	u64 num_reads;		/* failed + successful */
	u64 num_writes;		/* --do-- */
	u64 failed_reads;	/* should NEVER! happen */
	u64 failed_writes;	/* can happen when memory is too low */
	u64 invalid_io;		/* non-swap I/O requests */
	u64 invalid_io;		/* non-page-aligned I/O requests */
	u64 notify_free;	/* no. of swap slot free notifications */
	u32 pages_zero;		/* no. of zero filled pages */
	u32 pages_stored;	/* no. of pages currently stored */
@@ -106,7 +103,7 @@ struct ramzswap_stats {
#endif
};

struct ramzswap {
struct zram {
	struct xv_pool *mem_pool;
	void *compress_workmem;
	void *compress_buffer;
@@ -118,51 +115,50 @@ struct ramzswap {
	struct gendisk *disk;
	int init_done;
	/*
	 * This is limit on amount of *uncompressed* worth of data
	 * we can hold. When backing swap device is provided, it is
	 * set equal to device size.
	 * This is the limit on amount of *uncompressed* worth of data
	 * we can store in a disk.
	 */
	size_t disksize;	/* bytes */

	struct ramzswap_stats stats;
	struct zram_stats stats;
};

/*-- */

/* Debugging and Stats */
#if defined(CONFIG_RAMZSWAP_STATS)
static void rzs_stat_inc(u32 *v)
#if defined(CONFIG_ZRAM_STATS)
static void zram_stat_inc(u32 *v)
{
	*v = *v + 1;
}

static void rzs_stat_dec(u32 *v)
static void zram_stat_dec(u32 *v)
{
	*v = *v - 1;
}

static void rzs_stat64_inc(struct ramzswap *rzs, u64 *v)
static void zram_stat64_inc(struct zram *zram, u64 *v)
{
	spin_lock(&rzs->stat64_lock);
	spin_lock(&zram->stat64_lock);
	*v = *v + 1;
	spin_unlock(&rzs->stat64_lock);
	spin_unlock(&zram->stat64_lock);
}

static u64 rzs_stat64_read(struct ramzswap *rzs, u64 *v)
static u64 zram_stat64_read(struct zram *zram, u64 *v)
{
	u64 val;

	spin_lock(&rzs->stat64_lock);
	spin_lock(&zram->stat64_lock);
	val = *v;
	spin_unlock(&rzs->stat64_lock);
	spin_unlock(&zram->stat64_lock);

	return val;
}
#else
#define rzs_stat_inc(v)
#define rzs_stat_dec(v)
#define rzs_stat64_inc(r, v)
#define rzs_stat64_read(r, v)
#endif /* CONFIG_RAMZSWAP_STATS */
#define zram_stat_inc(v)
#define zram_stat_dec(v)
#define zram_stat64_inc(r, v)
#define zram_stat64_read(r, v)
#endif /* CONFIG_ZRAM_STATS */

#endif
+10 −11
Original line number Diff line number Diff line
/*
 * Compressed RAM based swap device
 * Compressed RAM block device
 *
 * Copyright (C) 2008, 2009, 2010  Nitin Gupta
 *
@@ -12,17 +12,16 @@
 * Project home: http://compcache.googlecode.com
 */

#ifndef _RAMZSWAP_IOCTL_H_
#define _RAMZSWAP_IOCTL_H_
#ifndef _ZRAM_IOCTL_H_
#define _ZRAM_IOCTL_H_

struct ramzswap_ioctl_stats {
	u64 disksize;		/* user specified or equal to backing swap
				 * size (if present) */
struct zram_ioctl_stats {
	u64 disksize;		/* disksize in bytes (user specifies in KB) */
	u64 num_reads;		/* failed + successful */
	u64 num_writes;		/* --do-- */
	u64 failed_reads;	/* should NEVER! happen */
	u64 failed_writes;	/* can happen when memory is too low */
	u64 invalid_io;		/* non-swap I/O requests */
	u64 invalid_io;		/* non-page-aligned I/O requests */
	u64 notify_free;	/* no. of swap slot free notifications */
	u32 pages_zero;		/* no. of zero filled pages */
	u32 good_compress_pct;	/* no. of pages with compression ratio<=50% */
@@ -34,9 +33,9 @@ struct ramzswap_ioctl_stats {
	u64 mem_used_total;
} __attribute__ ((packed, aligned(4)));

#define RZSIO_SET_DISKSIZE_KB	_IOW('z', 0, size_t)
#define RZSIO_GET_STATS		_IOR('z', 1, struct ramzswap_ioctl_stats)
#define RZSIO_INIT		_IO('z', 2)
#define RZSIO_RESET		_IO('z', 3)
#define ZRAMIO_SET_DISKSIZE_KB	_IOW('z', 0, size_t)
#define ZRAMIO_GET_STATS	_IOR('z', 1, struct zram_ioctl_stats)
#define ZRAMIO_INIT		_IO('z', 2)
#define ZRAMIO_RESET		_IO('z', 3)

#endif