drivers/staging/ozwpan/ozeventdef.h  +1 −1

@@ -37,7 +37,7 @@ struct oz_event {
 	unsigned ctx4;
 };
 
-#define OZ_EVT_LIST_SZ 256
+#define OZ_EVT_LIST_SZ 64
 struct oz_evtlist {
 	int count;
 	int missed;

drivers/staging/zcache/zcache-main.c  +6 −8

@@ -701,7 +701,6 @@ static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
 	u32 size = clen + sizeof(struct zv_hdr);
 	int chunks = (size + (CHUNK_SIZE - 1)) >> CHUNK_SHIFT;
 	void *handle = NULL;
-	char *buf;
 
 	BUG_ON(!irqs_disabled());
 	BUG_ON(chunks >= NCHUNKS);
@@ -710,14 +709,13 @@ static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
 		goto out;
 	atomic_inc(&zv_curr_dist_counts[chunks]);
 	atomic_inc(&zv_cumul_dist_counts[chunks]);
-	zv = (struct zv_hdr *)((char *)cdata - sizeof(*zv));
+	zv = zs_map_object(pool, handle);
 	zv->index = index;
 	zv->oid = *oid;
 	zv->pool_id = pool_id;
 	zv->size = clen;
 	SET_SENTINEL(zv, ZVH);
-	buf = zs_map_object(pool, handle);
-	memcpy(buf, zv, clen + sizeof(*zv));
+	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
 	zs_unmap_object(pool, handle);
 out:
 	return handle;
@@ -1170,14 +1168,14 @@ static atomic_t zcache_curr_pers_pampd_count = ATOMIC_INIT(0);
 static unsigned long zcache_curr_pers_pampd_count_max;
 
 /* forward reference */
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len);
+static int zcache_compress(struct page *from, void **out_va, unsigned *out_len);
 
 static void *zcache_pampd_create(char *data, size_t size, bool raw, int eph,
 				struct tmem_pool *pool, struct tmem_oid *oid,
 				uint32_t index)
 {
 	void *pampd = NULL, *cdata;
-	size_t clen;
+	unsigned clen;
 	int ret;
 	unsigned long count;
 	struct page *page = (struct page *)(data);
@@ -1326,7 +1324,7 @@ static struct tmem_pamops zcache_pamops = {
 static DEFINE_PER_CPU(unsigned char *, zcache_dstmem);
 #define ZCACHE_DSTMEM_ORDER 1
 
-static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
+static int zcache_compress(struct page *from, void **out_va, unsigned *out_len)
 {
 	int ret = 0;
 	unsigned char *dmem = __get_cpu_var(zcache_dstmem);
@@ -1339,7 +1337,7 @@ static int zcache_compress(struct page *from, void **out_va, size_t *out_len)
 	from_va = kmap_atomic(from, KM_USER0);
 	mb();
 	ret = zcache_comp_op(ZCACHE_COMPOP_COMPRESS, from_va, PAGE_SIZE, dmem,
-				(unsigned int *)out_len);
+				out_len);
 	BUG_ON(ret);
 	*out_va = dmem;
 	kunmap_atomic(from_va, KM_USER0);
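
A minimal sketch of what the zv_create() write path looks like after this change, assuming the surrounding zcache/zsmalloc definitions in this tree (struct zv_hdr, struct tmem_oid, SET_SENTINEL(), zs_malloc(), zs_map_object(), zs_unmap_object()); the chunk-distribution accounting and BUG_ON() checks visible in the hunks above are omitted. Instead of staging the header just in front of cdata and copying header plus payload through a separate buf mapping, the header is now written directly into the mapped object and only the compressed payload is copied in behind it. zcache_compress() correspondingly takes unsigned *out_len, so the (unsigned int *) cast at its call site goes away.

static struct zv_hdr *zv_create(struct zs_pool *pool, uint32_t pool_id,
				struct tmem_oid *oid, uint32_t index,
				void *cdata, unsigned clen)
{
	struct zv_hdr *zv = NULL;
	u32 size = clen + sizeof(struct zv_hdr);
	void *handle;

	/* allocation assumed to be zs_malloc(), as elsewhere in this file */
	handle = zs_malloc(pool, size);
	if (!handle)
		goto out;

	/* build the header directly in the mapped zsmalloc object ... */
	zv = zs_map_object(pool, handle);
	zv->index = index;
	zv->oid = *oid;
	zv->pool_id = pool_id;
	zv->size = clen;
	SET_SENTINEL(zv, ZVH);
	/* ... and copy only the compressed payload in behind the header */
	memcpy((char *)zv + sizeof(struct zv_hdr), cdata, clen);
	zs_unmap_object(pool, handle);
out:
	return handle;
}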