fs/cifs/cifsproto.h +2 −0

@@ -483,6 +483,8 @@ int cifs_async_readv(struct cifs_readdata *rdata);
 /* asynchronous write support */
 struct cifs_writedata {
 	struct kref			refcount;
+	struct list_head		list;
+	struct completion		done;
 	enum writeback_sync_modes	sync_mode;
 	struct work_struct		work;
 	struct cifsFileInfo		*cfile;

fs/cifs/cifssmb.c +3 −1

@@ -2081,8 +2081,10 @@ cifs_writedata_alloc(unsigned int nr_pages, work_func_t complete)
 	wdata = kzalloc(sizeof(*wdata) +
 			sizeof(struct page *) * (nr_pages - 1), GFP_NOFS);
 	if (wdata != NULL) {
-		INIT_WORK(&wdata->work, complete);
 		kref_init(&wdata->refcount);
+		INIT_LIST_HEAD(&wdata->list);
+		init_completion(&wdata->done);
+		INIT_WORK(&wdata->work, complete);
 	}
 	return wdata;
 }
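The two hunks above give each in-flight write its own lifetime plumbing: a kref shared between the submitter and the completion work, a list_head so the submitter can keep the request on a local list, and a completion the waiter can block on until the reply comes back. Below is a minimal userspace C analog of that pattern (a sketch, not kernel code); writedata_alloc(), writedata_put() and the pthread-based "completion" are illustrative stand-ins for kref_init(), INIT_LIST_HEAD(), init_completion() and cifs_writedata_release().

/*
 * Userspace sketch of the cifs_writedata lifetime pattern: a refcounted
 * request object with a list hook for the submitter and a completion
 * signaled by the reply handler.  Not CIFS code; names are illustrative.
 */
#include <pthread.h>
#include <stdlib.h>

struct completion {
	pthread_mutex_t lock;
	pthread_cond_t  cond;
	int             done;
};

struct writedata {
	int               refcount;   /* stands in for struct kref */
	pthread_mutex_t   ref_lock;
	struct writedata *next;       /* stands in for struct list_head list */
	struct completion done;       /* signaled when the "reply" arrives */
	unsigned int      nr_pages;
	void             *pages[];    /* trailing page array, as in the kzalloc() */
};

static struct writedata *writedata_alloc(unsigned int nr_pages)
{
	struct writedata *wdata;

	wdata = calloc(1, sizeof(*wdata) + sizeof(void *) * nr_pages);
	if (wdata != NULL) {
		wdata->refcount = 1;                    /* kref_init()       */
		pthread_mutex_init(&wdata->ref_lock, NULL);
		wdata->next = NULL;                     /* INIT_LIST_HEAD()  */
		pthread_mutex_init(&wdata->done.lock, NULL);
		pthread_cond_init(&wdata->done.cond, NULL);
		wdata->done.done = 0;                   /* init_completion() */
		wdata->nr_pages = nr_pages;
	}
	return wdata;
}

static void writedata_put(struct writedata *wdata)
{
	int release;

	pthread_mutex_lock(&wdata->ref_lock);
	release = (--wdata->refcount == 0);
	pthread_mutex_unlock(&wdata->ref_lock);
	if (release)
		free(wdata);    /* plays the role of cifs_writedata_release() */
}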
fs/cifs/file.c +138 −85

@@ -2106,24 +2106,79 @@ size_t get_numpages(const size_t wsize, const size_t len, size_t *cur_len)
 	return num_pages;
 }
 
+static void
+cifs_uncached_marshal_iov(struct kvec *iov, struct cifs_writedata *wdata)
+{
+	int i;
+	size_t bytes = wdata->bytes;
+
+	/* marshal up the pages into iov array */
+	for (i = 0; i < wdata->nr_pages; i++) {
+		iov[i + 1].iov_len = min(bytes, PAGE_SIZE);
+		iov[i + 1].iov_base = kmap(wdata->pages[i]);
+		bytes -= iov[i + 1].iov_len;
+	}
+}
+
+static void
+cifs_uncached_writev_complete(struct work_struct *work)
+{
+	int i;
+	struct cifs_writedata *wdata = container_of(work,
+					struct cifs_writedata, work);
+	struct inode *inode = wdata->cfile->dentry->d_inode;
+	struct cifsInodeInfo *cifsi = CIFS_I(inode);
+
+	spin_lock(&inode->i_lock);
+	cifs_update_eof(cifsi, wdata->offset, wdata->bytes);
+	if (cifsi->server_eof > inode->i_size)
+		i_size_write(inode, cifsi->server_eof);
+	spin_unlock(&inode->i_lock);
+
+	complete(&wdata->done);
+
+	if (wdata->result != -EAGAIN) {
+		for (i = 0; i < wdata->nr_pages; i++)
+			put_page(wdata->pages[i]);
+	}
+
+	kref_put(&wdata->refcount, cifs_writedata_release);
+}
+
+/* attempt to send write to server, retry on any -EAGAIN errors */
+static int
+cifs_uncached_retry_writev(struct cifs_writedata *wdata)
+{
+	int rc;
+
+	do {
+		if (wdata->cfile->invalidHandle) {
+			rc = cifs_reopen_file(wdata->cfile, false);
+			if (rc != 0)
+				continue;
+		}
+		rc = cifs_async_writev(wdata);
+	} while (rc == -EAGAIN);
+
+	return rc;
+}
+
 static ssize_t
 cifs_iovec_write(struct file *file, const struct iovec *iov,
 		 unsigned long nr_segs, loff_t *poffset)
 {
-	unsigned int written;
-	unsigned long num_pages, npages, i;
+	unsigned long nr_pages, i;
 	size_t copied, len, cur_len;
 	ssize_t total_written = 0;
-	struct kvec *to_send;
-	struct page **pages;
+	loff_t offset = *poffset;
 	struct iov_iter it;
-	struct inode *inode;
 	struct cifsFileInfo *open_file;
-	struct cifs_tcon *pTcon;
+	struct cifs_tcon *tcon;
 	struct cifs_sb_info *cifs_sb;
-	struct cifs_io_parms io_parms;
-	int xid, rc;
-	__u32 pid;
+	struct cifs_writedata *wdata, *tmp;
+	struct list_head wdata_list;
+	int rc;
+	pid_t pid;
 
 	len = iov_length(iov, nr_segs);
 	if (!len)
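cifs_uncached_marshal_iov() above fills the kvec array from slot 1 onward, leaving slot 0 for the SMB header, and hands the final page only the bytes that remain. A compact userspace sketch of that splitting step follows; PAGE_SZ, struct kvec_stub and marshal_pages() are local stand-ins, not kernel definitions.

/*
 * Sketch of the marshalling step: split `bytes` across page-sized buffers
 * into an iovec-like array, starting at index 1 so the caller can place a
 * protocol header in index 0.
 */
#include <stddef.h>

#define PAGE_SZ 4096u

struct kvec_stub {
	void   *iov_base;
	size_t  iov_len;
};

static void marshal_pages(struct kvec_stub *iov, void **pages,
			  size_t nr_pages, size_t bytes)
{
	size_t i;

	for (i = 0; i < nr_pages; i++) {
		/* the last page may carry less than a full page of data */
		iov[i + 1].iov_len  = bytes < PAGE_SZ ? bytes : PAGE_SZ;
		iov[i + 1].iov_base = pages[i];
		bytes -= iov[i + 1].iov_len;
	}
}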
@@ -2133,105 +2188,103 @@ cifs_iovec_write(struct file *file, const struct iovec *iov,
 	if (rc)
 		return rc;
 
+	INIT_LIST_HEAD(&wdata_list);
 	cifs_sb = CIFS_SB(file->f_path.dentry->d_sb);
-	num_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
-
-	pages = kmalloc(sizeof(struct pages *)*num_pages, GFP_KERNEL);
-	if (!pages)
-		return -ENOMEM;
-
-	to_send = kmalloc(sizeof(struct kvec)*(num_pages + 1), GFP_KERNEL);
-	if (!to_send) {
-		kfree(pages);
-		return -ENOMEM;
-	}
-
-	rc = cifs_write_allocate_pages(pages, num_pages);
-	if (rc) {
-		kfree(pages);
-		kfree(to_send);
-		return rc;
-	}
-
-	xid = GetXid();
 	open_file = file->private_data;
+	tcon = tlink_tcon(open_file->tlink);
 
 	if (cifs_sb->mnt_cifs_flags & CIFS_MOUNT_RWPIDFORWARD)
 		pid = open_file->pid;
 	else
 		pid = current->tgid;
 
-	pTcon = tlink_tcon(open_file->tlink);
-	inode = file->f_path.dentry->d_inode;
-
 	iov_iter_init(&it, iov, nr_segs, len, 0);
-	npages = num_pages;
 	do {
-		size_t save_len = cur_len;
-		for (i = 0; i < npages; i++) {
-			copied = min_t(const size_t, cur_len, PAGE_CACHE_SIZE);
-			copied = iov_iter_copy_from_user(pages[i], &it, 0,
-							 copied);
-			cur_len -= copied;
-			iov_iter_advance(&it, copied);
-			to_send[i+1].iov_base = kmap(pages[i]);
-			to_send[i+1].iov_len = copied;
-		}
+		size_t save_len;
 
-		cur_len = save_len - cur_len;
+		nr_pages = get_numpages(cifs_sb->wsize, len, &cur_len);
+		wdata = cifs_writedata_alloc(nr_pages,
+					     cifs_uncached_writev_complete);
+		if (!wdata) {
+			rc = -ENOMEM;
+			break;
+		}
 
-		do {
-			if (open_file->invalidHandle) {
-				rc = cifs_reopen_file(open_file, false);
-				if (rc != 0)
-					break;
-			}
-			io_parms.netfid = open_file->netfid;
-			io_parms.pid = pid;
-			io_parms.tcon = pTcon;
-			io_parms.offset = *poffset;
-			io_parms.length = cur_len;
-			rc = CIFSSMBWrite2(xid, &io_parms, &written, to_send,
-					   npages, 0);
-		} while (rc == -EAGAIN);
-
-		for (i = 0; i < npages; i++)
-			kunmap(pages[i]);
+		rc = cifs_write_allocate_pages(wdata->pages, nr_pages);
+		if (rc) {
+			kfree(wdata);
+			break;
+		}
 
-		if (written) {
-			len -= written;
-			total_written += written;
-			spin_lock(&inode->i_lock);
-			cifs_update_eof(CIFS_I(inode), *poffset, written);
-			spin_unlock(&inode->i_lock);
-			*poffset += written;
-		} else if (rc < 0) {
-			if (!total_written)
-				total_written = rc;
+		save_len = cur_len;
+		for (i = 0; i < nr_pages; i++) {
+			copied = min_t(const size_t, cur_len, PAGE_SIZE);
+			copied = iov_iter_copy_from_user(wdata->pages[i], &it,
+							 0, copied);
+			cur_len -= copied;
+			iov_iter_advance(&it, copied);
+		}
+		cur_len = save_len - cur_len;
+
+		wdata->sync_mode = WB_SYNC_ALL;
+		wdata->nr_pages = nr_pages;
+		wdata->offset = (__u64)offset;
+		wdata->cfile = cifsFileInfo_get(open_file);
+		wdata->pid = pid;
+		wdata->bytes = cur_len;
+		wdata->marshal_iov = cifs_uncached_marshal_iov;
+		rc = cifs_uncached_retry_writev(wdata);
+		if (rc) {
+			kref_put(&wdata->refcount, cifs_writedata_release);
 			break;
 		}
 
-		/* get length and number of kvecs of the next write */
-		npages = get_numpages(cifs_sb->wsize, len, &cur_len);
+		list_add_tail(&wdata->list, &wdata_list);
+		offset += cur_len;
+		len -= cur_len;
 	} while (len > 0);
 
-	if (total_written > 0) {
-		spin_lock(&inode->i_lock);
-		if (*poffset > inode->i_size)
-			i_size_write(inode, *poffset);
-		spin_unlock(&inode->i_lock);
+	/*
+	 * If at least one write was successfully sent, then discard any rc
+	 * value from the later writes. If the other write succeeds, then
+	 * we'll end up returning whatever was written. If it fails, then
+	 * we'll get a new rc value from that.
+	 */
+	if (!list_empty(&wdata_list))
+		rc = 0;
+
+	/*
+	 * Wait for and collect replies for any successful sends in order of
+	 * increasing offset. Once an error is hit or we get a fatal signal
+	 * while waiting, then return without waiting for any more replies.
+	 */
+restart_loop:
+	list_for_each_entry_safe(wdata, tmp, &wdata_list, list) {
+		if (!rc) {
+			/* FIXME: freezable too? */
+			rc = wait_for_completion_killable(&wdata->done);
+			if (rc)
+				rc = -EINTR;
+			else if (wdata->result)
+				rc = wdata->result;
+			else
+				total_written += wdata->bytes;
+
+			/* resend call if it's a retryable error */
+			if (rc == -EAGAIN) {
+				rc = cifs_uncached_retry_writev(wdata);
+				goto restart_loop;
+			}
+		}
+		list_del_init(&wdata->list);
+		kref_put(&wdata->refcount, cifs_writedata_release);
 	}
 
-	cifs_stats_bytes_written(pTcon, total_written);
-	mark_inode_dirty_sync(inode);
+	if (total_written > 0)
+		*poffset += total_written;
 
-	for (i = 0; i < num_pages; i++)
-		put_page(pages[i]);
-	kfree(to_send);
-	kfree(pages);
-	FreeXid(xid);
-	return total_written;
+	cifs_stats_bytes_written(tcon, total_written);
+	return total_written ? total_written : (ssize_t)rc;
 }
 
 ssize_t cifs_user_writev(struct kiocb *iocb, const struct iovec *iov,
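The rewritten tail of cifs_iovec_write() no longer issues each chunk with a blocking CIFSSMBWrite2() and waits for its reply before moving on; it queues every chunk asynchronously, then walks wdata_list in order of increasing offset, waits on each completion, accumulates bytes until the first failure, and returns the byte count if anything succeeded, otherwise the error. A small sketch of that collection policy (ignoring the killable wait and the -EAGAIN resend) is below; struct sent_write and collect_results() are illustrative stand-ins, not CIFS code.

/*
 * Sketch of the result-collection policy: walk the submitted chunks in
 * offset order, stop counting at the first failure, and report progress
 * if any chunk succeeded, else the error (short-write semantics).
 */
#include <stddef.h>
#include <sys/types.h>

struct sent_write {
	size_t bytes;   /* length of this chunk */
	int    result;  /* 0 on success, -errno on failure (filled on reply) */
};

static ssize_t collect_results(const struct sent_write *w, size_t n)
{
	size_t i;
	size_t total = 0;
	int    rc = 0;

	for (i = 0; i < n && !rc; i++) {
		if (w[i].result)
			rc = w[i].result;   /* first error ends accumulation */
		else
			total += w[i].bytes;
	}
	return total ? (ssize_t)total : (ssize_t)rc;
}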