fs/cifs/cifsfs.c  +1 −1

@@ -74,7 +74,7 @@ module_param(cifs_min_small, int, 0);
 MODULE_PARM_DESC(cifs_min_small, "Small network buffers in pool. Default: 30 "
                  "Range: 2 to 256");
 unsigned int cifs_max_pending = CIFS_MAX_REQ;
-module_param(cifs_max_pending, int, 0);
+module_param(cifs_max_pending, int, 0444);
 MODULE_PARM_DESC(cifs_max_pending, "Simultaneous requests to server. "
                  "Default: 50 Range: 2 to 256");
 unsigned short echo_retries = 5;

fs/cifs/cifsglob.h  +26 −3

@@ -291,7 +291,13 @@ struct TCP_Server_Info {
         bool sec_kerberosu2u;           /* supports U2U Kerberos */
         bool sec_kerberos;              /* supports plain Kerberos */
         bool sec_mskerberos;            /* supports legacy MS Kerberos */
+        bool large_buf;                 /* is current buffer large? */
         struct delayed_work echo;       /* echo ping workqueue job */
+        struct kvec *iov;               /* reusable kvec array for receives */
+        unsigned int nr_iov;            /* number of kvecs in array */
+        char *smallbuf;                 /* pointer to current "small" buffer */
+        char *bigbuf;                   /* pointer to current "big" buffer */
+        unsigned int total_read;        /* total amount of data read in this pass */
 #ifdef CONFIG_CIFS_FSCACHE
         struct fscache_cookie *fscache; /* client index cache cookie */
 #endif

@@ -650,8 +656,24 @@ static inline void cifs_stats_bytes_read(struct cifs_tcon *tcon,
 struct mid_q_entry;

 /*
- * This is the prototype for the mid callback function. When creating one,
- * take special care to avoid deadlocks. Things to bear in mind:
+ * This is the prototype for the mid receive function. This function is for
+ * receiving the rest of the SMB frame, starting with the WordCount (which is
+ * just after the MID in struct smb_hdr). Note:
+ *
+ * - This will be called by cifsd, with no locks held.
+ * - The mid will still be on the pending_mid_q.
+ * - mid->resp_buf will point to the current buffer.
+ *
+ * Returns zero on a successful receive, or an error. The receive state in
+ * the TCP_Server_Info will also be updated.
+ */
+typedef int (mid_receive_t)(struct TCP_Server_Info *server,
+                            struct mid_q_entry *mid);
+
+/*
+ * This is the prototype for the mid callback function. This is called once the
+ * mid has been received off of the socket. When creating one, take special
+ * care to avoid deadlocks. Things to bear in mind:
  *
  * - it will be called by cifsd, with no locks held
  * - the mid will be removed from any lists

@@ -669,9 +691,10 @@ struct mid_q_entry {
         unsigned long when_sent; /* time when smb send finished */
         unsigned long when_received; /* when demux complete (taken off wire) */
 #endif
+        mid_receive_t *receive; /* call receive callback */
         mid_callback_t *callback; /* call completion callback */
         void *callback_data; /* general purpose pointer for callback */
-        struct smb_hdr *resp_buf;      /* response buffer */
+        struct smb_hdr *resp_buf;      /* pointer to received SMB header */
         int midState;   /* wish this were enum but can not pass to wait_event */
         __u8 command;   /* smb command code */
         bool largeBuf:1;        /* if valid response, is pointer to large buf */
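Purely as an illustration of the receive-hook contract described in the comment above (this is not code from the patch; the function name is hypothetical and it assumes the whole frame fits in the per-server small buffer), a minimal mid_receive_t might look like:

/*
 * Illustrative only: a trivial receive hook that pulls the rest of the frame
 * into the tail of the per-server small buffer. Real users (see
 * cifs_readv_receive in cifssmb.c below) steer the payload into their own
 * pages instead. Assumes the frame fits in smallbuf.
 */
static int example_receive(struct TCP_Server_Info *server,
                           struct mid_q_entry *mid)
{
        /* total frame length = RFC1001 length field + 4 header bytes */
        unsigned int frame_len = be32_to_cpu(mid->resp_buf->smb_buf_length) + 4;
        int length;

        /* on entry, total_read covers everything up to and including the MID */
        length = cifs_read_from_socket(server,
                                       server->smallbuf + server->total_read,
                                       frame_len - server->total_read);
        if (length < 0)
                return length;

        server->total_read += length;
        dequeue_mid(mid, false);        /* frame received intact */
        return 0;
}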
fs/cifs/cifspdu.h  +1 −3

@@ -1089,9 +1089,7 @@ typedef struct smb_com_read_rsp {
         __le16 DataLengthHigh;
         __u64 Reserved2;
         __u16 ByteCount;
-        __u8 Pad;               /* BB check for whether padded to DWORD
-                                   boundary and optimum performance here */
-        char Data[1];
+        /* read response data immediately follows */
 } __attribute__((packed)) READ_RSP;

 typedef struct locking_andx_range {

fs/cifs/cifsproto.h  +27 −2

@@ -69,8 +69,9 @@ extern struct mid_q_entry *AllocMidQEntry(const struct smb_hdr *smb_buffer,
                                         struct TCP_Server_Info *server);
 extern void DeleteMidQEntry(struct mid_q_entry *midEntry);
 extern int cifs_call_async(struct TCP_Server_Info *server, struct kvec *iov,
-                           unsigned int nvec, mid_callback_t *callback,
-                           void *cbdata, bool ignore_pend);
+                           unsigned int nvec, mid_receive_t *receive,
+                           mid_callback_t *callback, void *cbdata,
+                           bool ignore_pend);
 extern int SendReceive(const unsigned int /* xid */ , struct cifs_ses *,
                        struct smb_hdr * /* input */ ,
                        struct smb_hdr * /* out */ ,

@@ -153,6 +154,12 @@ extern struct cifs_ntsd *get_cifs_acl(struct cifs_sb_info *, struct inode *,
 extern int set_cifs_acl(struct cifs_ntsd *, __u32, struct inode *,
                                 const char *, int);
+extern void dequeue_mid(struct mid_q_entry *mid, bool malformed);
+extern int cifs_read_from_socket(struct TCP_Server_Info *server, char *buf,
+                     unsigned int to_read);
+extern int cifs_readv_from_socket(struct TCP_Server_Info *server,
+                struct kvec *iov_orig, unsigned int nr_segs,
+                unsigned int to_read);
 extern void cifs_setup_cifs_sb(struct smb_vol *pvolume_info,
                                struct cifs_sb_info *cifs_sb);
 extern int cifs_match_super(struct super_block *, void *);

@@ -442,6 +449,24 @@ extern int E_md4hash(const unsigned char *passwd, unsigned char *p16);
 extern int SMBencrypt(unsigned char *passwd, const unsigned char *c8,
                         unsigned char *p24);

+/* asynchronous read support */
+struct cifs_readdata {
+        struct cifsFileInfo             *cfile;
+        struct address_space            *mapping;
+        __u64                           offset;
+        unsigned int                    bytes;
+        pid_t                           pid;
+        int                             result;
+        struct list_head                pages;
+        struct work_struct              work;
+        unsigned int                    nr_iov;
+        struct kvec                     iov[1];
+};
+struct cifs_readdata *cifs_readdata_alloc(unsigned int nr_pages);
+void cifs_readdata_free(struct cifs_readdata *rdata);
+int cifs_async_readv(struct cifs_readdata *rdata);
+
 /* asynchronous write support */
 struct cifs_writedata {
         struct kref                     refcount;
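The consumer of this interface (the readpages conversion) is not part of this diff. As a rough sketch of how a caller might drive it — every name below that is not declared above is hypothetical, and the page gathering and PID selection are simplified — one async read could be issued roughly like this:

/*
 * Illustrative only: issue a single async read for a batch of pages.
 * Assumes "pages" is a list of locked pages linked on page->lru and that
 * the completion path drops the cfile reference via cifs_readdata_free().
 */
static int example_issue_read(struct cifsFileInfo *open_file,
                              struct address_space *mapping,
                              struct list_head *pages, unsigned int npages,
                              __u64 offset)
{
        struct cifs_readdata *rdata;
        int rc;

        rdata = cifs_readdata_alloc(npages);
        if (rdata == NULL)
                return -ENOMEM;

        cifsFileInfo_get(open_file);            /* ref dropped in cifs_readdata_free */
        rdata->cfile = open_file;
        rdata->mapping = mapping;
        rdata->offset = offset;
        rdata->bytes = npages * PAGE_CACHE_SIZE;
        rdata->pid = current->tgid;             /* real callers may pick a stored pid */
        list_splice_init(pages, &rdata->pages);

        rc = cifs_async_readv(rdata);
        if (rc != 0)
                cifs_readdata_free(rdata);      /* caller cleans up the pages */
        return rc;
}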
fs/cifs/cifssmb.c  +362 −2

@@ -33,6 +33,8 @@
 #include <linux/slab.h>
 #include <linux/posix_acl_xattr.h>
 #include <linux/pagemap.h>
+#include <linux/swap.h>
+#include <linux/task_io_accounting_ops.h>
 #include <asm/uaccess.h>
 #include "cifspdu.h"
 #include "cifsglob.h"

@@ -40,6 +42,7 @@
 #include "cifsproto.h"
 #include "cifs_unicode.h"
 #include "cifs_debug.h"
+#include "fscache.h"

 #ifdef CONFIG_CIFS_POSIX
 static struct {

@@ -83,6 +86,9 @@
 #endif /* CONFIG_CIFS_WEAK_PW_HASH */
 #endif /* CIFS_POSIX */

+/* Forward declarations */
+static void cifs_readv_complete(struct work_struct *work);
+
 /* Mark as invalid, all open files on tree connections since they
    were closed when session to server was lost */
 static void mark_open_files_invalid(struct cifs_tcon *pTcon)

@@ -737,7 +743,8 @@ CIFSSMBEcho(struct TCP_Server_Info *server)
         iov.iov_base = smb;
         iov.iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;

-        rc = cifs_call_async(server, &iov, 1, cifs_echo_callback, server, true);
+        rc = cifs_call_async(server, &iov, 1, NULL, cifs_echo_callback,
+                             server, true);
         if (rc)
                 cFYI(1, "Echo request failed: %d", rc);

@@ -1374,6 +1381,359 @@ CIFSSMBOpen(const int xid, struct cifs_tcon *tcon,
         return rc;
 }

+struct cifs_readdata *
+cifs_readdata_alloc(unsigned int nr_pages)
+{
+        struct cifs_readdata *rdata;
+
+        /* readdata + 1 kvec for each page */
+        rdata = kzalloc(sizeof(*rdata) +
+                        sizeof(struct kvec) * nr_pages, GFP_KERNEL);
+        if (rdata != NULL) {
+                INIT_WORK(&rdata->work, cifs_readv_complete);
+                INIT_LIST_HEAD(&rdata->pages);
+        }
+        return rdata;
+}
+
+void
+cifs_readdata_free(struct cifs_readdata *rdata)
+{
+        cifsFileInfo_put(rdata->cfile);
+        kfree(rdata);
+}
+
+/*
+ * Discard any remaining data in the current SMB. To do this, we borrow the
+ * current bigbuf.
+ */
+static int
+cifs_readv_discard(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+        READ_RSP *rsp = (READ_RSP *)server->smallbuf;
+        unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length);
+        int remaining = rfclen + 4 - server->total_read;
+        struct cifs_readdata *rdata = mid->callback_data;
+
+        while (remaining > 0) {
+                int length;
+
+                length = cifs_read_from_socket(server, server->bigbuf,
+                                min_t(unsigned int, remaining,
+                                        CIFSMaxBufSize + MAX_CIFS_HDR_SIZE));
+                if (length < 0)
+                        return length;
+                server->total_read += length;
+                remaining -= length;
+        }
+
+        dequeue_mid(mid, rdata->result);
+        return 0;
+}
+
+static int
+cifs_readv_receive(struct TCP_Server_Info *server, struct mid_q_entry *mid)
+{
+        int length, len;
+        unsigned int data_offset, remaining, data_len;
+        struct cifs_readdata *rdata = mid->callback_data;
+        READ_RSP *rsp = (READ_RSP *)server->smallbuf;
+        unsigned int rfclen = be32_to_cpu(rsp->hdr.smb_buf_length) + 4;
+        u64 eof;
+        pgoff_t eof_index;
+        struct page *page, *tpage;
+
+        cFYI(1, "%s: mid=%u offset=%llu bytes=%u", __func__,
+                mid->mid, rdata->offset, rdata->bytes);
+
+        /*
+         * read the rest of READ_RSP header (sans Data array), or whatever we
+         * can if there's not enough data. At this point, we've read down to
+         * the Mid.
+         */
+        len = min_t(unsigned int, rfclen, sizeof(*rsp)) -
+                        sizeof(struct smb_hdr) + 1;
+
+        rdata->iov[0].iov_base = server->smallbuf + sizeof(struct smb_hdr) - 1;
+        rdata->iov[0].iov_len = len;
+
+        length = cifs_readv_from_socket(server, rdata->iov, 1, len);
+        if (length < 0)
+                return length;
+        server->total_read += length;
+
+        /* Was the SMB read successful? */
+        rdata->result = map_smb_to_linux_error(&rsp->hdr, false);
+        if (rdata->result != 0) {
+                cFYI(1, "%s: server returned error %d", __func__,
+                        rdata->result);
+                return cifs_readv_discard(server, mid);
+        }
+
+        /* Is there enough to get to the rest of the READ_RSP header? */
+        if (server->total_read < sizeof(READ_RSP)) {
+                cFYI(1, "%s: server returned short header. got=%u expected=%zu",
+                        __func__, server->total_read, sizeof(READ_RSP));
+                rdata->result = -EIO;
+                return cifs_readv_discard(server, mid);
+        }
+
+        data_offset = le16_to_cpu(rsp->DataOffset) + 4;
+        if (data_offset < server->total_read) {
+                /*
+                 * win2k8 sometimes sends an offset of 0 when the read
+                 * is beyond the EOF. Treat it as if the data starts just after
+                 * the header.
+                 */
+                cFYI(1, "%s: data offset (%u) inside read response header",
+                        __func__, data_offset);
+                data_offset = server->total_read;
+        } else if (data_offset > MAX_CIFS_SMALL_BUFFER_SIZE) {
+                /* data_offset is beyond the end of smallbuf */
+                cFYI(1, "%s: data offset (%u) beyond end of smallbuf",
+                        __func__, data_offset);
+                rdata->result = -EIO;
+                return cifs_readv_discard(server, mid);
+        }
+
+        cFYI(1, "%s: total_read=%u data_offset=%u", __func__,
+                server->total_read, data_offset);
+
+        len = data_offset - server->total_read;
+        if (len > 0) {
+                /* read any junk before data into the rest of smallbuf */
+                rdata->iov[0].iov_base = server->smallbuf + server->total_read;
+                rdata->iov[0].iov_len = len;
+                length = cifs_readv_from_socket(server, rdata->iov, 1, len);
+                if (length < 0)
+                        return length;
+                server->total_read += length;
+        }
+
+        /* set up first iov for signature check */
+        rdata->iov[0].iov_base = server->smallbuf;
+        rdata->iov[0].iov_len = server->total_read;
+        cFYI(1, "0: iov_base=%p iov_len=%zu",
+                rdata->iov[0].iov_base, rdata->iov[0].iov_len);
+
+        /* how much data is in the response? */
+        data_len = le16_to_cpu(rsp->DataLengthHigh) << 16;
+        data_len += le16_to_cpu(rsp->DataLength);
+        if (data_offset + data_len > rfclen) {
+                /* data_len is corrupt -- discard frame */
+                rdata->result = -EIO;
+                return cifs_readv_discard(server, mid);
+        }
+
+        /* marshal up the page array */
+        len = 0;
+        remaining = data_len;
+        rdata->nr_iov = 1;
+
+        /* determine the eof that the server (probably) has */
+        eof = CIFS_I(rdata->mapping->host)->server_eof;
+        eof_index = eof ? (eof - 1) >> PAGE_CACHE_SHIFT : 0;
+        cFYI(1, "eof=%llu eof_index=%lu", eof, eof_index);
+
+        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+                if (remaining >= PAGE_CACHE_SIZE) {
+                        /* enough data to fill the page */
+                        rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+                        rdata->iov[rdata->nr_iov].iov_len = PAGE_CACHE_SIZE;
+                        cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+                                rdata->nr_iov, page->index,
+                                rdata->iov[rdata->nr_iov].iov_base,
+                                rdata->iov[rdata->nr_iov].iov_len);
+                        ++rdata->nr_iov;
+                        len += PAGE_CACHE_SIZE;
+                        remaining -= PAGE_CACHE_SIZE;
+                } else if (remaining > 0) {
+                        /* enough for partial page, fill and zero the rest */
+                        rdata->iov[rdata->nr_iov].iov_base = kmap(page);
+                        rdata->iov[rdata->nr_iov].iov_len = remaining;
+                        cFYI(1, "%u: idx=%lu iov_base=%p iov_len=%zu",
+                                rdata->nr_iov, page->index,
+                                rdata->iov[rdata->nr_iov].iov_base,
+                                rdata->iov[rdata->nr_iov].iov_len);
+                        memset(rdata->iov[rdata->nr_iov].iov_base + remaining,
+                                '\0', PAGE_CACHE_SIZE - remaining);
+                        ++rdata->nr_iov;
+                        len += remaining;
+                        remaining = 0;
+                } else if (page->index > eof_index) {
+                        /*
+                         * The VFS will not try to do readahead past the
+                         * i_size, but it's possible that we have outstanding
+                         * writes with gaps in the middle and the i_size hasn't
+                         * caught up yet. Populate those with zeroed out pages
+                         * to prevent the VFS from repeatedly attempting to
+                         * fill them until the writes are flushed.
+                         */
+                        zero_user(page, 0, PAGE_CACHE_SIZE);
+                        list_del(&page->lru);
+                        lru_cache_add_file(page);
+                        flush_dcache_page(page);
+                        SetPageUptodate(page);
+                        unlock_page(page);
+                        page_cache_release(page);
+                } else {
+                        /* no need to hold page hostage */
+                        list_del(&page->lru);
+                        lru_cache_add_file(page);
+                        unlock_page(page);
+                        page_cache_release(page);
+                }
+        }
+
+        /* issue the read if we have any iovecs left to fill */
+        if (rdata->nr_iov > 1) {
+                length = cifs_readv_from_socket(server, &rdata->iov[1],
+                                                rdata->nr_iov - 1, len);
+                if (length < 0)
+                        return length;
+                server->total_read += length;
+        } else {
+                length = 0;
+        }
+
+        rdata->bytes = length;
+
+        cFYI(1, "total_read=%u rfclen=%u remaining=%u", server->total_read,
+                rfclen, remaining);
+
+        /* discard anything left over */
+        if (server->total_read < rfclen)
+                return cifs_readv_discard(server, mid);
+
+        dequeue_mid(mid, false);
+        return length;
+}
+
+static void
+cifs_readv_complete(struct work_struct *work)
+{
+        struct cifs_readdata *rdata = container_of(work,
+                                                struct cifs_readdata, work);
+        struct page *page, *tpage;
+
+        list_for_each_entry_safe(page, tpage, &rdata->pages, lru) {
+                list_del(&page->lru);
+                lru_cache_add_file(page);
+                kunmap(page);
+
+                if (rdata->result == 0) {
+                        flush_dcache_page(page);
+                        SetPageUptodate(page);
+                }
+
+                unlock_page(page);
+
+                if (rdata->result == 0)
+                        cifs_readpage_to_fscache(rdata->mapping->host, page);
+
+                page_cache_release(page);
+        }
+        cifs_readdata_free(rdata);
+}
+
+static void
+cifs_readv_callback(struct mid_q_entry *mid)
+{
+        struct cifs_readdata *rdata = mid->callback_data;
+        struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+        struct TCP_Server_Info *server = tcon->ses->server;
+
+        cFYI(1, "%s: mid=%u state=%d result=%d bytes=%u", __func__,
+                mid->mid, mid->midState, rdata->result, rdata->bytes);
+
+        switch (mid->midState) {
+        case MID_RESPONSE_RECEIVED:
+                /* result already set, check signature */
+                if (server->sec_mode &
+                    (SECMODE_SIGN_REQUIRED | SECMODE_SIGN_ENABLED)) {
+                        if (cifs_verify_signature(rdata->iov, rdata->nr_iov,
+                                          server, mid->sequence_number + 1))
+                                cERROR(1, "Unexpected SMB signature");
+                }
+                /* FIXME: should this be counted toward the initiating task? */
+                task_io_account_read(rdata->bytes);
+                cifs_stats_bytes_read(tcon, rdata->bytes);
+                break;
+        case MID_REQUEST_SUBMITTED:
+        case MID_RETRY_NEEDED:
+                rdata->result = -EAGAIN;
+                break;
+        default:
+                rdata->result = -EIO;
+        }
+
+        queue_work(system_nrt_wq, &rdata->work);
+        DeleteMidQEntry(mid);
+        atomic_dec(&server->inFlight);
+        wake_up(&server->request_q);
+}
+
+/* cifs_async_readv - send an async read, and set up mid to handle result */
+int
+cifs_async_readv(struct cifs_readdata *rdata)
+{
+        int rc;
+        READ_REQ *smb = NULL;
+        int wct;
+        struct cifs_tcon *tcon = tlink_tcon(rdata->cfile->tlink);
+
+        cFYI(1, "%s: offset=%llu bytes=%u", __func__,
+                rdata->offset, rdata->bytes);
+
+        if (tcon->ses->capabilities & CAP_LARGE_FILES)
+                wct = 12;
+        else {
+                wct = 10; /* old style read */
+                if ((rdata->offset >> 32) > 0)  {
+                        /* can not handle this big offset for old */
+                        return -EIO;
+                }
+        }
+
+        rc = small_smb_init(SMB_COM_READ_ANDX, wct, tcon, (void **)&smb);
+        if (rc)
+                return rc;
+
+        smb->hdr.Pid = cpu_to_le16((__u16)rdata->pid);
+        smb->hdr.PidHigh = cpu_to_le16((__u16)(rdata->pid >> 16));
+
+        smb->AndXCommand = 0xFF;        /* none */
+        smb->Fid = rdata->cfile->netfid;
+        smb->OffsetLow = cpu_to_le32(rdata->offset & 0xFFFFFFFF);
+        if (wct == 12)
+                smb->OffsetHigh = cpu_to_le32(rdata->offset >> 32);
+        smb->Remaining = 0;
+        smb->MaxCount = cpu_to_le16(rdata->bytes & 0xFFFF);
+        smb->MaxCountHigh = cpu_to_le32(rdata->bytes >> 16);
+        if (wct == 12)
+                smb->ByteCount = 0;
+        else {
+                /* old style read */
+                struct smb_com_readx_req *smbr =
+                        (struct smb_com_readx_req *)smb;
+                smbr->ByteCount = 0;
+        }
+
+        /* 4 for RFC1001 length + 1 for BCC */
+        rdata->iov[0].iov_base = smb;
+        rdata->iov[0].iov_len = be32_to_cpu(smb->hdr.smb_buf_length) + 4;
+
+        rc = cifs_call_async(tcon->ses->server, rdata->iov, 1,
+                             cifs_readv_receive, cifs_readv_callback,
+                             rdata, false);
+
+        if (rc == 0)
+                cifs_stats_inc(&tcon->num_reads);
+
+        cifs_small_buf_release(smb);
+        return rc;
+}
+
 int
 CIFSSMBRead(const int xid, struct cifs_io_parms *io_parms, unsigned int *nbytes,
             char **buf, int *pbuf_type)

@@ -1834,7 +2194,7 @@ cifs_async_writev(struct cifs_writedata *wdata)
         kref_get(&wdata->refcount);
         rc = cifs_call_async(tcon->ses->server, iov, wdata->nr_pages + 1,
-                             cifs_writev_callback, wdata, false);
+                             NULL, cifs_writev_callback, wdata, false);

         if (rc == 0)
                 cifs_stats_inc(&tcon->num_writes);
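As a hypothetical worked example of the length handling in cifs_readv_receive above (the values are made up, not taken from the patch):

/*
 * Hypothetical frame: a 70000-byte read completes in one response.
 * 70000 = 0x11170, so the server encodes it as
 *     DataLengthHigh = 0x0001, DataLength = 0x1170
 * and the receive path reassembles
 *     data_len = (0x0001 << 16) + 0x1170 = 65536 + 4464 = 70000.
 * The frame is accepted only if data_offset + data_len <= rfclen, where
 * rfclen is the RFC1001 length plus the 4-byte length field and
 * data_offset is DataOffset + 4; otherwise cifs_readv_discard() drains
 * and drops the frame.
 */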