Commit bdcffe4b authored by Xiaxi Shen, committed by Steve French

Fix spelling errors in Server Message Block

Fixed typos in various files under fs/smb/client/
Signed-off-by: Xiaxi Shen <shenxiaxi26@gmail.com>
Signed-off-by: Steve French <stfrench@microsoft.com>
parent 1b5487ae
@@ -345,7 +345,7 @@ struct smb_version_operations {
 	/* connect to a server share */
 	int (*tree_connect)(const unsigned int, struct cifs_ses *, const char *,
 			    struct cifs_tcon *, const struct nls_table *);
-	/* close tree connecion */
+	/* close tree connection */
 	int (*tree_disconnect)(const unsigned int, struct cifs_tcon *);
 	/* get DFS referrals */
 	int (*get_dfs_refer)(const unsigned int, struct cifs_ses *,
@@ -816,7 +816,7 @@ struct TCP_Server_Info {
 	 * Protected by @refpath_lock and @srv_lock. The @refpath_lock is
 	 * mostly used for not requiring a copy of @leaf_fullpath when getting
 	 * cached or new DFS referrals (which might also sleep during I/O).
-	 * While @srv_lock is held for making string and NULL comparions against
+	 * While @srv_lock is held for making string and NULL comparisons against
 	 * both fields as in mount(2) and cache refresh.
 	 *
 	 * format: \\HOST\SHARE[\OPTIONAL PATH]
...
@@ -352,7 +352,7 @@ checkSMB(char *buf, unsigned int total_read, struct TCP_Server_Info *server)
 			 * on simple responses (wct, bcc both zero)
 			 * in particular have seen this on
 			 * ulogoffX and FindClose. This leaves
-			 * one byte of bcc potentially unitialized
+			 * one byte of bcc potentially uninitialized
 			 */
 			/* zero rest of bcc */
 			tmp[sizeof(struct smb_hdr)+1] = 0;
...
@@ -406,7 +406,7 @@ static void smbd_post_send_credits(struct work_struct *work)
 			else
 				response = get_empty_queue_buffer(info);
 			if (!response) {
-				/* now switch to emtpy packet queue */
+				/* now switch to empty packet queue */
 				if (use_receive_queue) {
 					use_receive_queue = 0;
 					continue;
@@ -618,7 +618,7 @@ static struct rdma_cm_id *smbd_create_id(
 /*
  * Test if FRWR (Fast Registration Work Requests) is supported on the device
- * This implementation requries FRWR on RDMA read/write
+ * This implementation requires FRWR on RDMA read/write
  * return value: true if it is supported
  */
 static bool frwr_is_supported(struct ib_device_attr *attrs)
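For context only (not part of this commit), a minimal sketch of how such an FRWR capability check is commonly written against the RDMA core is shown below; the helper name and the exact checks are illustrative assumptions, not the fs/smb/client implementation.

#include <rdma/ib_verbs.h>

/*
 * Illustrative sketch only (hypothetical helper, not the code changed
 * above): FRWR needs the memory management extensions capability and a
 * usable fast-registration page list length reported by the device.
 */
static bool frwr_is_supported_sketch(struct ib_device_attr *attrs)
{
	/* IB_WR_REG_MR relies on memory management extensions */
	if (!(attrs->device_cap_flags & IB_DEVICE_MEM_MGT_EXTENSIONS))
		return false;
	/* need room for at least one page per fast-registration WR */
	if (attrs->max_fast_reg_page_list_len == 0)
		return false;
	return true;
}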
@@ -2177,7 +2177,7 @@ static int allocate_mr_list(struct smbd_connection *info)
  * MR available in the list. It may access the list while the
  * smbd_mr_recovery_work is recovering the MR list. This doesn't need a lock
  * as they never modify the same places. However, there may be several CPUs
- * issueing I/O trying to get MR at the same time, mr_list_lock is used to
+ * issuing I/O trying to get MR at the same time, mr_list_lock is used to
  * protect this situation.
  */
 static struct smbd_mr *get_mr(struct smbd_connection *info)
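Again as illustration only (hypothetical, simplified names, not the code touched by this commit), the locking pattern described in that comment - several CPUs concurrently claiming an MR from a shared list under mr_list_lock - looks roughly like this:

#include <linux/list.h>
#include <linux/spinlock.h>

/* hypothetical stand-ins for the real smbd structures */
struct example_mr {
	struct list_head list;
	bool in_use;
};

struct example_conn {
	spinlock_t mr_list_lock;	/* serializes claims from the list */
	struct list_head mr_list;	/* MRs recovered/recycled elsewhere */
};

/* claim one free MR; concurrent callers are serialized by mr_list_lock */
static struct example_mr *example_get_mr(struct example_conn *conn)
{
	struct example_mr *mr;

	spin_lock(&conn->mr_list_lock);
	list_for_each_entry(mr, &conn->mr_list, list) {
		if (!mr->in_use) {
			mr->in_use = true;
			spin_unlock(&conn->mr_list_lock);
			return mr;
		}
	}
	spin_unlock(&conn->mr_list_lock);
	return NULL;	/* no MR free; caller may wait for recovery and retry */
}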
@@ -2311,7 +2311,7 @@ struct smbd_mr *smbd_register_mr(struct smbd_connection *info,
 	/*
 	 * There is no need for waiting for complemtion on ib_post_send
 	 * on IB_WR_REG_MR. Hardware enforces a barrier and order of execution
-	 * on the next ib_post_send when we actaully send I/O to remote peer
+	 * on the next ib_post_send when we actually send I/O to remote peer
 	 */
 	rc = ib_post_send(info->id->qp, &reg_wr->wr, NULL);
 	if (!rc)
...
@@ -1289,7 +1289,7 @@ compound_send_recv(const unsigned int xid, struct cifs_ses *ses,
 out:
 	/*
 	 * This will dequeue all mids. After this it is important that the
-	 * demultiplex_thread will not process any of these mids any futher.
+	 * demultiplex_thread will not process any of these mids any further.
 	 * This is prevented above by using a noop callback that will not
 	 * wake this thread except for the very last PDU.
 	 */
...