// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *   Copyright (C) 2018 Samsung Electronics Co., Ltd.
 */

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/xarray.h>

#include "../transport_ipc.h"
#include "../connection.h"

#include "tree_connect.h"
#include "user_config.h"
#include "share_config.h"
#include "user_session.h"

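/*
 * Establish a tree connection to @share_name for the session bound to
 * @work. On success the returned status carries KSMBD_TREE_CONN_STATUS_OK
 * and the newly created tree connect; on failure only the error code is
 * set.
 */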
struct ksmbd_tree_conn_status
ksmbd_tree_conn_connect(struct ksmbd_work *work, const char *share_name)
{
	struct ksmbd_tree_conn_status status = {-ENOENT, NULL};
	struct ksmbd_tree_connect_response *resp = NULL;
	struct ksmbd_share_config *sc;
	struct ksmbd_tree_connect *tree_conn = NULL;
	struct sockaddr *peer_addr;
	struct ksmbd_conn *conn = work->conn;
	struct ksmbd_session *sess = work->sess;
	int ret;

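	/*
	 * Look up the share configuration. If no config can be obtained,
	 * the default -ENOENT status is returned as-is.
	 */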
	sc = ksmbd_share_config_get(work, share_name);
	if (!sc)
		return status;

	tree_conn = kzalloc(sizeof(struct ksmbd_tree_connect), GFP_KERNEL);
	if (!tree_conn) {
		status.ret = -ENOMEM;
		goto out_error;
	}

	tree_conn->id = ksmbd_acquire_tree_conn_id(sess);
	if (tree_conn->id < 0) {
		status.ret = -EINVAL;
		goto out_error;
	}

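	/*
	 * Ask the user-space daemon over IPC whether this peer may connect
	 * to the share; the response carries the resulting status and the
	 * connection flags.
	 */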
	peer_addr = KSMBD_TCP_PEER_SOCKADDR(conn);
	resp = ksmbd_ipc_tree_connect_request(sess,
					      sc,
					      tree_conn,
					      peer_addr);
	if (!resp) {
		status.ret = -EINVAL;
		goto out_error;
	}

	status.ret = resp->status;
	if (status.ret != KSMBD_TREE_CONN_STATUS_OK)
		goto out_error;

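	/*
	 * KSMBD_TREE_CONN_FLAG_UPDATE indicates the share configuration
	 * changed in user space, so drop the cached config and fetch a
	 * fresh copy.
	 */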
	tree_conn->flags = resp->connection_flags;
	if (test_tree_conn_flag(tree_conn, KSMBD_TREE_CONN_FLAG_UPDATE)) {
		struct ksmbd_share_config *new_sc;

		ksmbd_share_config_del(sc);
		new_sc = ksmbd_share_config_get(work, share_name);
		if (!new_sc) {
			pr_err("Failed to update stale share config\n");
			status.ret = -ESTALE;
			goto out_error;
		}
		ksmbd_share_config_put(sc);
		sc = new_sc;
	}

	tree_conn->user = sess->user;
	tree_conn->share_conf = sc;
	tree_conn->t_state = TREE_NEW;
	status.tree_conn = tree_conn;
	atomic_set(&tree_conn->refcount, 1);
	init_waitqueue_head(&tree_conn->refcount_q);

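	/* Publish the tree connect in the session, keyed by its tree id */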
	ret = xa_err(xa_store(&sess->tree_conns, tree_conn->id, tree_conn,
			      GFP_KERNEL));
	if (ret) {
		status.ret = -ENOMEM;
		goto out_error;
	}
	kvfree(resp);
	return status;

out_error:
	if (tree_conn)
		ksmbd_release_tree_conn_id(sess, tree_conn->id);
	ksmbd_share_config_put(sc);
	kfree(tree_conn);
	kvfree(resp);
	return status;
}

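/*
 * Drop a reference taken by ksmbd_tree_conn_lookup() and wake up a
 * disconnect that may be waiting for the refcount to reach zero.
 */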
void ksmbd_tree_connect_put(struct ksmbd_tree_connect *tcon)
{
	/*
	 * Check the waitqueue so that a pending tree disconnect can release
	 * the tree connect. waitqueue_active() is safe here because the
	 * wait condition is based on an atomic counter.
	 */
	if (!atomic_dec_return(&tcon->refcount) &&
	    waitqueue_active(&tcon->refcount_q))
		wake_up(&tcon->refcount_q);
}

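/*
 * Tear down a tree connect: unpublish it from the session, wait for the
 * last user to drop its reference, notify user space and free it.
 */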
int ksmbd_tree_conn_disconnect(struct ksmbd_session *sess,
			       struct ksmbd_tree_connect *tree_conn)
{
	int ret;

	write_lock(&sess->tree_conns_lock);
	xa_erase(&sess->tree_conns, tree_conn->id);
	write_unlock(&sess->tree_conns_lock);

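	/*
	 * Wait until every lookup that still holds a reference has called
	 * ksmbd_tree_connect_put().
	 */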
	if (!atomic_dec_and_test(&tree_conn->refcount))
		wait_event(tree_conn->refcount_q,
			   atomic_read(&tree_conn->refcount) == 0);

	ret = ksmbd_ipc_tree_disconnect_request(sess->id, tree_conn->id);
	ksmbd_release_tree_conn_id(sess, tree_conn->id);
	ksmbd_share_config_put(tree_conn->share_conf);
	kfree(tree_conn);
	return ret;
}

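/*
 * Look up a connected tree by id and take a reference on it. Returns NULL
 * if the tree does not exist, is not in TREE_CONNECTED state, or is
 * already being torn down.
 */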
struct ksmbd_tree_connect *ksmbd_tree_conn_lookup(struct ksmbd_session *sess,
						  unsigned int id)
{
	struct ksmbd_tree_connect *tcon;

	read_lock(&sess->tree_conns_lock);
	tcon = xa_load(&sess->tree_conns, id);
	if (tcon) {
		if (tcon->t_state != TREE_CONNECTED)
			tcon = NULL;
		else if (!atomic_inc_not_zero(&tcon->refcount))
			tcon = NULL;
	}
	read_unlock(&sess->tree_conns_lock);

	return tcon;
}

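/*
 * Disconnect every remaining tree connect when the session is logged off,
 * then release the xarray that indexed them.
 */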
int ksmbd_tree_conn_session_logoff(struct ksmbd_session *sess)
{
	int ret = 0;
	struct ksmbd_tree_connect *tc;
	unsigned long id;

	if (!sess)
		return -EINVAL;

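	/*
	 * Mark each tree as disconnected under the lock so a concurrent
	 * tree disconnect cannot tear down the same tree twice.
	 */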
	xa_for_each(&sess->tree_conns, id, tc) {
		write_lock(&sess->tree_conns_lock);
		if (tc->t_state == TREE_DISCONNECTED) {
			write_unlock(&sess->tree_conns_lock);
			ret = -ENOENT;
			continue;
		}
		tc->t_state = TREE_DISCONNECTED;
		write_unlock(&sess->tree_conns_lock);

		ret |= ksmbd_tree_conn_disconnect(sess, tc);
	}
	xa_destroy(&sess->tree_conns);
	return ret;
}