fs/ceph/metric.c
/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

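/*
 * Build a single CEPH_MSG_CLIENT_METRICS message containing the cap
 * hit/miss counters plus the accumulated read/write/metadata latencies
 * and send it to the given MDS session.  Returns false only when the
 * message cannot be allocated.
 */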
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *s)
{
        struct ceph_metric_head *head;
        struct ceph_metric_cap *cap;
        struct ceph_metric_read_latency *read;
        struct ceph_metric_write_latency *write;
        struct ceph_metric_metadata_latency *meta;
        struct ceph_client_metric *m = &mdsc->metric;
        u64 nr_caps = atomic64_read(&m->total_caps);
        struct ceph_msg *msg;
        struct timespec64 ts;
        s64 sum;
        s32 items = 0;
        s32 len;

        len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
              + sizeof(*meta);

        msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
        if (!msg) {
                pr_err("send metrics to mds%d, failed to allocate message\n",
                       s->s_mds);
                return false;
        }

        head = msg->front.iov_base;

        /* encode the cap metric */
        cap = (struct ceph_metric_cap *)(head + 1);
        cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
        cap->ver = 1;
        cap->compat = 1;
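        /* data_len covers only the payload after the 10 byte type/ver/compat/data_len header */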
        cap->data_len = cpu_to_le32(sizeof(*cap) - 10);
        cap->hit = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_hit));
        cap->mis = cpu_to_le64(percpu_counter_sum(&mdsc->metric.i_caps_mis));
        cap->total = cpu_to_le64(nr_caps);
        items++;

        /* encode the read latency metric */
        read = (struct ceph_metric_read_latency *)(cap + 1);
        read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
        read->ver = 1;
        read->compat = 1;
        read->data_len = cpu_to_le32(sizeof(*read) - 10);
        sum = m->read_latency_sum;
        jiffies_to_timespec64(sum, &ts);
        read->sec = cpu_to_le32(ts.tv_sec);
        read->nsec = cpu_to_le32(ts.tv_nsec);
        items++;

        /* encode the write latency metric */
        write = (struct ceph_metric_write_latency *)(read + 1);
        write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
        write->ver = 1;
        write->compat = 1;
        write->data_len = cpu_to_le32(sizeof(*write) - 10);
        sum = m->write_latency_sum;
        jiffies_to_timespec64(sum, &ts);
        write->sec = cpu_to_le32(ts.tv_sec);
        write->nsec = cpu_to_le32(ts.tv_nsec);
        items++;

        /* encode the metadata latency metric */
        meta = (struct ceph_metric_metadata_latency *)(write + 1);
        meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
        meta->ver = 1;
        meta->compat = 1;
        meta->data_len = cpu_to_le32(sizeof(*meta) - 10);
        sum = m->metadata_latency_sum;
        jiffies_to_timespec64(sum, &ts);
        meta->sec = cpu_to_le32(ts.tv_sec);
        meta->nsec = cpu_to_le32(ts.tv_nsec);
        items++;

        put_unaligned_le32(items, &head->num);
        msg->front.iov_len = len;
        msg->hdr.version = cpu_to_le16(1);
        msg->hdr.compat_version = cpu_to_le16(1);
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
        dout("client%llu send metrics to mds%d\n",
             ceph_client_gid(mdsc->fsc->client), s->s_mds);
        ceph_con_send(&s->s_con, msg);

        return true;
}


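/*
 * Pick the first MDS session that can accept metrics and cache it in
 * mdsc->metric.session.  The session reference taken by
 * __ceph_lookup_mds_session() is kept for the cached session and
 * dropped for every session that is skipped.
 */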
static void metric_get_session(struct ceph_mds_client *mdsc)
{
        struct ceph_mds_session *s;
        int i;

        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                s = __ceph_lookup_mds_session(mdsc, i);
                if (!s)
                        continue;

                /*
                 * Skip this session if the MDS doesn't support metric
                 * collection; otherwise the MDS will close the session's
                 * socket connection as soon as it receives this message.
                 */
                if (check_session_state(s) &&
                    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
                        mdsc->metric.session = s;
                        break;
                }

                ceph_put_mds_session(s);
        }
        mutex_unlock(&mdsc->mutex);
}

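/*
 * Delayed work: make sure a usable MDS session is cached, send the
 * current metrics to it and re-arm the timer.  If no suitable session
 * is found the work is simply not rescheduled here.
 */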
static void metric_delayed_work(struct work_struct *work)
{
        struct ceph_client_metric *m =
                container_of(work, struct ceph_client_metric, delayed_work.work);
        struct ceph_mds_client *mdsc =
                container_of(m, struct ceph_mds_client, metric);

        if (mdsc->stopping)
                return;

        if (!m->session || !check_session_state(m->session)) {
                if (m->session) {
                        ceph_put_mds_session(m->session);
                        m->session = NULL;
                }
                metric_get_session(mdsc);
        }
        if (m->session) {
                ceph_mdsc_send_metrics(mdsc, m->session);
                metric_schedule_delayed(m);
        }
}

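/*
 * Initialize all the metric counters, the latency trackers and the
 * delayed work.  On failure the already initialized percpu counters
 * are unwound and a negative errno is returned.
 */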
int ceph_metric_init(struct ceph_client_metric *m)
{
        int ret;

        if (!m)
                return -EINVAL;

        atomic64_set(&m->total_dentries, 0);
        ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
        if (ret)
                return ret;

        ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
        if (ret)
                goto err_d_lease_mis;

        atomic64_set(&m->total_caps, 0);
        ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
        if (ret)
                goto err_i_caps_hit;

        ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
        if (ret)
                goto err_i_caps_mis;

        spin_lock_init(&m->read_latency_lock);
        m->read_latency_sq_sum = 0;
        m->read_latency_min = KTIME_MAX;
        m->read_latency_max = 0;
        m->total_reads = 0;
        m->read_latency_sum = 0;

        spin_lock_init(&m->write_latency_lock);
        m->write_latency_sq_sum = 0;
        m->write_latency_min = KTIME_MAX;
        m->write_latency_max = 0;
        m->total_writes = 0;
        m->write_latency_sum = 0;

        spin_lock_init(&m->metadata_latency_lock);
        m->metadata_latency_sq_sum = 0;
        m->metadata_latency_min = KTIME_MAX;
        m->metadata_latency_max = 0;
        m->total_metadatas = 0;
        m->metadata_latency_sum = 0;

        atomic64_set(&m->opened_files, 0);
        ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
        if (ret)
                goto err_opened_inodes;
        ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
        if (ret)
                goto err_total_inodes;

        m->session = NULL;
        INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

        return 0;

err_total_inodes:
        percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
        percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
        percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
        percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
        percpu_counter_destroy(&m->d_lease_hit);

        return ret;
}

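/*
 * Tear down the metric state: stop the delayed work, release the
 * percpu counters and drop any cached MDS session reference.
 */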
void ceph_metric_destroy(struct ceph_client_metric *m)
{
        if (!m)
                return;

        /*
         * Cancel the delayed work first: it may still be sending metrics
         * and must not race with the percpu counters being destroyed.
         */
        cancel_delayed_work_sync(&m->delayed_work);

        percpu_counter_destroy(&m->total_inodes);
        percpu_counter_destroy(&m->opened_inodes);
        percpu_counter_destroy(&m->i_caps_mis);
        percpu_counter_destroy(&m->i_caps_hit);
        percpu_counter_destroy(&m->d_lease_mis);
        percpu_counter_destroy(&m->d_lease_hit);

        if (m->session)
                ceph_put_mds_session(m->session);
}

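/*
 * Online (Welford-style) update of the running latency statistics:
 * besides min/max and the plain sum, keep the sum of squared
 * differences from the mean, (lat - old_avg) * (lat - new_avg), so the
 * standard deviation can be derived later without storing each sample.
 */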
static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
                                    ktime_t *min, ktime_t *max,
                                    ktime_t *sq_sump, ktime_t lat)
{
        ktime_t total, avg, sq, lsum;

        total = ++(*totalp);
        lsum = (*lsump += lat);

        if (unlikely(lat < *min))
                *min = lat;
        if (unlikely(lat > *max))
                *max = lat;

        if (unlikely(total == 1))
                return;

        /* the sq is (lat - old_avg) * (lat - new_avg) */
        avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
        sq = lat - avg;
        avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
        sq = sq * (lat - avg);
        *sq_sump += sq;
}

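/*
 * Account the latency of a completed read.  Failed reads are skipped
 * unless they returned -ENOENT or -ETIMEDOUT.
 */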
void ceph_update_read_latency(struct ceph_client_metric *m,
                              ktime_t r_start, ktime_t r_end,
                              int rc)
{
        ktime_t lat = ktime_sub(r_end, r_start);

        if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
                return;

        spin_lock(&m->read_latency_lock);
        __update_latency(&m->total_reads, &m->read_latency_sum,
                         &m->read_latency_min, &m->read_latency_max,
                         &m->read_latency_sq_sum, lat);
        spin_unlock(&m->read_latency_lock);
}

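/*
 * Account the latency of a completed write.  Failed writes are skipped
 * unless they returned -ETIMEDOUT.
 */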
void ceph_update_write_latency(struct ceph_client_metric *m,
                               ktime_t r_start, ktime_t r_end,
                               int rc)
{
        ktime_t lat = ktime_sub(r_end, r_start);

        if (unlikely(rc && rc != -ETIMEDOUT))
                return;

        spin_lock(&m->write_latency_lock);
        __update_latency(&m->total_writes, &m->write_latency_sum,
                         &m->write_latency_min, &m->write_latency_max,
                         &m->write_latency_sq_sum, lat);
        spin_unlock(&m->write_latency_lock);
}

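/*
 * Account the latency of a completed metadata request.  Failed requests
 * are skipped unless they returned -ENOENT.
 */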
void ceph_update_metadata_latency(struct ceph_client_metric *m,
                                  ktime_t r_start, ktime_t r_end,
                                  int rc)
{
        ktime_t lat = ktime_sub(r_end, r_start);

        if (unlikely(rc && rc != -ENOENT))
                return;

        spin_lock(&m->metadata_latency_lock);
        __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
                         &m->metadata_latency_min, &m->metadata_latency_max,
                         &m->metadata_latency_sq_sum, lat);
        spin_unlock(&m->metadata_latency_lock);
}