/* SPDX-License-Identifier: GPL-2.0 */
#include <linux/ceph/ceph_debug.h>

#include <linux/types.h>
#include <linux/percpu_counter.h>
#include <linux/math64.h>

#include "metric.h"
#include "mds_client.h"

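/*
 * Build a CEPH_MSG_CLIENT_METRICS message carrying the cap, read/write/
 * metadata latency and dentry lease metrics and send it to the given MDS
 * session.  Each record's data_len excludes its 10-byte metric header
 * (type, ver, compat, data_len).  Returns false if the message could not
 * be allocated.
 */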
static bool ceph_mdsc_send_metrics(struct ceph_mds_client *mdsc,
                                   struct ceph_mds_session *s)
{
        struct ceph_metric_head *head;
        struct ceph_metric_cap *cap;
        struct ceph_metric_read_latency *read;
        struct ceph_metric_write_latency *write;
        struct ceph_metric_metadata_latency *meta;
        struct ceph_metric_dlease *dlease;
        struct ceph_client_metric *m = &mdsc->metric;
        u64 nr_caps = atomic64_read(&m->total_caps);
        struct ceph_msg *msg;
        struct timespec64 ts;
        s64 sum;
        s32 items = 0;
        s32 len;

        len = sizeof(*head) + sizeof(*cap) + sizeof(*read) + sizeof(*write)
              + sizeof(*meta) + sizeof(*dlease);

        msg = ceph_msg_new(CEPH_MSG_CLIENT_METRICS, len, GFP_NOFS, true);
        if (!msg) {
                pr_err("send metrics to mds%d, failed to allocate message\n",
                       s->s_mds);
                return false;
        }

        head = msg->front.iov_base;

        /* encode the cap metric */
        cap = (struct ceph_metric_cap *)(head + 1);
        cap->type = cpu_to_le32(CLIENT_METRIC_TYPE_CAP_INFO);
        cap->ver = 1;
        cap->compat = 1;
        cap->data_len = cpu_to_le32(sizeof(*cap) - 10);
        cap->hit = cpu_to_le64(percpu_counter_sum(&m->i_caps_hit));
        cap->mis = cpu_to_le64(percpu_counter_sum(&m->i_caps_mis));
        cap->total = cpu_to_le64(nr_caps);
        items++;

        /* encode the read latency metric */
        read = (struct ceph_metric_read_latency *)(cap + 1);
        read->type = cpu_to_le32(CLIENT_METRIC_TYPE_READ_LATENCY);
        read->ver = 1;
        read->compat = 1;
        read->data_len = cpu_to_le32(sizeof(*read) - 10);
        sum = m->read_latency_sum;
        ts = ktime_to_timespec64(sum); /* latency sums are ktime_t (ns), not jiffies */
        read->sec = cpu_to_le32(ts.tv_sec);
        read->nsec = cpu_to_le32(ts.tv_nsec);
        items++;

        /* encode the write latency metric */
        write = (struct ceph_metric_write_latency *)(read + 1);
        write->type = cpu_to_le32(CLIENT_METRIC_TYPE_WRITE_LATENCY);
        write->ver = 1;
        write->compat = 1;
        write->data_len = cpu_to_le32(sizeof(*write) - 10);
        sum = m->write_latency_sum;
        ts = ktime_to_timespec64(sum);
        write->sec = cpu_to_le32(ts.tv_sec);
        write->nsec = cpu_to_le32(ts.tv_nsec);
        items++;

        /* encode the metadata latency metric */
        meta = (struct ceph_metric_metadata_latency *)(write + 1);
        meta->type = cpu_to_le32(CLIENT_METRIC_TYPE_METADATA_LATENCY);
        meta->ver = 1;
        meta->compat = 1;
        meta->data_len = cpu_to_le32(sizeof(*meta) - 10);
        sum = m->metadata_latency_sum;
        ts = ktime_to_timespec64(sum);
        meta->sec = cpu_to_le32(ts.tv_sec);
        meta->nsec = cpu_to_le32(ts.tv_nsec);
        items++;

        /* encode the dentry lease metric */
        dlease = (struct ceph_metric_dlease *)(meta + 1);
        dlease->type = cpu_to_le32(CLIENT_METRIC_TYPE_DENTRY_LEASE);
        dlease->ver = 1;
        dlease->compat = 1;
        dlease->data_len = cpu_to_le32(sizeof(*dlease) - 10);
        dlease->hit = cpu_to_le64(percpu_counter_sum(&m->d_lease_hit));
        dlease->mis = cpu_to_le64(percpu_counter_sum(&m->d_lease_mis));
        dlease->total = cpu_to_le64(atomic64_read(&m->total_dentries));
        items++;

        put_unaligned_le32(items, &head->num);
        msg->front.iov_len = len;
        msg->hdr.version = cpu_to_le16(1);
        msg->hdr.compat_version = cpu_to_le16(1);
        msg->hdr.front_len = cpu_to_le32(msg->front.iov_len);
        dout("client%llu send metrics to mds%d\n",
             ceph_client_gid(mdsc->fsc->client), s->s_mds);
        ceph_con_send(&s->s_con, msg);

        return true;
}

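/*
 * Find a registered MDS session that supports metric collection and cache
 * it in mdsc->metric.session, keeping the session reference returned by
 * __ceph_lookup_mds_session() until the session is dropped or destroyed.
 */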
static void metric_get_session(struct ceph_mds_client *mdsc)
{
        struct ceph_mds_session *s;
        int i;

        mutex_lock(&mdsc->mutex);
        for (i = 0; i < mdsc->max_sessions; i++) {
                s = __ceph_lookup_mds_session(mdsc, i);
                if (!s)
                        continue;

                /*
                 * Skip sessions whose MDS doesn't support metric
                 * collection; such an MDS will close the session's
                 * socket connection as soon as it receives this
                 * message.
                 */
                if (check_session_state(s) &&
                    test_bit(CEPHFS_FEATURE_METRIC_COLLECT, &s->s_features)) {
                        mdsc->metric.session = s;
                        break;
                }

                ceph_put_mds_session(s);
        }
        mutex_unlock(&mdsc->mutex);
}

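/*
 * Periodic worker: drop a dead cached session and look up a metric-capable
 * one if needed, send the current metrics to it and re-arm itself via
 * metric_schedule_delayed().  If the client is stopping, or no capable
 * session is found, the work is not re-armed.
 */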
static void metric_delayed_work(struct work_struct *work)
{
        struct ceph_client_metric *m =
                container_of(work, struct ceph_client_metric, delayed_work.work);
        struct ceph_mds_client *mdsc =
                container_of(m, struct ceph_mds_client, metric);

        if (mdsc->stopping)
                return;

        if (!m->session || !check_session_state(m->session)) {
                if (m->session) {
                        ceph_put_mds_session(m->session);
                        m->session = NULL;
                }
                metric_get_session(mdsc);
        }
        if (m->session) {
                ceph_mdsc_send_metrics(mdsc, m->session);
                metric_schedule_delayed(m);
        }
}

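/*
 * Initialize the client metrics: dentry lease and cap hit/miss percpu
 * counters, the read/write/metadata latency trackers, the opened file and
 * inode counters, and the delayed work that reports them to the MDS.
 * On failure, counters that were already set up are destroyed again.
 */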
int ceph_metric_init(struct ceph_client_metric *m)
{
        int ret;

        if (!m)
                return -EINVAL;

        atomic64_set(&m->total_dentries, 0);
        ret = percpu_counter_init(&m->d_lease_hit, 0, GFP_KERNEL);
        if (ret)
                return ret;

        ret = percpu_counter_init(&m->d_lease_mis, 0, GFP_KERNEL);
        if (ret)
                goto err_d_lease_mis;

        atomic64_set(&m->total_caps, 0);
        ret = percpu_counter_init(&m->i_caps_hit, 0, GFP_KERNEL);
        if (ret)
                goto err_i_caps_hit;

        ret = percpu_counter_init(&m->i_caps_mis, 0, GFP_KERNEL);
        if (ret)
                goto err_i_caps_mis;

        spin_lock_init(&m->read_latency_lock);
        m->read_latency_sq_sum = 0;
        m->read_latency_min = KTIME_MAX;
        m->read_latency_max = 0;
        m->total_reads = 0;
        m->read_latency_sum = 0;

        spin_lock_init(&m->write_latency_lock);
        m->write_latency_sq_sum = 0;
        m->write_latency_min = KTIME_MAX;
        m->write_latency_max = 0;
        m->total_writes = 0;
        m->write_latency_sum = 0;

        spin_lock_init(&m->metadata_latency_lock);
        m->metadata_latency_sq_sum = 0;
        m->metadata_latency_min = KTIME_MAX;
        m->metadata_latency_max = 0;
        m->total_metadatas = 0;
        m->metadata_latency_sum = 0;

        atomic64_set(&m->opened_files, 0);
        ret = percpu_counter_init(&m->opened_inodes, 0, GFP_KERNEL);
        if (ret)
                goto err_opened_inodes;
        ret = percpu_counter_init(&m->total_inodes, 0, GFP_KERNEL);
        if (ret)
                goto err_total_inodes;

        m->session = NULL;
        INIT_DELAYED_WORK(&m->delayed_work, metric_delayed_work);

        return 0;

err_total_inodes:
        percpu_counter_destroy(&m->opened_inodes);
err_opened_inodes:
        percpu_counter_destroy(&m->i_caps_mis);
err_i_caps_mis:
        percpu_counter_destroy(&m->i_caps_hit);
err_i_caps_hit:
        percpu_counter_destroy(&m->d_lease_mis);
err_d_lease_mis:
        percpu_counter_destroy(&m->d_lease_hit);

        return ret;
}

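/*
 * Tear down the metrics: cancel the delayed sender first so it can no
 * longer read the percpu counters or the cached session, then destroy the
 * counters and drop the session reference.
 */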
void ceph_metric_destroy(struct ceph_client_metric *m)
{
        if (!m)
                return;

        cancel_delayed_work_sync(&m->delayed_work);

        percpu_counter_destroy(&m->total_inodes);
        percpu_counter_destroy(&m->opened_inodes);
        percpu_counter_destroy(&m->i_caps_mis);
        percpu_counter_destroy(&m->i_caps_hit);
        percpu_counter_destroy(&m->d_lease_mis);
        percpu_counter_destroy(&m->d_lease_hit);

        if (m->session)
                ceph_put_mds_session(m->session);
}

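/*
 * Fold one latency sample into the running statistics: bump the sample
 * count and latency sum, track min/max, and accumulate the sum of squared
 * deviations incrementally as (lat - old_avg) * (lat - new_avg), in the
 * style of Welford's algorithm, so the variance can be derived later
 * without keeping every sample.  The caller must hold the corresponding
 * *_latency_lock.
 */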
static inline void __update_latency(ktime_t *totalp, ktime_t *lsump,
                                    ktime_t *min, ktime_t *max,
                                    ktime_t *sq_sump, ktime_t lat)
{
        ktime_t total, avg, sq, lsum;

        total = ++(*totalp);
        lsum = (*lsump += lat);

        if (unlikely(lat < *min))
                *min = lat;
        if (unlikely(lat > *max))
                *max = lat;

        if (unlikely(total == 1))
                return;

        /* the sq is (lat - old_avg) * (lat - new_avg) */
        avg = DIV64_U64_ROUND_CLOSEST((lsum - lat), (total - 1));
        sq = lat - avg;
        avg = DIV64_U64_ROUND_CLOSEST(lsum, total);
        sq = sq * (lat - avg);
        *sq_sump += sq;
}

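/*
 * Record one read latency sample.  Reads that failed with an error other
 * than -ENOENT or -ETIMEDOUT are not counted.
 */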
void ceph_update_read_latency(struct ceph_client_metric *m,
                              ktime_t r_start, ktime_t r_end,
                              int rc)
{
        ktime_t lat = ktime_sub(r_end, r_start);

        if (unlikely(rc < 0 && rc != -ENOENT && rc != -ETIMEDOUT))
                return;

        spin_lock(&m->read_latency_lock);
        __update_latency(&m->total_reads, &m->read_latency_sum,
                         &m->read_latency_min, &m->read_latency_max,
                         &m->read_latency_sq_sum, lat);
        spin_unlock(&m->read_latency_lock);
}

void ceph_update_write_latency(struct ceph_client_metric *m,
                               ktime_t r_start, ktime_t r_end,
                               int rc)
{
        ktime_t lat = ktime_sub(r_end, r_start);

        if (unlikely(rc && rc != -ETIMEDOUT))
                return;

        spin_lock(&m->write_latency_lock);
        __update_latency(&m->total_writes, &m->write_latency_sum,
                         &m->write_latency_min, &m->write_latency_max,
                         &m->write_latency_sq_sum, lat);
        spin_unlock(&m->write_latency_lock);
}

void ceph_update_metadata_latency(struct ceph_client_metric *m,
                                  ktime_t r_start, ktime_t r_end,
                                  int rc)
{
        ktime_t lat = ktime_sub(r_end, r_start);

        if (unlikely(rc && rc != -ENOENT))
                return;

        spin_lock(&m->metadata_latency_lock);
        __update_latency(&m->total_metadatas, &m->metadata_latency_sum,
                         &m->metadata_latency_min, &m->metadata_latency_max,
                         &m->metadata_latency_sq_sum, lat);
        spin_unlock(&m->metadata_latency_lock);
}