/*
 * Copyright(c) 2020 Cornelis Networks, Inc.
 * Copyright(c) 2015-2018 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */
#include <asm/page.h>
#include <linux/string.h>

#include "mmu_rb.h"
#include "user_exp_rcv.h"
#include "trace.h"

static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
                            struct exp_tid_set *set,
                            struct hfi1_filedata *fd);
static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages);
static int set_rcvarray_entry(struct hfi1_filedata *fd,
                              struct tid_user_buf *tbuf,
                              u32 rcventry, struct tid_group *grp,
                              u16 pageidx, unsigned int npages);
static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
                                    struct tid_rb_node *tnode);
static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq);
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *,
                            struct tid_group *grp,
                            unsigned int start, u16 count,
                            u32 *tidlist, unsigned int *tididx,
                            unsigned int *pmapped);
static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
                              struct tid_group **grp);
static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node);

static const struct mmu_interval_notifier_ops tid_mn_ops = {
        .invalidate = tid_rb_invalidate,
};

/*
 * Initialize context and file private data needed for Expected
 * receive caching. This needs to be done after the context has
 * been configured with the eager/expected RcvEntry counts.
 */
int hfi1_user_exp_rcv_init(struct hfi1_filedata *fd,
                           struct hfi1_ctxtdata *uctxt)
{
        int ret = 0;

        fd->entry_to_rb = kcalloc(uctxt->expected_count,
                                  sizeof(struct rb_node *),
                                  GFP_KERNEL);
        if (!fd->entry_to_rb)
                return -ENOMEM;

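        /*
         * Unless userspace asked for the TID_UNMAP capability (i.e. it will
         * manage unmapping itself), expected-receive mappings are tracked
         * with MMU interval notifiers, which needs an array in which
         * invalidated TIDs are queued until userspace retrieves them.
         */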
        if (!HFI1_CAP_UGET_MASK(uctxt->flags, TID_UNMAP)) {
                fd->invalid_tid_idx = 0;
                fd->invalid_tids = kcalloc(uctxt->expected_count,
                                           sizeof(*fd->invalid_tids),
                                           GFP_KERNEL);
                if (!fd->invalid_tids) {
                        kfree(fd->entry_to_rb);
                        fd->entry_to_rb = NULL;
                        return -ENOMEM;
                }
                fd->use_mn = true;
        }

        /*
         * PSM does not have a good way to separate, count, and
         * effectively enforce a limit on RcvArray entries used by
         * subctxts (when context sharing is used) when TID caching
         * is enabled. To help with that, we calculate a per-process
         * RcvArray entry share and enforce that.
         * If TID caching is not in use, PSM deals with usage on its
         * own. In that case, we allow any subctxt to take all of the
         * entries.
         *
         * Make sure that we set the tid counts only after successful
         * init.
         */
        spin_lock(&fd->tid_lock);
        if (uctxt->subctxt_cnt && fd->use_mn) {
                u16 remainder;

                fd->tid_limit = uctxt->expected_count / uctxt->subctxt_cnt;
                remainder = uctxt->expected_count % uctxt->subctxt_cnt;
                if (remainder && fd->subctxt < remainder)
                        fd->tid_limit++;
        } else {
                fd->tid_limit = uctxt->expected_count;
        }
        spin_unlock(&fd->tid_lock);

        return ret;
}

void hfi1_user_exp_rcv_free(struct hfi1_filedata *fd)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;

        mutex_lock(&uctxt->exp_mutex);
        if (!EXP_TID_SET_EMPTY(uctxt->tid_full_list))
                unlock_exp_tids(uctxt, &uctxt->tid_full_list, fd);
        if (!EXP_TID_SET_EMPTY(uctxt->tid_used_list))
                unlock_exp_tids(uctxt, &uctxt->tid_used_list, fd);
        mutex_unlock(&uctxt->exp_mutex);

        kfree(fd->invalid_tids);
        fd->invalid_tids = NULL;

        kfree(fd->entry_to_rb);
        fd->entry_to_rb = NULL;
}

/*
 * Release pinned receive buffer pages.
 *
 * @mapped: true if the pages have been DMA mapped, false otherwise.
 * @idx: Index of the first page to unpin.
 * @npages: Number of pages to unpin.
 *
 * If the pages have been DMA mapped (indicated by the mapped parameter), their
 * info will be passed via a struct tid_rb_node. If they haven't been mapped,
 * their info will be passed via a struct tid_user_buf.
 */
static void unpin_rcv_pages(struct hfi1_filedata *fd,
                            struct tid_user_buf *tidbuf,
                            struct tid_rb_node *node,
                            unsigned int idx,
                            unsigned int npages,
                            bool mapped)
{
        struct page **pages;
        struct hfi1_devdata *dd = fd->uctxt->dd;
        struct mm_struct *mm;

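        /*
         * DMA-mapped pages are described by the tid_rb_node and may be
         * released outside the pinning task's context (e.g. at context
         * teardown), so use the mm recorded with the node; pages that were
         * never mapped are still described by the tid_user_buf and are
         * released by the task that pinned them.
         */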
        if (mapped) {
                pci_unmap_single(dd->pcidev, node->dma_addr,
                                 node->npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
                pages = &node->pages[idx];
                mm = mm_from_tid_node(node);
        } else {
                pages = &tidbuf->pages[idx];
                mm = current->mm;
        }
        hfi1_release_user_pages(mm, pages, npages, mapped);
        fd->tid_n_pinned -= npages;
}

/*
 * Pin receive buffer pages.
 */
static int pin_rcv_pages(struct hfi1_filedata *fd, struct tid_user_buf *tidbuf)
{
        int pinned;
        unsigned int npages;
        unsigned long vaddr = tidbuf->vaddr;
        struct page **pages = NULL;
        struct hfi1_devdata *dd = fd->uctxt->dd;

        /* Get the number of pages the user buffer spans */
        npages = num_user_pages(vaddr, tidbuf->length);
        if (!npages)
                return -EINVAL;

        if (npages > fd->uctxt->expected_count) {
                dd_dev_err(dd, "Expected buffer too big\n");
                return -EINVAL;
        }

        /* Allocate the array of struct page pointers needed for pinning */
        pages = kcalloc(npages, sizeof(*pages), GFP_KERNEL);
        if (!pages)
                return -ENOMEM;

        /*
         * Pin all the pages of the user buffer. If we can't pin all the
         * pages, accept the amount pinned so far and program only that.
         * User space knows how to deal with partially programmed buffers.
         */
        if (!hfi1_can_pin_pages(dd, current->mm, fd->tid_n_pinned, npages)) {
                kfree(pages);
                return -ENOMEM;
        }

        pinned = hfi1_acquire_user_pages(current->mm, vaddr, npages, true, pages);
        if (pinned <= 0) {
                kfree(pages);
                return pinned;
        }
        tidbuf->pages = pages;
        tidbuf->npages = npages;
        fd->tid_n_pinned += pinned;
        return pinned;
}

/*
 * RcvArray entry allocation for Expected Receives is done by the
 * following algorithm:
 *
 * The context keeps 3 lists of groups of RcvArray entries:
 *   1. List of empty groups - tid_group_list
 *      This list is created during user context creation and
 *      contains elements which describe sets (of 8) of empty
 *      RcvArray entries.
 *   2. List of partially used groups - tid_used_list
 *      This list contains sets of RcvArray entries which are
 *      not completely used up. Another mapping request could
 *      use some or all of the remaining entries.
 *   3. List of full groups - tid_full_list
 *      This is the list where sets that are completely used
 *      up go.
 *
 * An attempt to optimize the usage of RcvArray entries is
 * made by finding all sets of physically contiguous pages in a
 * user's buffer.
 * These physically contiguous sets are further split into
 * sizes supported by the receive engine of the HFI. The
 * resulting sets of pages are stored in struct tid_pageset,
 * which describes the sets as:
 *    * .count - number of pages in this set
 *    * .idx - starting index into struct page ** array
 *                    of this set
 *
 * From this point on, the algorithm deals with the page sets
 * described above. The number of pagesets is divided by the
 * RcvArray group size to produce the number of full groups
 * needed.
 *
 * Groups from the 3 lists are manipulated using the following
 * rules:
 *   1. For each set of 8 pagesets, a complete group from
 *      tid_group_list is taken, programmed, and moved to
 *      the tid_full_list list.
 *   2. For all remaining pagesets:
 *      2.1 If the tid_used_list is empty and the tid_group_list
 *          is empty, stop processing pagesets and return only
 *          what has been programmed up to this point.
 *      2.2 If the tid_used_list is empty and the tid_group_list
 *          is not empty, move a group from tid_group_list to
 *          tid_used_list.
 *      2.3 For each group in tid_used_list, program as much as
 *          can fit into the group. If the group becomes fully
 *          used, move it to tid_full_list.
 */
int hfi1_user_exp_rcv_setup(struct hfi1_filedata *fd,
                            struct hfi1_tid_info *tinfo)
{
        int ret = 0, need_group = 0, pinned;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        unsigned int ngroups, pageidx = 0, pageset_count,
                tididx = 0, mapped, mapped_pages = 0;
        u32 *tidlist = NULL;
        struct tid_user_buf *tidbuf;

        if (!PAGE_ALIGNED(tinfo->vaddr))
                return -EINVAL;

        tidbuf = kzalloc(sizeof(*tidbuf), GFP_KERNEL);
        if (!tidbuf)
                return -ENOMEM;

        tidbuf->vaddr = tinfo->vaddr;
        tidbuf->length = tinfo->length;
        tidbuf->psets = kcalloc(uctxt->expected_count, sizeof(*tidbuf->psets),
                                GFP_KERNEL);
        if (!tidbuf->psets) {
                kfree(tidbuf);
                return -ENOMEM;
        }

        pinned = pin_rcv_pages(fd, tidbuf);
        if (pinned <= 0) {
                kfree(tidbuf->psets);
                kfree(tidbuf);
                return pinned;
        }

        /* Find sets of physically contiguous pages */
        tidbuf->n_psets = find_phys_blocks(tidbuf, pinned);

        /*
         * We don't need to access this under a lock since tid_used is per
         * process and the same process cannot be in hfi1_user_exp_rcv_clear()
         * and hfi1_user_exp_rcv_setup() at the same time.
         */
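        /* Clamp the request to this process's remaining share of entries. */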
        spin_lock(&fd->tid_lock);
        if (fd->tid_used + tidbuf->n_psets > fd->tid_limit)
                pageset_count = fd->tid_limit - fd->tid_used;
        else
                pageset_count = tidbuf->n_psets;
        spin_unlock(&fd->tid_lock);

        if (!pageset_count)
                goto bail;

        ngroups = pageset_count / dd->rcv_entries.group_size;
        tidlist = kcalloc(pageset_count, sizeof(*tidlist), GFP_KERNEL);
        if (!tidlist) {
                ret = -ENOMEM;
                goto nomem;
        }

        tididx = 0;

        /*
         * From this point on, we are going to be using shared (between master
         * and subcontexts) context resources. We need to take the lock.
         */
        mutex_lock(&uctxt->exp_mutex);
        /*
         * The first step is to program the RcvArray entries which are complete
         * groups.
         */
        while (ngroups && uctxt->tid_group_list.count) {
                struct tid_group *grp =
                        tid_group_pop(&uctxt->tid_group_list);

                ret = program_rcvarray(fd, tidbuf, grp,
                                       pageidx, dd->rcv_entries.group_size,
                                       tidlist, &tididx, &mapped);
                /*
                 * If there was a failure to program the RcvArray
                 * entries for the entire group, reset the grp fields
                 * and add the grp back to the free group list.
                 */
                if (ret <= 0) {
                        tid_group_add_tail(grp, &uctxt->tid_group_list);
                        hfi1_cdbg(TID,
                                  "Failed to program RcvArray group %d", ret);
                        goto unlock;
                }

                tid_group_add_tail(grp, &uctxt->tid_full_list);
                ngroups--;
                pageidx += ret;
                mapped_pages += mapped;
        }

        while (pageidx < pageset_count) {
                struct tid_group *grp, *ptr;
                /*
                 * If we don't have any partially used tid groups, check
                 * if we have empty groups. If so, take one from there and
                 * put it in the partially used list.
                 */
                if (!uctxt->tid_used_list.count || need_group) {
                        if (!uctxt->tid_group_list.count)
                                goto unlock;

                        grp = tid_group_pop(&uctxt->tid_group_list);
                        tid_group_add_tail(grp, &uctxt->tid_used_list);
                        need_group = 0;
                }
                /*
                 * There is an optimization opportunity here - instead of
                 * fitting as many page sets as we can, check for a group
                 * later on in the list that could fit all of them.
                 */
                list_for_each_entry_safe(grp, ptr, &uctxt->tid_used_list.list,
                                         list) {
                        unsigned use = min_t(unsigned, pageset_count - pageidx,
                                             grp->size - grp->used);

                        ret = program_rcvarray(fd, tidbuf, grp,
                                               pageidx, use, tidlist,
                                               &tididx, &mapped);
                        if (ret < 0) {
                                hfi1_cdbg(TID,
                                          "Failed to program RcvArray entries %d",
                                          ret);
                                goto unlock;
                        } else if (ret > 0) {
                                if (grp->used == grp->size)
                                        tid_group_move(grp,
                                                       &uctxt->tid_used_list,
                                                       &uctxt->tid_full_list);
                                pageidx += ret;
                                mapped_pages += mapped;
                                need_group = 0;
                                /* Check if we are done so we break out early */
                                if (pageidx >= pageset_count)
                                        break;
                        } else if (WARN_ON(ret == 0)) {
                                /*
                                 * If ret is 0, we did not program any entries
                                 * into this group, which can only happen if
                                 * we've screwed up the accounting somewhere.
                                 * Warn and try to continue.
                                 */
                                need_group = 1;
                        }
                }
        }
unlock:
        mutex_unlock(&uctxt->exp_mutex);
nomem:
        hfi1_cdbg(TID, "total mapped: tidpairs:%u pages:%u (%d)", tididx,
                  mapped_pages, ret);
        if (tididx) {
                spin_lock(&fd->tid_lock);
                fd->tid_used += tididx;
                spin_unlock(&fd->tid_lock);
                tinfo->tidcnt = tididx;
                tinfo->length = mapped_pages * PAGE_SIZE;

                if (copy_to_user(u64_to_user_ptr(tinfo->tidlist),
                                 tidlist, sizeof(tidlist[0]) * tididx)) {
                        /*
                         * On failure to copy to the user level, we need to undo
                         * everything done so far so we don't leak resources.
                         */
                        tinfo->tidlist = (unsigned long)&tidlist;
                        hfi1_user_exp_rcv_clear(fd, tinfo);
                        tinfo->tidlist = 0;
                        ret = -EFAULT;
                        goto bail;
                }
        }

        /*
         * If not everything was mapped (due to insufficient RcvArray entries,
         * for example), unpin all unmapped pages so we can pin them next time.
         */
        if (mapped_pages != pinned)
                unpin_rcv_pages(fd, tidbuf, NULL, mapped_pages,
                                (pinned - mapped_pages), false);
bail:
        kfree(tidbuf->psets);
        kfree(tidlist);
        kfree(tidbuf->pages);
        kfree(tidbuf);
        return ret > 0 ? 0 : ret;
}

int hfi1_user_exp_rcv_clear(struct hfi1_filedata *fd,
                            struct hfi1_tid_info *tinfo)
{
        int ret = 0;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        u32 *tidinfo;
        unsigned tididx;

        if (unlikely(tinfo->tidcnt > fd->tid_used))
                return -EINVAL;

        tidinfo = memdup_user(u64_to_user_ptr(tinfo->tidlist),
                              sizeof(tidinfo[0]) * tinfo->tidcnt);
        if (IS_ERR(tidinfo))
                return PTR_ERR(tidinfo);

        mutex_lock(&uctxt->exp_mutex);
        for (tididx = 0; tididx < tinfo->tidcnt; tididx++) {
                ret = unprogram_rcvarray(fd, tidinfo[tididx], NULL);
                if (ret) {
                        hfi1_cdbg(TID, "Failed to unprogram rcv array %d",
                                  ret);
                        break;
                }
        }
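        /* Only account for the entries that were actually unprogrammed. */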
        spin_lock(&fd->tid_lock);
        fd->tid_used -= tididx;
        spin_unlock(&fd->tid_lock);
        tinfo->tidcnt = tididx;
        mutex_unlock(&uctxt->exp_mutex);

        kfree(tidinfo);
        return ret;
}

int hfi1_user_exp_rcv_invalid(struct hfi1_filedata *fd,
                              struct hfi1_tid_info *tinfo)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        unsigned long *ev = uctxt->dd->events +
                (uctxt_offset(uctxt) + fd->subctxt);
        u32 *array;
        int ret = 0;

        /*
         * copy_to_user() can sleep, which will leave the invalid_lock
         * locked and cause the MMU notifier to be blocked on the lock
         * for a long time.
         * Copy the data to a local buffer so we can release the lock.
         */
        array = kcalloc(uctxt->expected_count, sizeof(*array), GFP_KERNEL);
        if (!array)
                return -EFAULT;

        spin_lock(&fd->invalid_lock);
        if (fd->invalid_tid_idx) {
                memcpy(array, fd->invalid_tids, sizeof(*array) *
                       fd->invalid_tid_idx);
                memset(fd->invalid_tids, 0, sizeof(*fd->invalid_tids) *
                       fd->invalid_tid_idx);
                tinfo->tidcnt = fd->invalid_tid_idx;
                fd->invalid_tid_idx = 0;
                /*
                 * Reset the user flag while still holding the lock.
                 * Otherwise, PSM can miss events.
                 */
                clear_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
        } else {
                tinfo->tidcnt = 0;
        }
        spin_unlock(&fd->invalid_lock);

        if (tinfo->tidcnt) {
                if (copy_to_user((void __user *)tinfo->tidlist,
                                 array, sizeof(*array) * tinfo->tidcnt))
                        ret = -EFAULT;
        }
        kfree(array);

        return ret;
}

static u32 find_phys_blocks(struct tid_user_buf *tidbuf, unsigned int npages)
{
        unsigned pagecount, pageidx, setcount = 0, i;
        unsigned long pfn, this_pfn;
        struct page **pages = tidbuf->pages;
        struct tid_pageset *list = tidbuf->psets;

        if (!npages)
                return 0;

        /*
         * Look for sets of physically contiguous pages in the user buffer.
         * This will allow us to optimize Expected RcvArray entry usage by
         * using the bigger supported sizes.
         */
        pfn = page_to_pfn(pages[0]);
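        /*
         * Walk one entry past the end of the page array: the pfn of 0 used
         * for that final iteration never matches ++pfn, which forces the
         * last run of contiguous pages to be flushed into pagesets.
         */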
        for (pageidx = 0, pagecount = 1, i = 1; i <= npages; i++) {
                this_pfn = i < npages ? page_to_pfn(pages[i]) : 0;

                /*
                 * If the pfn's are not sequential, pages are not physically
                 * contiguous.
                 */
                if (this_pfn != ++pfn) {
                        /*
                         * At this point we have to loop over the set of
                         * physically contiguous pages and break them down
                         * into sizes supported by the HW.
                         * There are two main constraints:
                         *     1. The max buffer size is MAX_EXPECTED_BUFFER.
                         *        If the total set size is bigger than that,
                         *        program only a MAX_EXPECTED_BUFFER chunk.
                         *     2. The buffer size has to be a power of two. If
                         *        it is not, round down to the closest power
                         *        of 2 and program that size.
                         */
                        while (pagecount) {
                                int maxpages = pagecount;
                                u32 bufsize = pagecount * PAGE_SIZE;

                                if (bufsize > MAX_EXPECTED_BUFFER)
                                        maxpages =
                                                MAX_EXPECTED_BUFFER >>
                                                PAGE_SHIFT;
                                else if (!is_power_of_2(bufsize))
                                        maxpages =
                                                rounddown_pow_of_two(bufsize) >>
                                                PAGE_SHIFT;

                                list[setcount].idx = pageidx;
                                list[setcount].count = maxpages;
                                pagecount -= maxpages;
                                pageidx += maxpages;
                                setcount++;
                        }
                        pageidx = i;
                        pagecount = 1;
                        pfn = this_pfn;
                } else {
                        pagecount++;
                }
        }
        return setcount;
}

/**
 * program_rcvarray() - program an RcvArray group with receive buffers
 * @fd: filedata pointer
 * @tbuf: pointer to struct tid_user_buf that has the user buffer starting
 *        virtual address, buffer length, page pointers, pagesets (array of
 *        struct tid_pageset holding information on physically contiguous
 *        chunks from the user buffer), and other fields.
 * @grp: RcvArray group
 * @start: starting index into the sets array
 * @count: number of struct tid_pageset's to program
 * @tidlist: the array of u32 elements where the information about the
 *           programmed RcvArray entries is to be encoded.
 * @tididx: starting offset into tidlist
 * @pmapped: (output parameter) number of pages programmed into the RcvArray
 *           entries.
 *
 * This function will program up to 'count' number of RcvArray entries from the
 * group 'grp'. To make best use of write-combining writes, the function will
 * perform writes to the unused RcvArray entries which will be ignored by the
 * HW. Each RcvArray entry will be programmed with a physically contiguous
 * buffer chunk from the user's virtual buffer.
 *
 * Return:
 * -EINVAL if the requested count is larger than the size of the group,
 * -ENOMEM or -EFAULT on error from set_rcvarray_entry(), or
 * number of RcvArray entries programmed.
 */
static int program_rcvarray(struct hfi1_filedata *fd, struct tid_user_buf *tbuf,
                            struct tid_group *grp,
                            unsigned int start, u16 count,
                            u32 *tidlist, unsigned int *tididx,
                            unsigned int *pmapped)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        u16 idx;
        u32 tidinfo = 0, rcventry, useidx = 0;
        int mapped = 0;

        /* Count should never be larger than the group size */
        if (count > grp->size)
                return -EINVAL;

        /* Find the first unused entry in the group */
        for (idx = 0; idx < grp->size; idx++) {
                if (!(grp->map & (1 << idx))) {
                        useidx = idx;
                        break;
                }
                rcv_array_wc_fill(dd, grp->base + idx);
        }

        idx = 0;
        while (idx < count) {
                u16 npages, pageidx, setidx = start + idx;
                int ret = 0;

                /*
                 * If this entry in the group is used, move to the next one.
                 * If we go past the end of the group, exit the loop.
                 */
                if (useidx >= grp->size) {
                        break;
                } else if (grp->map & (1 << useidx)) {
                        rcv_array_wc_fill(dd, grp->base + useidx);
                        useidx++;
                        continue;
                }

                rcventry = grp->base + useidx;
                npages = tbuf->psets[setidx].count;
                pageidx = tbuf->psets[setidx].idx;

                ret = set_rcvarray_entry(fd, tbuf,
                                         rcventry, grp, pageidx,
                                         npages);
                if (ret)
                        return ret;
                mapped += npages;

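                /*
                 * Encode this RcvArray entry (index/control) and its length
                 * in pages into the TID word handed back to userspace.
                 */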
                tidinfo = rcventry2tidinfo(rcventry - uctxt->expected_base) |
                        EXP_TID_SET(LEN, npages);
                tidlist[(*tididx)++] = tidinfo;
                grp->used++;
                grp->map |= 1 << useidx++;
                idx++;
        }

        /* Fill the rest of the group with "blank" writes */
        for (; useidx < grp->size; useidx++)
                rcv_array_wc_fill(dd, grp->base + useidx);
        *pmapped = mapped;
        return idx;
}

static int set_rcvarray_entry(struct hfi1_filedata *fd,
                              struct tid_user_buf *tbuf,
                              u32 rcventry, struct tid_group *grp,
                              u16 pageidx, unsigned int npages)
{
        int ret;
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct tid_rb_node *node;
        struct hfi1_devdata *dd = uctxt->dd;
        dma_addr_t phys;
        struct page **pages = tbuf->pages + pageidx;

        /*
         * Allocate the node first so we can handle a potential
         * failure before we've programmed anything.
         */
        node = kzalloc(sizeof(*node) + (sizeof(struct page *) * npages),
                       GFP_KERNEL);
        if (!node)
                return -ENOMEM;

        phys = pci_map_single(dd->pcidev,
                              __va(page_to_phys(pages[0])),
                              npages * PAGE_SIZE, PCI_DMA_FROMDEVICE);
        if (dma_mapping_error(&dd->pcidev->dev, phys)) {
                dd_dev_err(dd, "Failed to DMA map Exp Rcv pages 0x%llx\n",
                           phys);
                kfree(node);
                return -EFAULT;
        }

        node->fdata = fd;
        node->phys = page_to_phys(pages[0]);
        node->npages = npages;
        node->rcventry = rcventry;
        node->dma_addr = phys;
        node->grp = grp;
        node->freed = false;
        memcpy(node->pages, pages, sizeof(struct page *) * npages);

        if (fd->use_mn) {
                ret = mmu_interval_notifier_insert(
                        &node->notifier, current->mm,
                        tbuf->vaddr + (pageidx * PAGE_SIZE), npages * PAGE_SIZE,
                        &tid_mn_ops);
                if (ret)
                        goto out_unmap;
                /*
                 * FIXME: This is in the wrong order, the notifier should be
                 * established before the pages are pinned by pin_rcv_pages.
                 */
                mmu_interval_read_begin(&node->notifier);
        }
        fd->entry_to_rb[node->rcventry - uctxt->expected_base] = node;

        hfi1_put_tid(dd, rcventry, PT_EXPECTED, phys, ilog2(npages) + 1);
        trace_hfi1_exp_tid_reg(uctxt->ctxt, fd->subctxt, rcventry, npages,
                               node->notifier.interval_tree.start, node->phys,
                               phys);
        return 0;

out_unmap:
        hfi1_cdbg(TID, "Failed to insert RB node %u 0x%lx, 0x%lx %d",
                  node->rcventry, node->notifier.interval_tree.start,
                  node->phys, ret);
        pci_unmap_single(dd->pcidev, phys, npages * PAGE_SIZE,
                         PCI_DMA_FROMDEVICE);
        kfree(node);
        return -EFAULT;
}

static int unprogram_rcvarray(struct hfi1_filedata *fd, u32 tidinfo,
                              struct tid_group **grp)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;
        struct tid_rb_node *node;
        u8 tidctrl = EXP_TID_GET(tidinfo, CTRL);
        u32 tididx = EXP_TID_GET(tidinfo, IDX) << 1, rcventry;

        if (tididx >= uctxt->expected_count) {
                dd_dev_err(dd, "Invalid RcvArray entry (%u) index for ctxt %u\n",
                           tididx, uctxt->ctxt);
                return -EINVAL;
        }

        if (tidctrl == 0x3)
                return -EINVAL;

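        /*
         * RcvArray entries are handled in pairs: tidctrl 1 selects the even
         * entry of the pair and 2 the odd one, so the offset within the pair
         * is tidctrl - 1. A tidctrl of 3 (both entries) is not a valid
         * selector for unprogramming a single entry.
         */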
        rcventry = tididx + (tidctrl - 1);

        node = fd->entry_to_rb[rcventry];
        if (!node || node->rcventry != (uctxt->expected_base + rcventry))
                return -EBADF;

        if (grp)
                *grp = node->grp;

        if (fd->use_mn)
                mmu_interval_notifier_remove(&node->notifier);
        cacheless_tid_rb_remove(fd, node);

        return 0;
}

static void clear_tid_node(struct hfi1_filedata *fd, struct tid_rb_node *node)
{
        struct hfi1_ctxtdata *uctxt = fd->uctxt;
        struct hfi1_devdata *dd = uctxt->dd;

        trace_hfi1_exp_tid_unreg(uctxt->ctxt, fd->subctxt, node->rcventry,
                                 node->npages,
                                 node->notifier.interval_tree.start, node->phys,
                                 node->dma_addr);

        /*
         * Make sure device has seen the write before we unpin the
         * pages.
         */
        hfi1_put_tid(dd, node->rcventry, PT_INVALID_FLUSH, 0, 0);

        unpin_rcv_pages(fd, NULL, node, 0, node->npages, true);

        node->grp->used--;
        node->grp->map &= ~(1 << (node->rcventry - node->grp->base));

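        /*
         * Freeing this entry may demote its group: a previously full group
         * becomes partially used again, and a group with no entries left in
         * use goes back on the empty group list.
         */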
        if (node->grp->used == node->grp->size - 1)
                tid_group_move(node->grp, &uctxt->tid_full_list,
                               &uctxt->tid_used_list);
        else if (!node->grp->used)
                tid_group_move(node->grp, &uctxt->tid_used_list,
                               &uctxt->tid_group_list);
        kfree(node);
}

/*
 * As a simple helper for hfi1_user_exp_rcv_free, this function deals with
 * clearing nodes in the non-cached case.
 */
static void unlock_exp_tids(struct hfi1_ctxtdata *uctxt,
                            struct exp_tid_set *set,
                            struct hfi1_filedata *fd)
{
        struct tid_group *grp, *ptr;
        int i;

        list_for_each_entry_safe(grp, ptr, &set->list, list) {
                list_del_init(&grp->list);

                for (i = 0; i < grp->size; i++) {
                        if (grp->map & (1 << i)) {
                                u16 rcventry = grp->base + i;
                                struct tid_rb_node *node;

                                node = fd->entry_to_rb[rcventry -
                                                          uctxt->expected_base];
                                if (!node || node->rcventry != rcventry)
                                        continue;

                                if (fd->use_mn)
                                        mmu_interval_notifier_remove(
                                                &node->notifier);
                                cacheless_tid_rb_remove(fd, node);
                        }
                }
        }
}

static bool tid_rb_invalidate(struct mmu_interval_notifier *mni,
                              const struct mmu_notifier_range *range,
                              unsigned long cur_seq)
{
        struct tid_rb_node *node =
                container_of(mni, struct tid_rb_node, notifier);
        struct hfi1_filedata *fdata = node->fdata;
        struct hfi1_ctxtdata *uctxt = fdata->uctxt;

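        /* The node was already invalidated by a previous callback; nothing to do. */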
        if (node->freed)
                return true;

        trace_hfi1_exp_tid_inval(uctxt->ctxt, fdata->subctxt,
                                 node->notifier.interval_tree.start,
                                 node->rcventry, node->npages, node->dma_addr);
        node->freed = true;

        spin_lock(&fdata->invalid_lock);
        if (fdata->invalid_tid_idx < uctxt->expected_count) {
                fdata->invalid_tids[fdata->invalid_tid_idx] =
                        rcventry2tidinfo(node->rcventry - uctxt->expected_base);
                fdata->invalid_tids[fdata->invalid_tid_idx] |=
                        EXP_TID_SET(LEN, node->npages);
                if (!fdata->invalid_tid_idx) {
                        unsigned long *ev;

                        /*
                         * hfi1_set_uevent_bits() sets a user event flag
                         * for all processes. Because calling into the
                         * driver to process TID cache invalidations is
                         * expensive and TID cache invalidations are
                         * handled on a per-process basis, we can
                         * optimize this to set the flag only for the
                         * process in question.
                         */
                        ev = uctxt->dd->events +
                                (uctxt_offset(uctxt) + fdata->subctxt);
                        set_bit(_HFI1_EVENT_TID_MMU_NOTIFY_BIT, ev);
                }
                fdata->invalid_tid_idx++;
        }
        spin_unlock(&fdata->invalid_lock);
        return true;
}

static void cacheless_tid_rb_remove(struct hfi1_filedata *fdata,
                                    struct tid_rb_node *tnode)
{
        u32 base = fdata->uctxt->expected_base;

        fdata->entry_to_rb[tnode->rcventry - base] = NULL;
        clear_tid_node(fdata, tnode);
}