// SPDX-License-Identifier: GPL-2.0

/* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
 * Copyright (C) 2019-2020 Linaro Ltd.
 */
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
15 #include "gsi_trans.h"
18 #include "ipa_endpoint.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
24 #include "ipa_clock.h"
26 #define atomic_dec_not_zero(v) atomic_add_unless((v), -1, 0)
28 #define IPA_REPLENISH_BATCH 16
30 /* RX buffer is 1 page (or a power-of-2 contiguous pages) */
31 #define IPA_RX_BUFFER_SIZE 8192 /* PAGE_SIZE > 4096 wastes a LOT */
33 /* The amount of RX buffer space consumed by standard skb overhead */
34 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
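/* IPA_RX_BUFFER_OVERHEAD expands to the skb headroom plus the shared info
 * that build_skb() places at the end of the buffer, i.e.:
 *
 *	NET_SKB_PAD + SKB_DATA_ALIGN(sizeof(struct skb_shared_info))
 *
 * which is typically a few hundred bytes (the exact value depends on the
 * kernel configuration).
 */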
36 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
37 #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
39 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
40 #define IPA_AGGR_TIME_LIMIT 500 /* microseconds */
42 /** enum ipa_status_opcode - status element opcode hardware values */
43 enum ipa_status_opcode {
44 IPA_STATUS_OPCODE_PACKET = 0x01,
45 IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
46 IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
	IPA_STATUS_OPCODE_PACKET_2ND_PASS	= 0x40,
};
50 /** enum ipa_status_exception - status element exception type */
51 enum ipa_status_exception {
52 /* 0 means no exception */
	IPA_STATUS_EXCEPTION_DEAGGR		= 0x01,
};
/* Status element provided by hardware */
struct ipa_status {
	u8 opcode;		/* enum ipa_status_opcode */
	u8 exception;		/* enum ipa_status_exception */
	__le16 mask;
	__le16 pkt_len;
	u8 endp_src_idx;
	u8 endp_dst_idx;
	__le32 metadata;
	__le32 flags1;
	__le64 flags2;
	__le32 flags3;
	__le32 flags4;
};
71 /* Field masks for struct ipa_status structure fields */
72 #define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
73 #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
74 #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
75 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
76 #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
80 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
81 const struct ipa_gsi_endpoint_data *all_data,
82 const struct ipa_gsi_endpoint_data *data)
84 const struct ipa_gsi_endpoint_data *other_data;
85 struct device *dev = &ipa->pdev->dev;
86 enum ipa_endpoint_name other_name;
	if (ipa_gsi_endpoint_data_empty(data))
		return true;
91 if (!data->toward_ipa) {
92 if (data->endpoint.filter_support) {
93 dev_err(dev, "filtering not supported for "
99 return true; /* Nothing more to check for RX */
102 if (data->endpoint.config.status_enable) {
103 other_name = data->endpoint.config.tx.status_endpoint;
104 if (other_name >= count) {
105 dev_err(dev, "status endpoint name %u out of range "
107 other_name, data->endpoint_id);
111 /* Status endpoint must be defined... */
112 other_data = &all_data[other_name];
113 if (ipa_gsi_endpoint_data_empty(other_data)) {
			dev_err(dev, "status endpoint name %u undefined "
					"for endpoint %u\n",
116 other_name, data->endpoint_id);
120 /* ...and has to be an RX endpoint... */
121 if (other_data->toward_ipa) {
123 "status endpoint for endpoint %u not RX\n",
128 /* ...and if it's to be an AP endpoint... */
129 if (other_data->ee_id == GSI_EE_AP) {
130 /* ...make sure it has status enabled. */
131 if (!other_data->endpoint.config.status_enable) {
133 "status not enabled for endpoint %u\n",
134 other_data->endpoint_id);
140 if (data->endpoint.config.dma_mode) {
141 other_name = data->endpoint.config.dma_endpoint;
142 if (other_name >= count) {
143 dev_err(dev, "DMA endpoint name %u out of range "
145 other_name, data->endpoint_id);
149 other_data = &all_data[other_name];
150 if (ipa_gsi_endpoint_data_empty(other_data)) {
151 dev_err(dev, "DMA endpoint name %u undefined "
153 other_name, data->endpoint_id);
161 static u32 aggr_byte_limit_max(enum ipa_version version)
163 if (version < IPA_VERSION_4_5)
164 return field_max(aggr_byte_limit_fmask(true));
166 return field_max(aggr_byte_limit_fmask(false));
169 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
170 const struct ipa_gsi_endpoint_data *data)
172 const struct ipa_gsi_endpoint_data *dp = data;
173 struct device *dev = &ipa->pdev->dev;
174 enum ipa_endpoint_name name;
	/* Not sure where this constraint comes from... */
178 BUILD_BUG_ON(sizeof(struct ipa_status) % 4);
180 if (count > IPA_ENDPOINT_COUNT) {
181 dev_err(dev, "too many endpoints specified (%u > %u)\n",
182 count, IPA_ENDPOINT_COUNT);
186 /* The aggregation byte limit defines the point at which an
187 * aggregation window will close. It is programmed into the
188 * IPA hardware as a number of KB. We don't use "hard byte
189 * limit" aggregation, which means that we need to supply
190 * enough space in a receive buffer to hold a complete MTU
191 * plus normal skb overhead *after* that aggregation byte
192 * limit has been crossed.
194 * This check ensures we don't define a receive buffer size
195 * that would exceed what we can represent in the field that
	 * is used to program its size.
	 */
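	/* As a rough worked example (illustrative numbers): if the pre-v4.5
	 * byte limit field can hold values up to 31 (KB), the largest
	 * programmable limit is 31 * 1024 bytes.  Adding an IPA_MTU of
	 * roughly 1500 bytes plus a few hundred bytes of overhead yields a
	 * bound far above the 8192-byte IPA_RX_BUFFER_SIZE, so this check
	 * passes comfortably.
	 */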
198 limit = aggr_byte_limit_max(ipa->version) * SZ_1K;
199 limit += IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
200 if (limit < IPA_RX_BUFFER_SIZE) {
201 dev_err(dev, "buffer size too big for aggregation (%u > %u)\n",
202 IPA_RX_BUFFER_SIZE, limit);
206 /* Make sure needed endpoints have defined data */
207 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
208 dev_err(dev, "command TX endpoint not defined\n");
211 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
212 dev_err(dev, "LAN RX endpoint not defined\n");
215 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
216 dev_err(dev, "AP->modem TX endpoint not defined\n");
219 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
220 dev_err(dev, "AP<-modem RX endpoint not defined\n");
224 for (name = 0; name < count; name++, dp++)
225 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
231 #else /* !IPA_VALIDATE */
233 static bool ipa_endpoint_data_valid(struct ipa *ipa, u32 count,
234 const struct ipa_gsi_endpoint_data *data)
239 #endif /* !IPA_VALIDATE */
241 /* Allocate a transaction to use on a non-command endpoint */
242 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
245 struct gsi *gsi = &endpoint->ipa->gsi;
246 u32 channel_id = endpoint->channel_id;
247 enum dma_data_direction direction;
249 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
251 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
254 /* suspend_delay represents suspend for RX, delay for TX endpoints.
255 * Note that suspend is not supported starting with IPA v4.0.
258 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
260 u32 offset = IPA_REG_ENDP_INIT_CTRL_N_OFFSET(endpoint->endpoint_id);
261 struct ipa *ipa = endpoint->ipa;
266 /* Suspend is not supported for IPA v4.0+. Delay doesn't work
267 * correctly on IPA v4.2.
	 * if (endpoint->toward_ipa)
	 *	assert(ipa->version != IPA_VERSION_4_2);
	 * else
	 *	assert(ipa->version == IPA_VERSION_3_5_1);
	 */
274 mask = endpoint->toward_ipa ? ENDP_DELAY_FMASK : ENDP_SUSPEND_FMASK;
276 val = ioread32(ipa->reg_virt + offset);
277 /* Don't bother if it's already in the requested state */
278 state = !!(val & mask);
279 if (suspend_delay != state) {
281 iowrite32(val, ipa->reg_virt + offset);
287 /* We currently don't care what the previous state was for delay mode */
289 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
291 /* assert(endpoint->toward_ipa); */
293 /* Delay mode doesn't work properly for IPA v4.2 */
294 if (endpoint->ipa->version != IPA_VERSION_4_2)
295 (void)ipa_endpoint_init_ctrl(endpoint, enable);
298 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
300 u32 mask = BIT(endpoint->endpoint_id);
301 struct ipa *ipa = endpoint->ipa;
305 /* assert(mask & ipa->available); */
306 offset = ipa_reg_state_aggr_active_offset(ipa->version);
307 val = ioread32(ipa->reg_virt + offset);
309 return !!(val & mask);
312 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
314 u32 mask = BIT(endpoint->endpoint_id);
315 struct ipa *ipa = endpoint->ipa;
317 /* assert(mask & ipa->available); */
318 iowrite32(mask, ipa->reg_virt + IPA_REG_AGGR_FORCE_CLOSE_OFFSET);
322 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
323 * @endpoint: Endpoint on which to emulate a suspend
325 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
326 * with an open aggregation frame. This is to work around a hardware
327 * issue in IPA version 3.5.1 where the suspend interrupt will not be
 * generated when it should be.
 */
330 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
332 struct ipa *ipa = endpoint->ipa;
334 if (!endpoint->data->aggregation)
337 /* Nothing to do if the endpoint doesn't have aggregation open */
338 if (!ipa_endpoint_aggr_active(endpoint))
341 /* Force close aggregation */
342 ipa_endpoint_force_close(endpoint);
344 ipa_interrupt_simulate_suspend(ipa->interrupt);
347 /* Returns previous suspend state (true means suspend was enabled) */
349 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
353 if (endpoint->ipa->version != IPA_VERSION_3_5_1)
354 return enable; /* For IPA v4.0+, no change made */
356 /* assert(!endpoint->toward_ipa); */
358 suspended = ipa_endpoint_init_ctrl(endpoint, enable);
360 /* A client suspended with an open aggregation frame will not
361 * generate a SUSPEND IPA interrupt. If enabling suspend, have
362 * ipa_endpoint_suspend_aggr() handle this.
364 if (enable && !suspended)
365 ipa_endpoint_suspend_aggr(endpoint);
370 /* Enable or disable delay or suspend mode on all modem endpoints */
371 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
375 /* DELAY mode doesn't work correctly on IPA v4.2 */
	if (ipa->version == IPA_VERSION_4_2)
		return;
379 for (endpoint_id = 0; endpoint_id < IPA_ENDPOINT_MAX; endpoint_id++) {
380 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id];
		if (endpoint->ee_id != GSI_EE_MODEM)
			continue;
385 /* Set TX delay mode or RX suspend mode */
386 if (endpoint->toward_ipa)
			ipa_endpoint_program_delay(endpoint, enable);
		else
			(void)ipa_endpoint_program_suspend(endpoint, enable);
393 /* Reset all modem endpoints to use the default exception endpoint */
394 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
396 u32 initialized = ipa->initialized;
397 struct gsi_trans *trans;
400 /* We need one command per modem TX endpoint. We can get an upper
401 * bound on that by assuming all initialized endpoints are modem->IPA.
402 * That won't happen, and we could be more precise, but this is fine
	 * for now.  We also need the commands used to clear the pipeline.
	 */
405 count = hweight32(initialized) + ipa_cmd_pipeline_clear_count();
406 trans = ipa_cmd_trans_alloc(ipa, count);
408 dev_err(&ipa->pdev->dev,
409 "no transaction to reset modem exception endpoints\n");
413 while (initialized) {
414 u32 endpoint_id = __ffs(initialized);
415 struct ipa_endpoint *endpoint;
418 initialized ^= BIT(endpoint_id);
420 /* We only reset modem TX endpoints */
421 endpoint = &ipa->endpoint[endpoint_id];
		if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
			continue;
425 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
427 /* Value written is 0, and all bits are updated. That
428 * means status is disabled on the endpoint, and as a
429 * result all other fields in the register are ignored.
431 ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
434 ipa_cmd_pipeline_clear_add(trans);
436 /* XXX This should have a 1 second timeout */
437 gsi_trans_commit_wait(trans);
439 ipa_cmd_pipeline_clear_wait(ipa);
444 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
446 u32 offset = IPA_REG_ENDP_INIT_CFG_N_OFFSET(endpoint->endpoint_id);
449 /* FRAG_OFFLOAD_EN is 0 */
450 if (endpoint->data->checksum) {
451 if (endpoint->toward_ipa) {
			u32 checksum_offset;

			val |= u32_encode_bits(IPA_CS_OFFLOAD_UL,
					       CS_OFFLOAD_EN_FMASK);
			/* Checksum header offset is in 4-byte units */
			checksum_offset = sizeof(struct rmnet_map_header);
			checksum_offset /= sizeof(u32);
			val |= u32_encode_bits(checksum_offset,
					       CS_METADATA_HDR_OFFSET_FMASK);
		} else {
			val |= u32_encode_bits(IPA_CS_OFFLOAD_DL,
					       CS_OFFLOAD_EN_FMASK);
		}
	} else {
		val |= u32_encode_bits(IPA_CS_OFFLOAD_NONE,
				       CS_OFFLOAD_EN_FMASK);
	}
469 /* CS_GEN_QMB_MASTER_SEL is 0 */
471 iowrite32(val, endpoint->ipa->reg_virt + offset);
475 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
476 * @endpoint: Endpoint pointer
478 * We program QMAP endpoints so each packet received is preceded by a QMAP
479 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
480 * packet size field, and we have the IPA hardware populate both for each
481 * received packet. The header is configured (in the HDR_EXT register)
482 * to use big endian format.
484 * The packet size is written into the QMAP header's pkt_len field. That
485 * location is defined here using the HDR_OFST_PKT_SIZE field.
487 * The mux_id comes from a 4-byte metadata value supplied with each packet
488 * by the modem. It is *not* a QMAP header, but it does contain the mux_id
489 * value that we want, in its low-order byte. A bitmask defined in the
490 * endpoint's METADATA_MASK register defines which byte within the modem
491 * metadata contains the mux_id. And the OFST_METADATA field programmed
 * here indicates where the extracted byte should be placed within the QMAP
 * header.
 */
495 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
497 u32 offset = IPA_REG_ENDP_INIT_HDR_N_OFFSET(endpoint->endpoint_id);
498 struct ipa *ipa = endpoint->ipa;
501 if (endpoint->data->qmap) {
502 size_t header_size = sizeof(struct rmnet_map_header);
503 enum ipa_version version = ipa->version;
505 /* We might supply a checksum header after the QMAP header */
506 if (endpoint->toward_ipa && endpoint->data->checksum)
507 header_size += sizeof(struct rmnet_map_ul_csum_header);
508 val |= ipa_header_size_encoded(version, header_size);
510 /* Define how to fill fields in a received QMAP header */
511 if (!endpoint->toward_ipa) {
512 u32 offset; /* Field offset within header */
514 /* Where IPA will write the metadata value */
515 offset = offsetof(struct rmnet_map_header, mux_id);
516 val |= ipa_metadata_offset_encoded(version, offset);
518 /* Where IPA will write the length */
519 offset = offsetof(struct rmnet_map_header, pkt_len);
520 /* Upper bits are stored in HDR_EXT with IPA v4.5 */
521 if (version == IPA_VERSION_4_5)
522 offset &= field_mask(HDR_OFST_PKT_SIZE_FMASK);
524 val |= HDR_OFST_PKT_SIZE_VALID_FMASK;
525 val |= u32_encode_bits(offset, HDR_OFST_PKT_SIZE_FMASK);
527 /* For QMAP TX, metadata offset is 0 (modem assumes this) */
528 val |= HDR_OFST_METADATA_VALID_FMASK;
530 /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
531 /* HDR_A5_MUX is 0 */
532 /* HDR_LEN_INC_DEAGG_HDR is 0 */
533 /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
536 iowrite32(val, ipa->reg_virt + offset);
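/* To illustrate the layout described above (assuming the usual struct
 * rmnet_map_header definition from <linux/if_rmnet.h>), the QMAP header
 * built for each received packet looks like this:
 *
 *	byte 0:		pad length and flag bits
 *	byte 1:		mux_id   <- copied from the masked modem metadata
 *	bytes 2-3:	pkt_len  <- filled in by the hardware (big endian)
 *
 * so the offsets programmed above are offsetof(..., mux_id) == 1 and
 * offsetof(..., pkt_len) == 2.
 */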
539 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
541 u32 offset = IPA_REG_ENDP_INIT_HDR_EXT_N_OFFSET(endpoint->endpoint_id);
542 u32 pad_align = endpoint->data->rx.pad_align;
543 struct ipa *ipa = endpoint->ipa;
546 val |= HDR_ENDIANNESS_FMASK; /* big endian */
548 /* A QMAP header contains a 6 bit pad field at offset 0. The RMNet
549 * driver assumes this field is meaningful in packets it receives,
550 * and assumes the header's payload length includes that padding.
551 * The RMNet driver does *not* pad packets it sends, however, so
552 * the pad field (although 0) should be ignored.
554 if (endpoint->data->qmap && !endpoint->toward_ipa) {
555 val |= HDR_TOTAL_LEN_OR_PAD_VALID_FMASK;
556 /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
557 val |= HDR_PAYLOAD_LEN_INC_PADDING_FMASK;
558 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
561 /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
562 if (!endpoint->toward_ipa)
563 val |= u32_encode_bits(pad_align, HDR_PAD_TO_ALIGNMENT_FMASK);
565 /* IPA v4.5 adds some most-significant bits to a few fields,
566 * two of which are defined in the HDR (not HDR_EXT) register.
568 if (ipa->version == IPA_VERSION_4_5) {
569 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
		if (endpoint->data->qmap && !endpoint->toward_ipa) {
			u32 offset;

573 offset = offsetof(struct rmnet_map_header, pkt_len);
574 offset >>= hweight32(HDR_OFST_PKT_SIZE_FMASK);
575 val |= u32_encode_bits(offset,
576 HDR_OFST_PKT_SIZE_MSB_FMASK);
577 /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
580 iowrite32(val, ipa->reg_virt + offset);
583 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
585 u32 endpoint_id = endpoint->endpoint_id;
589 if (endpoint->toward_ipa)
590 return; /* Register not valid for TX endpoints */
592 offset = IPA_REG_ENDP_INIT_HDR_METADATA_MASK_N_OFFSET(endpoint_id);
594 /* Note that HDR_ENDIANNESS indicates big endian header fields */
595 if (endpoint->data->qmap)
596 val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
598 iowrite32(val, endpoint->ipa->reg_virt + offset);
601 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
603 u32 offset = IPA_REG_ENDP_INIT_MODE_N_OFFSET(endpoint->endpoint_id);
606 if (!endpoint->toward_ipa)
607 return; /* Register not valid for RX endpoints */
609 if (endpoint->data->dma_mode) {
		enum ipa_endpoint_name name = endpoint->data->dma_endpoint;
		u32 dma_endpoint_id;

		dma_endpoint_id = endpoint->ipa->name_map[name]->endpoint_id;

		val = u32_encode_bits(IPA_DMA, MODE_FMASK);
		val |= u32_encode_bits(dma_endpoint_id, DEST_PIPE_INDEX_FMASK);
	} else {
		val = u32_encode_bits(IPA_BASIC, MODE_FMASK);
	}
620 /* All other bits unspecified (and 0) */
622 iowrite32(val, endpoint->ipa->reg_virt + offset);
625 /* Compute the aggregation size value to use for a given buffer size */
626 static u32 ipa_aggr_size_kb(u32 rx_buffer_size)
628 /* We don't use "hard byte limit" aggregation, so we define the
629 * aggregation limit such that our buffer has enough space *after*
630 * that limit to receive a full MTU of data, plus overhead.
632 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
634 return rx_buffer_size / SZ_1K;
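/* For example, with the 8192-byte receive buffer used here, subtracting an
 * IPA_MTU of roughly 1500 bytes plus a few hundred bytes of overhead leaves
 * a bit over 6 KB, so the value programmed is 6.  (Illustrative; the exact
 * overhead depends on the kernel configuration.)
 */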
637 /* Encoded values for AGGR endpoint register fields */
638 static u32 aggr_byte_limit_encoded(enum ipa_version version, u32 limit)
640 if (version < IPA_VERSION_4_5)
641 return u32_encode_bits(limit, aggr_byte_limit_fmask(true));
643 return u32_encode_bits(limit, aggr_byte_limit_fmask(false));
646 /* Encode the aggregation timer limit (microseconds) based on IPA version */
647 static u32 aggr_time_limit_encoded(enum ipa_version version, u32 limit)
653 if (version < IPA_VERSION_4_5) {
654 /* We set aggregation granularity in ipa_hardware_config() */
655 limit = DIV_ROUND_CLOSEST(limit, IPA_AGGR_GRANULARITY);
657 return u32_encode_bits(limit, aggr_time_limit_fmask(true));
660 /* IPA v4.5 expresses the time limit using Qtime. The AP has
661 * pulse generators 0 and 1 available, which were configured
662 * in ipa_qtime_config() to have granularity 100 usec and
663 * 1 msec, respectively. Use pulse generator 0 if possible,
664 * otherwise fall back to pulse generator 1.
666 fmask = aggr_time_limit_fmask(false);
667 val = DIV_ROUND_CLOSEST(limit, 100);
668 if (val > field_max(fmask)) {
669 /* Have to use pulse generator 1 (millisecond granularity) */
670 gran_sel = AGGR_GRAN_SEL_FMASK;
671 val = DIV_ROUND_CLOSEST(limit, 1000);
673 /* We can use pulse generator 0 (100 usec granularity) */
677 return gran_sel | u32_encode_bits(val, fmask);
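/* For example, the 500 microsecond IPA_AGGR_TIME_LIMIT used below encodes
 * as limit / IPA_AGGR_GRANULARITY units before IPA v4.5, and as 5 units of
 * pulse generator 0's 100 microsecond granularity on v4.5.  Only a limit
 * too large for the field would fall back to pulse generator 1 and
 * millisecond units.
 */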
680 static u32 aggr_sw_eof_active_encoded(enum ipa_version version, bool enabled)
682 u32 val = enabled ? 1 : 0;
684 if (version < IPA_VERSION_4_5)
685 return u32_encode_bits(val, aggr_sw_eof_active_fmask(true));
687 return u32_encode_bits(val, aggr_sw_eof_active_fmask(false));
690 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
692 u32 offset = IPA_REG_ENDP_INIT_AGGR_N_OFFSET(endpoint->endpoint_id);
693 enum ipa_version version = endpoint->ipa->version;
696 if (endpoint->data->aggregation) {
697 if (!endpoint->toward_ipa) {
701 val |= u32_encode_bits(IPA_ENABLE_AGGR, AGGR_EN_FMASK);
702 val |= u32_encode_bits(IPA_GENERIC, AGGR_TYPE_FMASK);
704 limit = ipa_aggr_size_kb(IPA_RX_BUFFER_SIZE);
705 val |= aggr_byte_limit_encoded(version, limit);
707 limit = IPA_AGGR_TIME_LIMIT;
708 val |= aggr_time_limit_encoded(version, limit);
710 /* AGGR_PKT_LIMIT is 0 (unlimited) */
712 close_eof = endpoint->data->rx.aggr_close_eof;
713 val |= aggr_sw_eof_active_encoded(version, close_eof);
715 /* AGGR_HARD_BYTE_LIMIT_ENABLE is 0 */
717 val |= u32_encode_bits(IPA_ENABLE_DEAGGR,
719 val |= u32_encode_bits(IPA_QCMAP, AGGR_TYPE_FMASK);
720 /* other fields ignored */
722 /* AGGR_FORCE_CLOSE is 0 */
723 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
725 val |= u32_encode_bits(IPA_BYPASS_AGGR, AGGR_EN_FMASK);
726 /* other fields ignored */
729 iowrite32(val, endpoint->ipa->reg_virt + offset);
732 /* Return the Qtime-based head-of-line blocking timer value that
733 * represents the given number of microseconds. The result
734 * includes both the timer value and the selected timer granularity.
736 static u32 hol_block_timer_qtime_val(struct ipa *ipa, u32 microseconds)
741 /* IPA v4.5 expresses time limits using Qtime. The AP has
742 * pulse generators 0 and 1 available, which were configured
743 * in ipa_qtime_config() to have granularity 100 usec and
744 * 1 msec, respectively. Use pulse generator 0 if possible,
745 * otherwise fall back to pulse generator 1.
747 val = DIV_ROUND_CLOSEST(microseconds, 100);
748 if (val > field_max(TIME_LIMIT_FMASK)) {
749 /* Have to use pulse generator 1 (millisecond granularity) */
750 gran_sel = GRAN_SEL_FMASK;
751 val = DIV_ROUND_CLOSEST(microseconds, 1000);
753 /* We can use pulse generator 0 (100 usec granularity) */
757 return gran_sel | u32_encode_bits(val, TIME_LIMIT_FMASK);
760 /* The head-of-line blocking timer is defined as a tick count. For
761 * IPA version 4.5 the tick count is based on the Qtimer, which is
762 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
763 * each tick represents 128 cycles of the IPA core clock.
765 * Return the encoded value that should be written to that register
766 * that represents the timeout period provided. For IPA v4.2 this
767 * encodes a base and scale value, while for earlier versions the
768 * value is a simple tick count.
770 static u32 hol_block_timer_val(struct ipa *ipa, u32 microseconds)
	if (!microseconds)
		return 0;	/* Nothing to compute if timer period is 0 */
782 if (ipa->version == IPA_VERSION_4_5)
783 return hol_block_timer_qtime_val(ipa, microseconds);
785 /* Use 64 bit arithmetic to avoid overflow... */
786 rate = ipa_clock_rate(ipa);
787 ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
788 /* ...but we still need to fit into a 32-bit register */
789 WARN_ON(ticks > U32_MAX);
791 /* IPA v3.5.1 through v4.1 just record the tick count */
792 if (ipa->version < IPA_VERSION_4_2)
795 /* For IPA v4.2, the tick count is represented by base and
796 * scale fields within the 32-bit timer register, where:
797 * ticks = base << scale;
798 * The best precision is achieved when the base value is as
799 * large as possible. Find the highest set bit in the tick
800 * count, and extract the number of bits in the base field
801 * such that that high bit is included.
803 high = fls(ticks); /* 1..32 */
804 width = HWEIGHT32(BASE_VALUE_FMASK);
805 scale = high > width ? high - width : 0;
	if (scale) {
		/* If we're scaling, round up to get a closer result */
		ticks += 1 << (scale - 1);
		/* High bit was set, so rounding might have affected it */
		if (fls(ticks) != high)
			scale++;
	}
814 val = u32_encode_bits(scale, SCALE_FMASK);
815 val |= u32_encode_bits(ticks >> scale, BASE_VALUE_FMASK);
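/* A worked example of the base/scale encoding (purely illustrative values):
 * with a 100 MHz core clock and a 1000 microsecond period,
 * ticks = 1000 * 100000000 / (128 * 1000000) = 781.  Assuming a 5-bit
 * BASE_VALUE field, width = 5 and high = fls(781) = 10, so scale = 5;
 * rounding adds 16 to give 797, and the encoded value represents
 * (797 >> 5) << 5 = 768 ticks, or roughly 983 microseconds.
 */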
820 /* If microseconds is 0, timeout is immediate */
821 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
824 u32 endpoint_id = endpoint->endpoint_id;
825 struct ipa *ipa = endpoint->ipa;
829 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_TIMER_N_OFFSET(endpoint_id);
830 val = hol_block_timer_val(ipa, microseconds);
831 iowrite32(val, ipa->reg_virt + offset);
835 ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint, bool enable)
837 u32 endpoint_id = endpoint->endpoint_id;
841 val = enable ? HOL_BLOCK_EN_FMASK : 0;
842 offset = IPA_REG_ENDP_INIT_HOL_BLOCK_EN_N_OFFSET(endpoint_id);
843 iowrite32(val, endpoint->ipa->reg_virt + offset);
846 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
850 for (i = 0; i < IPA_ENDPOINT_MAX; i++) {
851 struct ipa_endpoint *endpoint = &ipa->endpoint[i];
		if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
			continue;
856 ipa_endpoint_init_hol_block_timer(endpoint, 0);
857 ipa_endpoint_init_hol_block_enable(endpoint, true);
861 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
863 u32 offset = IPA_REG_ENDP_INIT_DEAGGR_N_OFFSET(endpoint->endpoint_id);
866 if (!endpoint->toward_ipa)
867 return; /* Register not valid for RX endpoints */
869 /* DEAGGR_HDR_LEN is 0 */
870 /* PACKET_OFFSET_VALID is 0 */
871 /* PACKET_OFFSET_LOCATION is ignored (not valid) */
872 /* MAX_PACKET_LEN is 0 (not enforced) */
874 iowrite32(val, endpoint->ipa->reg_virt + offset);
877 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
879 u32 offset = IPA_REG_ENDP_INIT_RSRC_GRP_N_OFFSET(endpoint->endpoint_id);
880 struct ipa *ipa = endpoint->ipa;
883 val = rsrc_grp_encoded(ipa->version, endpoint->data->resource_group);
884 iowrite32(val, ipa->reg_virt + offset);
887 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
889 u32 offset = IPA_REG_ENDP_INIT_SEQ_N_OFFSET(endpoint->endpoint_id);
890 u32 seq_type = endpoint->seq_type;
893 if (!endpoint->toward_ipa)
894 return; /* Register not valid for RX endpoints */
896 /* Sequencer type is made up of four nibbles */
897 val |= u32_encode_bits(seq_type & 0xf, HPS_SEQ_TYPE_FMASK);
898 val |= u32_encode_bits((seq_type >> 4) & 0xf, DPS_SEQ_TYPE_FMASK);
899 /* The second two apply to replicated packets */
900 val |= u32_encode_bits((seq_type >> 8) & 0xf, HPS_REP_SEQ_TYPE_FMASK);
901 val |= u32_encode_bits((seq_type >> 12) & 0xf, DPS_REP_SEQ_TYPE_FMASK);
903 iowrite32(val, endpoint->ipa->reg_virt + offset);
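/* For example, a (hypothetical) seq_type value of 0x2104 would program
 * HPS_SEQ_TYPE = 0x4, DPS_SEQ_TYPE = 0x0, HPS_REP_SEQ_TYPE = 0x1 and
 * DPS_REP_SEQ_TYPE = 0x2.
 */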
907 * ipa_endpoint_skb_tx() - Transmit a socket buffer
908 * @endpoint: Endpoint pointer
909 * @skb: Socket buffer to send
911 * Returns: 0 if successful, or a negative error code
913 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
915 struct gsi_trans *trans;
919 /* Make sure source endpoint's TLV FIFO has enough entries to
920 * hold the linear portion of the skb and all its fragments.
921 * If not, see if we can linearize it before giving up.
923 nr_frags = skb_shinfo(skb)->nr_frags;
924 if (1 + nr_frags > endpoint->trans_tre_max) {
925 if (skb_linearize(skb))
930 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
934 ret = gsi_trans_skb_add(trans, skb);
937 trans->data = skb; /* transaction owns skb now */
939 gsi_trans_commit(trans, !netdev_xmit_more());
944 gsi_trans_free(trans);
949 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
951 u32 endpoint_id = endpoint->endpoint_id;
952 struct ipa *ipa = endpoint->ipa;
956 offset = IPA_REG_ENDP_STATUS_N_OFFSET(endpoint_id);
958 if (endpoint->data->status_enable) {
959 val |= STATUS_EN_FMASK;
960 if (endpoint->toward_ipa) {
961 enum ipa_endpoint_name name;
962 u32 status_endpoint_id;
964 name = endpoint->data->tx.status_endpoint;
965 status_endpoint_id = ipa->name_map[name]->endpoint_id;
967 val |= u32_encode_bits(status_endpoint_id,
970 /* STATUS_LOCATION is 0, meaning status element precedes
971 * packet (not present for IPA v4.5)
973 /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v3.5.1) */
976 iowrite32(val, ipa->reg_virt + offset);
979 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint)
981 struct gsi_trans *trans;
982 bool doorbell = false;
	page = dev_alloc_pages(get_order(IPA_RX_BUFFER_SIZE));
	if (!page)
		return -ENOMEM;
	trans = ipa_endpoint_trans_alloc(endpoint, 1);
	if (!trans)
		goto err_free_pages;
996 /* Offset the buffer to make space for skb headroom */
997 offset = NET_SKB_PAD;
998 len = IPA_RX_BUFFER_SIZE - offset;
	ret = gsi_trans_page_add(trans, page, len, offset);
	if (ret)
		goto err_trans_free;
1003 trans->data = page; /* transaction owns page now */
1005 if (++endpoint->replenish_ready == IPA_REPLENISH_BATCH) {
		doorbell = true;
		endpoint->replenish_ready = 0;
	}
1010 gsi_trans_commit(trans, doorbell);
err_trans_free:
	gsi_trans_free(trans);
err_free_pages:
	__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
1023 * ipa_endpoint_replenish() - Replenish the Rx packets cache.
1024 * @endpoint: Endpoint to be replenished
1025 * @count: Number of buffers to send to hardware
1027 * Allocate RX packet wrapper structures with maximal socket buffers
1028 * for an endpoint. These are supplied to the hardware, which fills
1029 * them with incoming data.
1031 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint, u32 count)
1036 if (!endpoint->replenish_enabled) {
1038 atomic_add(count, &endpoint->replenish_saved);
1043 while (atomic_dec_not_zero(&endpoint->replenish_backlog))
1044 if (ipa_endpoint_replenish_one(endpoint))
1045 goto try_again_later;
1047 atomic_add(count, &endpoint->replenish_backlog);
	return;

try_again_later:
	/* The last one didn't succeed, so fix the backlog */
1053 backlog = atomic_inc_return(&endpoint->replenish_backlog);
1056 atomic_add(count, &endpoint->replenish_backlog);
1058 /* Whenever a receive buffer transaction completes we'll try to
1059 * replenish again. It's unlikely, but if we fail to supply even
1060 * one buffer, nothing will trigger another replenish attempt.
1061 * Receive buffer transactions use one TRE, so schedule work to
1062 * try replenishing again if our backlog is *all* available TREs.
1064 gsi = &endpoint->ipa->gsi;
1065 if (backlog == gsi_channel_tre_max(gsi, endpoint->channel_id))
1066 schedule_delayed_work(&endpoint->replenish_work,
1067 msecs_to_jiffies(1));
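/* To summarize the bookkeeping above: replenish_backlog counts buffers the
 * hardware still needs, replenish_saved holds that count while replenishing
 * is disabled, and ipa_endpoint_replenish_one() rings the doorbell once per
 * IPA_REPLENISH_BATCH (16) buffers queued.
 */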
1070 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1072 struct gsi *gsi = &endpoint->ipa->gsi;
1076 endpoint->replenish_enabled = true;
1077 while ((saved = atomic_xchg(&endpoint->replenish_saved, 0)))
1078 atomic_add(saved, &endpoint->replenish_backlog);
1080 /* Start replenishing if hardware currently has no buffers */
1081 max_backlog = gsi_channel_tre_max(gsi, endpoint->channel_id);
1082 if (atomic_read(&endpoint->replenish_backlog) == max_backlog)
1083 ipa_endpoint_replenish(endpoint, 0);
1086 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1090 endpoint->replenish_enabled = false;
1091 while ((backlog = atomic_xchg(&endpoint->replenish_backlog, 0)))
1092 atomic_add(backlog, &endpoint->replenish_saved);
1095 static void ipa_endpoint_replenish_work(struct work_struct *work)
1097 struct delayed_work *dwork = to_delayed_work(work);
1098 struct ipa_endpoint *endpoint;
1100 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1102 ipa_endpoint_replenish(endpoint, 0);
1105 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1106 void *data, u32 len, u32 extra)
1108 struct sk_buff *skb;
	skb = __dev_alloc_skb(len, GFP_ATOMIC);
	if (skb) {
		skb_put(skb, len);
		memcpy(skb->data, data, len);
		skb->truesize += extra;
	}
1117 /* Now receive it, or drop it if there's no netdev */
1118 if (endpoint->netdev)
1119 ipa_modem_skb_rx(endpoint->netdev, skb);
	else if (skb)
		dev_kfree_skb_any(skb);
1124 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1125 struct page *page, u32 len)
1127 struct sk_buff *skb;
1129 /* Nothing to do if there's no netdev */
1130 if (!endpoint->netdev)
1133 /* assert(len <= SKB_WITH_OVERHEAD(IPA_RX_BUFFER_SIZE-NET_SKB_PAD)); */
	skb = build_skb(page_address(page), IPA_RX_BUFFER_SIZE);
	if (skb) {
		/* Reserve the headroom and account for the data */
		skb_reserve(skb, NET_SKB_PAD);
		skb_put(skb, len);
	}
	/* Receive the buffer (or record drop if unable to build it) */
	ipa_modem_skb_rx(endpoint->netdev, skb);

	return skb != NULL;
1147 /* The format of a packet status element is the same for several status
1148 * types (opcodes). Other types aren't currently supported.
1150 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1153 case IPA_STATUS_OPCODE_PACKET:
1154 case IPA_STATUS_OPCODE_DROPPED_PACKET:
1155 case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1156 case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1163 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1164 const struct ipa_status *status)
	if (!ipa_status_format_packet(status->opcode))
		return true;
	if (!status->pkt_len)
		return true;
1172 endpoint_id = u8_get_bits(status->endp_dst_idx,
1173 IPA_STATUS_DST_IDX_FMASK);
	if (endpoint_id != endpoint->endpoint_id)
		return true;
1177 return false; /* Don't skip this packet, process it */
1180 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1181 const struct ipa_status *status)
1183 struct ipa_endpoint *command_endpoint;
1184 struct ipa *ipa = endpoint->ipa;
1187 if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1188 return false; /* No valid tag */
1190 /* The status contains a valid tag. We know the packet was sent to
1191 * this endpoint (already verified by ipa_endpoint_status_skip()).
1192 * If the packet came from the AP->command TX endpoint we know
1193 * this packet was sent as part of the pipeline clear process.
1195 endpoint_id = u8_get_bits(status->endp_src_idx,
1196 IPA_STATUS_SRC_IDX_FMASK);
1197 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
	if (endpoint_id == command_endpoint->endpoint_id) {
		complete(&ipa->completion);
	} else {
		dev_err(&ipa->pdev->dev,
			"unexpected tagged packet from endpoint %u\n",
			endpoint_id);
	}
	return true;
1209 /* Return whether the status indicates the packet should be dropped */
1210 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1211 const struct ipa_status *status)
1215 /* If the status indicates a tagged transfer, we'll drop the packet */
	if (ipa_endpoint_status_tag(endpoint, status))
		return true;
1219 /* Deaggregation exceptions we drop; all other types we consume */
1220 if (status->exception)
1221 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1223 /* Drop the packet if it fails to match a routing rule; otherwise no */
1224 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1226 return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1229 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1230 struct page *page, u32 total_len)
1232 void *data = page_address(page) + NET_SKB_PAD;
1233 u32 unused = IPA_RX_BUFFER_SIZE - total_len;
1234 u32 resid = total_len;
1237 const struct ipa_status *status = data;
1241 if (resid < sizeof(*status)) {
1242 dev_err(&endpoint->ipa->pdev->dev,
1243 "short message (%u bytes < %zu byte status)\n",
1244 resid, sizeof(*status));
1248 /* Skip over status packets that lack packet data */
1249 if (ipa_endpoint_status_skip(endpoint, status)) {
1250 data += sizeof(*status);
			resid -= sizeof(*status);
			continue;
		}
1255 /* Compute the amount of buffer space consumed by the packet,
1256 * including the status element. If the hardware is configured
1257 * to pad packet data to an aligned boundary, account for that.
1258 * And if checksum offload is enabled a trailer containing
1259 * computed checksum information will be appended.
1261 align = endpoint->data->rx.pad_align ? : 1;
1262 len = le16_to_cpu(status->pkt_len);
1263 len = sizeof(*status) + ALIGN(len, align);
1264 if (endpoint->data->checksum)
1265 len += sizeof(struct rmnet_map_dl_csum_trailer);
1267 if (!ipa_endpoint_status_drop(endpoint, status)) {
1272 /* Client receives only packet data (no status) */
1273 data2 = data + sizeof(*status);
1274 len2 = le16_to_cpu(status->pkt_len);
1276 /* Have the true size reflect the extra unused space in
1277 * the original receive buffer. Distribute the "cost"
			 * proportionately across all aggregated packets in the
			 * buffer.
			 */
1281 extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1282 ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
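			/* For example (illustrative numbers): if an
			 * 8192-byte buffer holds 7000 bytes of aggregated
			 * data, unused is 1192; a packet consuming
			 * len = 1750 bytes of that buffer is charged
			 * extra = 1192 * 1750 / 7000 = 298 bytes of
			 * additional truesize.
			 */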
1285 /* Consume status and the full packet it describes */
1291 /* Complete a TX transaction, command or from ipa_endpoint_skb_tx() */
1292 static void ipa_endpoint_tx_complete(struct ipa_endpoint *endpoint,
1293 struct gsi_trans *trans)
1297 /* Complete transaction initiated in ipa_endpoint_replenish_one() */
1298 static void ipa_endpoint_rx_complete(struct ipa_endpoint *endpoint,
1299 struct gsi_trans *trans)
	struct page *page;

	ipa_endpoint_replenish(endpoint, 1);
	if (trans->cancelled)
		return;
	/* Parse or build a socket buffer using the actual received length */
	page = trans->data;
	if (endpoint->data->status_enable)
1311 ipa_endpoint_status_parse(endpoint, page, trans->len);
1312 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1313 trans->data = NULL; /* Pages have been consumed */
1316 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1317 struct gsi_trans *trans)
1319 if (endpoint->toward_ipa)
		ipa_endpoint_tx_complete(endpoint, trans);
	else
		ipa_endpoint_rx_complete(endpoint, trans);
1325 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1326 struct gsi_trans *trans)
1328 if (endpoint->toward_ipa) {
1329 struct ipa *ipa = endpoint->ipa;
1331 /* Nothing to do for command transactions */
1332 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
			struct sk_buff *skb = trans->data;

			if (skb)
				dev_kfree_skb_any(skb);
		}
	} else {
		struct page *page = trans->data;
		if (page)
			__free_pages(page, get_order(IPA_RX_BUFFER_SIZE));
	}
1346 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1350 /* ROUTE_DIS is 0 */
1351 val = u32_encode_bits(endpoint_id, ROUTE_DEF_PIPE_FMASK);
1352 val |= ROUTE_DEF_HDR_TABLE_FMASK;
1353 val |= u32_encode_bits(0, ROUTE_DEF_HDR_OFST_FMASK);
1354 val |= u32_encode_bits(endpoint_id, ROUTE_FRAG_DEF_PIPE_FMASK);
1355 val |= ROUTE_DEF_RETAIN_HDR_FMASK;
1357 iowrite32(val, ipa->reg_virt + IPA_REG_ROUTE_OFFSET);
1360 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1362 ipa_endpoint_default_route_set(ipa, 0);
1366 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1367 * @endpoint: Endpoint to be reset
1369 * If aggregation is active on an RX endpoint when a reset is performed
1370 * on its underlying GSI channel, a special sequence of actions must be
1371 * taken to ensure the IPA pipeline is properly cleared.
1373 * Return: 0 if successful, or a negative error code
1375 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1377 struct device *dev = &endpoint->ipa->pdev->dev;
1378 struct ipa *ipa = endpoint->ipa;
1379 struct gsi *gsi = &ipa->gsi;
1380 bool suspended = false;
1387 virt = kzalloc(len, GFP_KERNEL);
1391 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1392 if (dma_mapping_error(dev, addr)) {
1397 /* Force close aggregation before issuing the reset */
1398 ipa_endpoint_force_close(endpoint);
1400 /* Reset and reconfigure the channel with the doorbell engine
1401 * disabled. Then poll until we know aggregation is no longer
1402 * active. We'll re-enable the doorbell (if appropriate) when
1403 * we reset again below.
1405 gsi_channel_reset(gsi, endpoint->channel_id, false);
1407 /* Make sure the channel isn't suspended */
1408 suspended = ipa_endpoint_program_suspend(endpoint, false);
1410 /* Start channel and do a 1 byte read */
1411 ret = gsi_channel_start(gsi, endpoint->channel_id);
1413 goto out_suspend_again;
1415 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1417 goto err_endpoint_stop;
1419 /* Wait for aggregation to be closed on the channel */
1420 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
	do {
		if (!ipa_endpoint_aggr_active(endpoint))
			break;
		usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
	} while (retries--);
1427 /* Check one last time */
1428 if (ipa_endpoint_aggr_active(endpoint))
1429 dev_err(dev, "endpoint %u still active during reset\n",
1430 endpoint->endpoint_id);
1432 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1434 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1436 goto out_suspend_again;
1438 /* Finally, reset and reconfigure the channel again (re-enabling the
1439 * the doorbell engine if appropriate). Sleep for 1 millisecond to
1440 * complete the channel reset sequence. Finish by suspending the
1441 * channel again (if necessary).
1443 gsi_channel_reset(gsi, endpoint->channel_id, true);
1445 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1447 goto out_suspend_again;
err_endpoint_stop:
	(void)gsi_channel_stop(gsi, endpoint->channel_id);
out_suspend_again:
	if (suspended)
		(void)ipa_endpoint_program_suspend(endpoint, true);
	dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1461 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1463 u32 channel_id = endpoint->channel_id;
1464 struct ipa *ipa = endpoint->ipa;
1468 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1469 * is active, we need to handle things specially to recover.
1470 * All other cases just need to reset the underlying GSI channel.
1472 special = ipa->version == IPA_VERSION_3_5_1 &&
1473 !endpoint->toward_ipa &&
1474 endpoint->data->aggregation;
1475 if (special && ipa_endpoint_aggr_active(endpoint))
		ret = ipa_endpoint_reset_rx_aggr(endpoint);
	else
		gsi_channel_reset(&ipa->gsi, channel_id, true);

	if (ret)
		dev_err(&ipa->pdev->dev,
1482 "error %d resetting channel %u for endpoint %u\n",
1483 ret, endpoint->channel_id, endpoint->endpoint_id);
1486 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1488 if (endpoint->toward_ipa)
		ipa_endpoint_program_delay(endpoint, false);
	else
		(void)ipa_endpoint_program_suspend(endpoint, false);
1492 ipa_endpoint_init_cfg(endpoint);
1493 ipa_endpoint_init_hdr(endpoint);
1494 ipa_endpoint_init_hdr_ext(endpoint);
1495 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1496 ipa_endpoint_init_mode(endpoint);
1497 ipa_endpoint_init_aggr(endpoint);
1498 ipa_endpoint_init_deaggr(endpoint);
1499 ipa_endpoint_init_rsrc_grp(endpoint);
1500 ipa_endpoint_init_seq(endpoint);
1501 ipa_endpoint_status(endpoint);
1504 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1506 struct ipa *ipa = endpoint->ipa;
1507 struct gsi *gsi = &ipa->gsi;
	ret = gsi_channel_start(gsi, endpoint->channel_id);
	if (ret) {
1512 dev_err(&ipa->pdev->dev,
1513 "error %d starting %cX channel %u for endpoint %u\n",
1514 ret, endpoint->toward_ipa ? 'T' : 'R',
			endpoint->channel_id, endpoint->endpoint_id);
		return ret;
	}
1519 if (!endpoint->toward_ipa) {
1520 ipa_interrupt_suspend_enable(ipa->interrupt,
1521 endpoint->endpoint_id);
1522 ipa_endpoint_replenish_enable(endpoint);
1525 ipa->enabled |= BIT(endpoint->endpoint_id);
1530 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1532 u32 mask = BIT(endpoint->endpoint_id);
1533 struct ipa *ipa = endpoint->ipa;
1534 struct gsi *gsi = &ipa->gsi;
	if (!(ipa->enabled & mask))
		return;
1540 ipa->enabled ^= mask;
1542 if (!endpoint->toward_ipa) {
1543 ipa_endpoint_replenish_disable(endpoint);
1544 ipa_interrupt_suspend_disable(ipa->interrupt,
1545 endpoint->endpoint_id);
1548 /* Note that if stop fails, the channel's state is not well-defined */
	ret = gsi_channel_stop(gsi, endpoint->channel_id);
	if (ret)
1551 dev_err(&ipa->pdev->dev,
1552 "error %d attempting to stop endpoint %u\n", ret,
1553 endpoint->endpoint_id);
1556 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1558 struct device *dev = &endpoint->ipa->pdev->dev;
1559 struct gsi *gsi = &endpoint->ipa->gsi;
	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;
1566 if (!endpoint->toward_ipa) {
1567 ipa_endpoint_replenish_disable(endpoint);
1568 (void)ipa_endpoint_program_suspend(endpoint, true);
1571 /* IPA v3.5.1 doesn't use channel stop for suspend */
1572 stop_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_suspend(gsi, endpoint->channel_id, stop_channel);
	if (ret)
1575 dev_err(dev, "error %d suspending channel %u\n", ret,
1576 endpoint->channel_id);
1579 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1581 struct device *dev = &endpoint->ipa->pdev->dev;
1582 struct gsi *gsi = &endpoint->ipa->gsi;
	if (!(endpoint->ipa->enabled & BIT(endpoint->endpoint_id)))
		return;
1589 if (!endpoint->toward_ipa)
1590 (void)ipa_endpoint_program_suspend(endpoint, false);
1592 /* IPA v3.5.1 doesn't use channel start for resume */
1593 start_channel = endpoint->ipa->version != IPA_VERSION_3_5_1;
	ret = gsi_channel_resume(gsi, endpoint->channel_id, start_channel);
	if (ret)
1596 dev_err(dev, "error %d resuming channel %u\n", ret,
1597 endpoint->channel_id);
1598 else if (!endpoint->toward_ipa)
1599 ipa_endpoint_replenish_enable(endpoint);
1602 void ipa_endpoint_suspend(struct ipa *ipa)
	if (!ipa->setup_complete)
		return;
1607 if (ipa->modem_netdev)
1608 ipa_modem_suspend(ipa->modem_netdev);
1610 ipa_cmd_pipeline_clear(ipa);
1612 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1613 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1616 void ipa_endpoint_resume(struct ipa *ipa)
	if (!ipa->setup_complete)
		return;
1621 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1622 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1624 if (ipa->modem_netdev)
1625 ipa_modem_resume(ipa->modem_netdev);
1628 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1630 struct gsi *gsi = &endpoint->ipa->gsi;
1631 u32 channel_id = endpoint->channel_id;
1633 /* Only AP endpoints get set up */
	if (endpoint->ee_id != GSI_EE_AP)
		return;
1637 endpoint->trans_tre_max = gsi_channel_trans_tre_max(gsi, channel_id);
1638 if (!endpoint->toward_ipa) {
1639 /* RX transactions require a single TRE, so the maximum
1640 * backlog is the same as the maximum outstanding TREs.
1642 endpoint->replenish_enabled = false;
1643 atomic_set(&endpoint->replenish_saved,
1644 gsi_channel_tre_max(gsi, endpoint->channel_id));
1645 atomic_set(&endpoint->replenish_backlog, 0);
1646 INIT_DELAYED_WORK(&endpoint->replenish_work,
1647 ipa_endpoint_replenish_work);
1650 ipa_endpoint_program(endpoint);
1652 endpoint->ipa->set_up |= BIT(endpoint->endpoint_id);
1655 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1657 endpoint->ipa->set_up &= ~BIT(endpoint->endpoint_id);
1659 if (!endpoint->toward_ipa)
1660 cancel_delayed_work_sync(&endpoint->replenish_work);
1662 ipa_endpoint_reset(endpoint);
1665 void ipa_endpoint_setup(struct ipa *ipa)
1667 u32 initialized = ipa->initialized;
1670 while (initialized) {
1671 u32 endpoint_id = __ffs(initialized);
1673 initialized ^= BIT(endpoint_id);
1675 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1679 void ipa_endpoint_teardown(struct ipa *ipa)
1681 u32 set_up = ipa->set_up;
1684 u32 endpoint_id = __fls(set_up);
1686 set_up ^= BIT(endpoint_id);
1688 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1693 int ipa_endpoint_config(struct ipa *ipa)
1695 struct device *dev = &ipa->pdev->dev;
1704 /* Find out about the endpoints supplied by the hardware, and ensure
1705 * the highest one doesn't exceed the number we support.
1707 val = ioread32(ipa->reg_virt + IPA_REG_FLAVOR_0_OFFSET);
1709 /* Our RX is an IPA producer */
1710 rx_base = u32_get_bits(val, IPA_PROD_LOWEST_FMASK);
1711 max = rx_base + u32_get_bits(val, IPA_MAX_PROD_PIPES_FMASK);
1712 if (max > IPA_ENDPOINT_MAX) {
1713 dev_err(dev, "too many endpoints (%u > %u)\n",
1714 max, IPA_ENDPOINT_MAX);
1717 rx_mask = GENMASK(max - 1, rx_base);
1719 /* Our TX is an IPA consumer */
1720 max = u32_get_bits(val, IPA_MAX_CONS_PIPES_FMASK);
1721 tx_mask = GENMASK(max - 1, 0);
1723 ipa->available = rx_mask | tx_mask;
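	/* For example (hypothetical flavor register values): with the lowest
	 * producer pipe at 8 and 8 producer and 8 consumer pipes, rx_base = 8,
	 * rx_mask = GENMASK(15, 8) = 0xff00 and tx_mask = GENMASK(7, 0) =
	 * 0x00ff, so available ends up 0xffff.
	 */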
1725 /* Check for initialized endpoints not supported by the hardware */
1726 if (ipa->initialized & ~ipa->available) {
1727 dev_err(dev, "unavailable endpoint id(s) 0x%08x\n",
1728 ipa->initialized & ~ipa->available);
1729 ret = -EINVAL; /* Report other errors too */
1732 initialized = ipa->initialized;
1733 while (initialized) {
1734 u32 endpoint_id = __ffs(initialized);
1735 struct ipa_endpoint *endpoint;
1737 initialized ^= BIT(endpoint_id);
1739 /* Make sure it's pointing in the right direction */
1740 endpoint = &ipa->endpoint[endpoint_id];
1741 if ((endpoint_id < rx_base) != !!endpoint->toward_ipa) {
1742 dev_err(dev, "endpoint id %u wrong direction\n",
1751 void ipa_endpoint_deconfig(struct ipa *ipa)
1753 ipa->available = 0; /* Nothing more to do */
1756 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1757 const struct ipa_gsi_endpoint_data *data)
1759 struct ipa_endpoint *endpoint;
1761 endpoint = &ipa->endpoint[data->endpoint_id];
1763 if (data->ee_id == GSI_EE_AP)
1764 ipa->channel_map[data->channel_id] = endpoint;
1765 ipa->name_map[name] = endpoint;
1767 endpoint->ipa = ipa;
1768 endpoint->ee_id = data->ee_id;
1769 endpoint->seq_type = data->endpoint.seq_type;
1770 endpoint->channel_id = data->channel_id;
1771 endpoint->endpoint_id = data->endpoint_id;
1772 endpoint->toward_ipa = data->toward_ipa;
1773 endpoint->data = &data->endpoint.config;
1775 ipa->initialized |= BIT(endpoint->endpoint_id);
1778 void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1780 endpoint->ipa->initialized &= ~BIT(endpoint->endpoint_id);
1782 memset(endpoint, 0, sizeof(*endpoint));
1785 void ipa_endpoint_exit(struct ipa *ipa)
1787 u32 initialized = ipa->initialized;
1789 while (initialized) {
1790 u32 endpoint_id = __fls(initialized);
1792 initialized ^= BIT(endpoint_id);
1794 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1796 memset(ipa->name_map, 0, sizeof(ipa->name_map));
1797 memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1800 /* Returns a bitmask of endpoints that support filtering, or 0 on error */
1801 u32 ipa_endpoint_init(struct ipa *ipa, u32 count,
1802 const struct ipa_gsi_endpoint_data *data)
1804 enum ipa_endpoint_name name;
1807 if (!ipa_endpoint_data_valid(ipa, count, data))
1808 return 0; /* Error */
1810 ipa->initialized = 0;
1813 for (name = 0; name < count; name++, data++) {
1814 if (ipa_gsi_endpoint_data_empty(data))
1815 continue; /* Skip over empty slots */
1817 ipa_endpoint_init_one(ipa, name, data);
1819 if (data->endpoint.filter_support)
1820 filter_map |= BIT(data->endpoint_id);
1823 if (!ipa_filter_map_valid(ipa, filter_map))
1824 goto err_endpoint_exit;
1826 return filter_map; /* Non-zero bitmask */
err_endpoint_exit:
	ipa_endpoint_exit(ipa);
1831 return 0; /* Error */