1 // SPDX-License-Identifier: GPL-2.0
3 /* Copyright (c) 2012-2018, The Linux Foundation. All rights reserved.
4 * Copyright (C) 2019-2022 Linaro Ltd.
7 #include <linux/types.h>
8 #include <linux/device.h>
9 #include <linux/slab.h>
10 #include <linux/bitfield.h>
11 #include <linux/if_rmnet.h>
12 #include <linux/dma-direction.h>
15 #include "gsi_trans.h"
18 #include "ipa_endpoint.h"
21 #include "ipa_modem.h"
22 #include "ipa_table.h"
24 #include "ipa_power.h"
26 /* Hardware is told about receive buffers once a "batch" has been queued */
27 #define IPA_REPLENISH_BATCH 16 /* Must be non-zero */
29 /* The amount of RX buffer space consumed by standard skb overhead */
30 #define IPA_RX_BUFFER_OVERHEAD (PAGE_SIZE - SKB_MAX_ORDER(NET_SKB_PAD, 0))
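/* In other words, this overhead is NET_SKB_PAD of headroom plus the
 * cache-aligned struct skb_shared_info that build_skb() places at the
 * end of the buffer (roughly a few hundred bytes on typical configs).
 */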
32 /* Where to find the QMAP mux_id for a packet within modem-supplied metadata */
33 #define IPA_ENDPOINT_QMAP_METADATA_MASK 0x000000ff /* host byte order */
35 #define IPA_ENDPOINT_RESET_AGGR_RETRY_MAX 3
37 /** enum ipa_status_opcode - status element opcode hardware values */
38 enum ipa_status_opcode {
39 IPA_STATUS_OPCODE_PACKET = 0x01,
40 IPA_STATUS_OPCODE_DROPPED_PACKET = 0x04,
41 IPA_STATUS_OPCODE_SUSPENDED_PACKET = 0x08,
42 IPA_STATUS_OPCODE_PACKET_2ND_PASS = 0x40,
45 /** enum ipa_status_exception - status element exception type */
46 enum ipa_status_exception {
47 /* 0 means no exception */
48 IPA_STATUS_EXCEPTION_DEAGGR = 0x01,
51 /* Status element provided by hardware */
53 u8 opcode; /* enum ipa_status_opcode */
54 u8 exception; /* enum ipa_status_exception */
66 /* Field masks for struct ipa_status structure fields */
67 #define IPA_STATUS_MASK_TAG_VALID_FMASK GENMASK(4, 4)
68 #define IPA_STATUS_SRC_IDX_FMASK GENMASK(4, 0)
69 #define IPA_STATUS_DST_IDX_FMASK GENMASK(4, 0)
70 #define IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK GENMASK(31, 22)
71 #define IPA_STATUS_FLAGS2_TAG_FMASK GENMASK_ULL(63, 16)
73 /* Compute the aggregation size value to use for a given buffer size */
74 static u32 ipa_aggr_size_kb(u32 rx_buffer_size, bool aggr_hard_limit)
76 /* A hard aggregation limit will not be crossed; aggregation closes
77 * if saving incoming data would cross the hard byte limit boundary.
79 * With a soft limit, aggregation closes *after* the size boundary
80 * has been crossed. In that case the limit must be small enough to
81 * leave room in the buffer beyond it for a full MTU of data plus overhead.
84 rx_buffer_size -= IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
86 /* The byte limit is encoded as a number of kilobytes */
88 return rx_buffer_size / SZ_1K;
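/* Worked example (illustrative, assuming IPA_MTU is 1500 and roughly
 * 384 bytes of buffer overhead): for a 32768-byte value passed in, a
 * hard limit yields 32768 / 1024 = 32 KB, while a soft limit first
 * subtracts the MTU and overhead, yielding about 30 KB.
 */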
91 static bool ipa_endpoint_data_valid_one(struct ipa *ipa, u32 count,
92 const struct ipa_gsi_endpoint_data *all_data,
93 const struct ipa_gsi_endpoint_data *data)
95 const struct ipa_gsi_endpoint_data *other_data;
96 struct device *dev = &ipa->pdev->dev;
97 enum ipa_endpoint_name other_name;
99 if (ipa_gsi_endpoint_data_empty(data))
102 if (!data->toward_ipa) {
103 const struct ipa_endpoint_rx *rx_config;
104 const struct ipa_reg *reg;
109 if (data->endpoint.filter_support) {
110 dev_err(dev, "filtering not supported for "
116 /* Nothing more to check for non-AP RX */
117 if (data->ee_id != GSI_EE_AP)
120 rx_config = &data->endpoint.config.rx;
122 /* The buffer size must hold an MTU plus overhead */
123 buffer_size = rx_config->buffer_size;
124 limit = IPA_MTU + IPA_RX_BUFFER_OVERHEAD;
125 if (buffer_size < limit) {
126 dev_err(dev, "RX buffer size too small for RX endpoint %u (%u < %u)\n",
127 data->endpoint_id, buffer_size, limit);
131 if (!data->endpoint.config.aggregation) {
134 /* No aggregation; check for bogus aggregation data */
135 if (rx_config->aggr_time_limit) {
137 "time limit with no aggregation for RX endpoint %u\n",
142 if (rx_config->aggr_hard_limit) {
143 dev_err(dev, "hard limit with no aggregation for RX endpoint %u\n",
148 if (rx_config->aggr_close_eof) {
149 dev_err(dev, "close EOF with no aggregation for RX endpoint %u\n",
154 return result; /* Nothing more to check */
157 /* For an endpoint supporting receive aggregation, the byte
158 * limit defines the point at which aggregation closes. This
159 * check ensures the receive buffer size doesn't result in a
160 * limit that exceeds what's representable in the aggregation
163 aggr_size = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
164 rx_config->aggr_hard_limit);
165 reg = ipa_reg(ipa, ENDP_INIT_AGGR);
167 limit = ipa_reg_field_max(reg, BYTE_LIMIT);
168 if (aggr_size > limit) {
169 dev_err(dev, "aggregated size too large for RX endpoint %u (%u KB > %u KB)\n",
170 data->endpoint_id, aggr_size, limit);
175 return true; /* Nothing more to check for RX */
178 /* Starting with IPA v4.5 sequencer replication is obsolete */
179 if (ipa->version >= IPA_VERSION_4_5) {
180 if (data->endpoint.config.tx.seq_rep_type) {
181 dev_err(dev, "non-zero seq_rep_type TX endpoint %u\n",
187 if (data->endpoint.config.status_enable) {
188 other_name = data->endpoint.config.tx.status_endpoint;
189 if (other_name >= count) {
190 dev_err(dev, "status endpoint name %u out of range "
192 other_name, data->endpoint_id);
196 /* Status endpoint must be defined... */
197 other_data = &all_data[other_name];
198 if (ipa_gsi_endpoint_data_empty(other_data)) {
199 dev_err(dev, "status endpoint name %u undefined "
201 other_name, data->endpoint_id);
205 /* ...and has to be an RX endpoint... */
206 if (other_data->toward_ipa) {
208 "status endpoint for endpoint %u not RX\n",
213 /* ...and if it's to be an AP endpoint... */
214 if (other_data->ee_id == GSI_EE_AP) {
215 /* ...make sure it has status enabled. */
216 if (!other_data->endpoint.config.status_enable) {
218 "status not enabled for endpoint %u\n",
219 other_data->endpoint_id);
225 if (data->endpoint.config.dma_mode) {
226 other_name = data->endpoint.config.dma_endpoint;
227 if (other_name >= count) {
228 dev_err(dev, "DMA endpoint name %u out of range "
230 other_name, data->endpoint_id);
234 other_data = &all_data[other_name];
235 if (ipa_gsi_endpoint_data_empty(other_data)) {
236 dev_err(dev, "DMA endpoint name %u undefined "
238 other_name, data->endpoint_id);
246 /* Validate endpoint configuration data. Return max defined endpoint ID */
247 static u32 ipa_endpoint_max(struct ipa *ipa, u32 count,
248 const struct ipa_gsi_endpoint_data *data)
250 const struct ipa_gsi_endpoint_data *dp = data;
251 struct device *dev = &ipa->pdev->dev;
252 enum ipa_endpoint_name name;
255 if (count > IPA_ENDPOINT_COUNT) {
256 dev_err(dev, "too many endpoints specified (%u > %u)\n",
257 count, IPA_ENDPOINT_COUNT);
261 /* Make sure needed endpoints have defined data */
262 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_COMMAND_TX])) {
263 dev_err(dev, "command TX endpoint not defined\n");
266 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_LAN_RX])) {
267 dev_err(dev, "LAN RX endpoint not defined\n");
270 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_TX])) {
271 dev_err(dev, "AP->modem TX endpoint not defined\n");
274 if (ipa_gsi_endpoint_data_empty(&data[IPA_ENDPOINT_AP_MODEM_RX])) {
275 dev_err(dev, "AP<-modem RX endpoint not defined\n");
280 for (name = 0; name < count; name++, dp++) {
281 if (!ipa_endpoint_data_valid_one(ipa, count, data, dp))
283 max = max_t(u32, max, dp->endpoint_id);
289 /* Allocate a transaction to use on a non-command endpoint */
290 static struct gsi_trans *ipa_endpoint_trans_alloc(struct ipa_endpoint *endpoint,
293 struct gsi *gsi = &endpoint->ipa->gsi;
294 u32 channel_id = endpoint->channel_id;
295 enum dma_data_direction direction;
297 direction = endpoint->toward_ipa ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
299 return gsi_channel_trans_alloc(gsi, channel_id, tre_count, direction);
302 /* suspend_delay represents suspend for RX, delay for TX endpoints.
303 * Note that suspend is not supported starting with IPA v4.0, and
304 * delay mode should not be used starting with IPA v4.2.
307 ipa_endpoint_init_ctrl(struct ipa_endpoint *endpoint, bool suspend_delay)
309 struct ipa *ipa = endpoint->ipa;
310 const struct ipa_reg *reg;
317 if (endpoint->toward_ipa)
318 WARN_ON(ipa->version >= IPA_VERSION_4_2);
320 WARN_ON(ipa->version >= IPA_VERSION_4_0);
322 reg = ipa_reg(ipa, ENDP_INIT_CTRL);
323 offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
324 val = ioread32(ipa->reg_virt + offset);
326 field_id = endpoint->toward_ipa ? ENDP_DELAY : ENDP_SUSPEND;
327 mask = ipa_reg_bit(reg, field_id);
329 state = !!(val & mask);
331 /* Don't bother if it's already in the requested state */
332 if (suspend_delay != state) {
334 iowrite32(val, ipa->reg_virt + offset);
340 /* We don't care what the previous state was for delay mode */
342 ipa_endpoint_program_delay(struct ipa_endpoint *endpoint, bool enable)
344 /* Delay mode should not be used for IPA v4.2+ */
345 WARN_ON(endpoint->ipa->version >= IPA_VERSION_4_2);
346 WARN_ON(!endpoint->toward_ipa);
348 (void)ipa_endpoint_init_ctrl(endpoint, enable);
351 static bool ipa_endpoint_aggr_active(struct ipa_endpoint *endpoint)
353 u32 endpoint_id = endpoint->endpoint_id;
354 struct ipa *ipa = endpoint->ipa;
355 u32 unit = endpoint_id / 32;
356 const struct ipa_reg *reg;
359 WARN_ON(!test_bit(endpoint_id, ipa->available));
361 reg = ipa_reg(ipa, STATE_AGGR_ACTIVE);
362 val = ioread32(ipa->reg_virt + ipa_reg_n_offset(reg, unit));
364 return !!(val & BIT(endpoint_id % 32));
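/* For example, endpoint 37 is tracked by bit 5 (37 % 32) of the second
 * (unit 1) STATE_AGGR_ACTIVE register.
 */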
367 static void ipa_endpoint_force_close(struct ipa_endpoint *endpoint)
369 u32 endpoint_id = endpoint->endpoint_id;
370 u32 mask = BIT(endpoint_id % 32);
371 struct ipa *ipa = endpoint->ipa;
372 u32 unit = endpoint_id / 32;
373 const struct ipa_reg *reg;
375 WARN_ON(!test_bit(endpoint_id, ipa->available));
377 reg = ipa_reg(ipa, AGGR_FORCE_CLOSE);
378 iowrite32(mask, ipa->reg_virt + ipa_reg_n_offset(reg, unit));
382 * ipa_endpoint_suspend_aggr() - Emulate suspend interrupt
383 * @endpoint: Endpoint on which to emulate a suspend
385 * Emulate suspend IPA interrupt to unsuspend an endpoint suspended
386 * with an open aggregation frame. This is to work around a hardware
387 * issue in IPA version 3.5.1 where the suspend interrupt will not be
388 * generated when it should be.
390 static void ipa_endpoint_suspend_aggr(struct ipa_endpoint *endpoint)
392 struct ipa *ipa = endpoint->ipa;
394 if (!endpoint->config.aggregation)
397 /* Nothing to do if the endpoint doesn't have aggregation open */
398 if (!ipa_endpoint_aggr_active(endpoint))
401 /* Force close aggregation */
402 ipa_endpoint_force_close(endpoint);
404 ipa_interrupt_simulate_suspend(ipa->interrupt);
407 /* Returns previous suspend state (true means suspend was enabled) */
409 ipa_endpoint_program_suspend(struct ipa_endpoint *endpoint, bool enable)
413 if (endpoint->ipa->version >= IPA_VERSION_4_0)
414 return enable; /* For IPA v4.0+, no change made */
416 WARN_ON(endpoint->toward_ipa);
418 suspended = ipa_endpoint_init_ctrl(endpoint, enable);
420 /* A client suspended with an open aggregation frame will not
421 * generate a SUSPEND IPA interrupt. If enabling suspend, have
422 * ipa_endpoint_suspend_aggr() handle this.
424 if (enable && !suspended)
425 ipa_endpoint_suspend_aggr(endpoint);
430 /* Put all modem RX endpoints into suspend mode, and stop transmission
431 * on all modem TX endpoints. Prior to IPA v4.2, endpoint DELAY mode is
432 * used for TX endpoints; starting with IPA v4.2 we use GSI channel flow
435 void ipa_endpoint_modem_pause_all(struct ipa *ipa, bool enable)
439 while (endpoint_id < ipa->endpoint_count) {
440 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
442 if (endpoint->ee_id != GSI_EE_MODEM)
445 if (!endpoint->toward_ipa)
446 (void)ipa_endpoint_program_suspend(endpoint, enable);
447 else if (ipa->version < IPA_VERSION_4_2)
448 ipa_endpoint_program_delay(endpoint, enable);
450 gsi_modem_channel_flow_control(&ipa->gsi,
451 endpoint->channel_id,
456 /* Reset all modem endpoints to use the default exception endpoint */
457 int ipa_endpoint_modem_exception_reset_all(struct ipa *ipa)
459 struct gsi_trans *trans;
463 /* We need one command per modem TX endpoint, plus the commands
464 * that clear the pipeline.
466 count = ipa->modem_tx_count + ipa_cmd_pipeline_clear_count();
467 trans = ipa_cmd_trans_alloc(ipa, count);
469 dev_err(&ipa->pdev->dev,
470 "no transaction to reset modem exception endpoints\n");
474 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
475 struct ipa_endpoint *endpoint;
476 const struct ipa_reg *reg;
479 /* We only reset modem TX endpoints */
480 endpoint = &ipa->endpoint[endpoint_id];
481 if (!(endpoint->ee_id == GSI_EE_MODEM && endpoint->toward_ipa))
484 reg = ipa_reg(ipa, ENDP_STATUS);
485 offset = ipa_reg_n_offset(reg, endpoint_id);
487 /* Value written is 0, and all bits are updated. That
488 * means status is disabled on the endpoint, and as a
489 * result all other fields in the register are ignored.
491 ipa_cmd_register_write_add(trans, offset, 0, ~0, false);
494 ipa_cmd_pipeline_clear_add(trans);
496 gsi_trans_commit_wait(trans);
498 ipa_cmd_pipeline_clear_wait(ipa);
503 static void ipa_endpoint_init_cfg(struct ipa_endpoint *endpoint)
505 u32 endpoint_id = endpoint->endpoint_id;
506 struct ipa *ipa = endpoint->ipa;
507 enum ipa_cs_offload_en enabled;
508 const struct ipa_reg *reg;
511 reg = ipa_reg(ipa, ENDP_INIT_CFG);
512 /* FRAG_OFFLOAD_EN is 0 */
513 if (endpoint->config.checksum) {
514 enum ipa_version version = ipa->version;
516 if (endpoint->toward_ipa) {
519 /* Checksum header offset is in 4-byte units */
520 off = sizeof(struct rmnet_map_header) / sizeof(u32);
521 val |= ipa_reg_encode(reg, CS_METADATA_HDR_OFFSET, off);
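/* The QMAP (rmnet MAP) header is 4 bytes, so off is 1 here: the checksum
 * header starts one 32-bit word in, immediately after the QMAP header.
 */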
523 enabled = version < IPA_VERSION_4_5
525 : IPA_CS_OFFLOAD_INLINE;
527 enabled = version < IPA_VERSION_4_5
529 : IPA_CS_OFFLOAD_INLINE;
532 enabled = IPA_CS_OFFLOAD_NONE;
534 val |= ipa_reg_encode(reg, CS_OFFLOAD_EN, enabled);
535 /* CS_GEN_QMB_MASTER_SEL is 0 */
537 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
540 static void ipa_endpoint_init_nat(struct ipa_endpoint *endpoint)
542 u32 endpoint_id = endpoint->endpoint_id;
543 struct ipa *ipa = endpoint->ipa;
544 const struct ipa_reg *reg;
547 if (!endpoint->toward_ipa)
550 reg = ipa_reg(ipa, ENDP_INIT_NAT);
551 val = ipa_reg_encode(reg, NAT_EN, IPA_NAT_BYPASS);
553 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
557 ipa_qmap_header_size(enum ipa_version version, struct ipa_endpoint *endpoint)
559 u32 header_size = sizeof(struct rmnet_map_header);
561 /* Without checksum offload, we just have the MAP header */
562 if (!endpoint->config.checksum)
565 if (version < IPA_VERSION_4_5) {
566 /* Checksum header inserted for AP TX endpoints only */
567 if (endpoint->toward_ipa)
568 header_size += sizeof(struct rmnet_map_ul_csum_header);
570 /* Checksum header is used in both directions */
571 header_size += sizeof(struct rmnet_map_v5_csum_header);
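/* Illustrative sizes: the MAP header is 4 bytes and both checksum
 * headers are 4 bytes, so the result is 4, or 8 when a checksum
 * header is included.
 */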
577 /* Encoded value for ENDP_INIT_HDR register HDR_LEN* field(s) */
578 static u32 ipa_header_size_encode(enum ipa_version version,
579 const struct ipa_reg *reg, u32 header_size)
581 u32 field_max = ipa_reg_field_max(reg, HDR_LEN);
584 /* We know field_max can be used as a mask (2^n - 1) */
585 val = ipa_reg_encode(reg, HDR_LEN, header_size & field_max);
586 if (version < IPA_VERSION_4_5) {
587 WARN_ON(header_size > field_max);
591 /* IPA v4.5 adds a few more most-significant bits */
592 header_size >>= hweight32(field_max);
593 WARN_ON(header_size > ipa_reg_field_max(reg, HDR_LEN_MSB));
594 val |= ipa_reg_encode(reg, HDR_LEN_MSB, header_size);
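/* Illustrative only (real QMAP headers are much smaller): with a 6-bit
 * HDR_LEN field (field_max 63), a header_size of 100 would encode
 * 100 & 63 = 36 in HDR_LEN and 100 >> 6 = 1 in HDR_LEN_MSB.
 */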
599 /* Encoded value for ENDP_INIT_HDR register OFST_METADATA* field(s) */
600 static u32 ipa_metadata_offset_encode(enum ipa_version version,
601 const struct ipa_reg *reg, u32 offset)
603 u32 field_max = ipa_reg_field_max(reg, HDR_OFST_METADATA);
606 /* We know field_max can be used as a mask (2^n - 1) */
607 val = ipa_reg_encode(reg, HDR_OFST_METADATA, offset);
608 if (version < IPA_VERSION_4_5) {
609 WARN_ON(offset > field_max);
613 /* IPA v4.5 adds a few more most-significant bits */
614 offset >>= hweight32(field_max);
615 WARN_ON(offset > ipa_reg_field_max(reg, HDR_OFST_METADATA_MSB));
616 val |= ipa_reg_encode(reg, HDR_OFST_METADATA_MSB, offset);
622 * ipa_endpoint_init_hdr() - Initialize HDR endpoint configuration register
623 * @endpoint: Endpoint pointer
625 * We program QMAP endpoints so each packet received is preceded by a QMAP
626 * header structure. The QMAP header contains a 1-byte mux_id and 2-byte
627 * packet size field, and we have the IPA hardware populate both for each
628 * received packet. The header is configured (in the HDR_EXT register)
629 * to use big endian format.
631 * The packet size is written into the QMAP header's pkt_len field. That
632 * location is defined here using the HDR_OFST_PKT_SIZE field.
634 * The mux_id comes from a 4-byte metadata value supplied with each packet
635 * by the modem. It is *not* a QMAP header, but it does contain the mux_id
636 * value that we want, in its low-order byte. A bitmask defined in the
637 * endpoint's METADATA_MASK register defines which byte within the modem
638 * metadata contains the mux_id. And the OFST_METADATA field programmed
639 * here indicates where the extracted byte should be placed within the QMAP
642 static void ipa_endpoint_init_hdr(struct ipa_endpoint *endpoint)
644 u32 endpoint_id = endpoint->endpoint_id;
645 struct ipa *ipa = endpoint->ipa;
646 const struct ipa_reg *reg;
649 reg = ipa_reg(ipa, ENDP_INIT_HDR);
650 if (endpoint->config.qmap) {
651 enum ipa_version version = ipa->version;
654 header_size = ipa_qmap_header_size(version, endpoint);
655 val = ipa_header_size_encode(version, reg, header_size);
657 /* Define how to fill fields in a received QMAP header */
658 if (!endpoint->toward_ipa) {
659 u32 off; /* Field offset within header */
661 /* Where IPA will write the metadata value */
662 off = offsetof(struct rmnet_map_header, mux_id);
663 val |= ipa_metadata_offset_encode(version, reg, off);
665 /* Where IPA will write the length */
666 off = offsetof(struct rmnet_map_header, pkt_len);
667 /* Upper bits are stored in HDR_EXT with IPA v4.5 */
668 if (version >= IPA_VERSION_4_5)
669 off &= ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
671 val |= ipa_reg_bit(reg, HDR_OFST_PKT_SIZE_VALID);
672 val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE, off);
674 /* For QMAP TX, metadata offset is 0 (modem assumes this) */
675 val |= ipa_reg_bit(reg, HDR_OFST_METADATA_VALID);
677 /* HDR_ADDITIONAL_CONST_LEN is 0; (RX only) */
678 /* HDR_A5_MUX is 0 */
679 /* HDR_LEN_INC_DEAGG_HDR is 0 */
680 /* HDR_METADATA_REG_VALID is 0 (TX only, version < v4.5) */
683 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
686 static void ipa_endpoint_init_hdr_ext(struct ipa_endpoint *endpoint)
688 u32 pad_align = endpoint->config.rx.pad_align;
689 u32 endpoint_id = endpoint->endpoint_id;
690 struct ipa *ipa = endpoint->ipa;
691 const struct ipa_reg *reg;
694 reg = ipa_reg(ipa, ENDP_INIT_HDR_EXT);
695 if (endpoint->config.qmap) {
696 /* We have a header, so we must specify its endianness */
697 val |= ipa_reg_bit(reg, HDR_ENDIANNESS); /* big endian */
699 /* A QMAP header contains a 6 bit pad field at offset 0.
700 * The RMNet driver assumes this field is meaningful in
701 * packets it receives, and assumes the header's payload
702 * length includes that padding. The RMNet driver does
703 * *not* pad packets it sends, however, so the pad field
704 * (although 0) should be ignored.
706 if (!endpoint->toward_ipa) {
707 val |= ipa_reg_bit(reg, HDR_TOTAL_LEN_OR_PAD_VALID);
708 /* HDR_TOTAL_LEN_OR_PAD is 0 (pad, not total_len) */
709 val |= ipa_reg_bit(reg, HDR_PAYLOAD_LEN_INC_PADDING);
710 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0 */
714 /* HDR_PAYLOAD_LEN_INC_PADDING is 0 */
715 if (!endpoint->toward_ipa)
716 val |= ipa_reg_encode(reg, HDR_PAD_TO_ALIGNMENT, pad_align);
718 /* IPA v4.5 adds some most-significant bits to a few fields,
719 * two of which are defined in the HDR (not HDR_EXT) register.
721 if (ipa->version >= IPA_VERSION_4_5) {
722 /* HDR_TOTAL_LEN_OR_PAD_OFFSET is 0, so MSB is 0 */
723 if (endpoint->config.qmap && !endpoint->toward_ipa) {
724 u32 mask = ipa_reg_field_max(reg, HDR_OFST_PKT_SIZE);
725 u32 off; /* Field offset within header */
727 off = offsetof(struct rmnet_map_header, pkt_len);
728 /* Low bits are in the ENDP_INIT_HDR register */
729 off >>= hweight32(mask);
730 val |= ipa_reg_encode(reg, HDR_OFST_PKT_SIZE_MSB, off);
731 /* HDR_ADDITIONAL_CONST_LEN is 0 so MSB is 0 */
735 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
738 static void ipa_endpoint_init_hdr_metadata_mask(struct ipa_endpoint *endpoint)
740 u32 endpoint_id = endpoint->endpoint_id;
741 struct ipa *ipa = endpoint->ipa;
742 const struct ipa_reg *reg;
746 if (endpoint->toward_ipa)
747 return; /* Register not valid for TX endpoints */
749 reg = ipa_reg(ipa, ENDP_INIT_HDR_METADATA_MASK);
750 offset = ipa_reg_n_offset(reg, endpoint_id);
752 /* Note that HDR_ENDIANNESS indicates big endian header fields */
753 if (endpoint->config.qmap)
754 val = (__force u32)cpu_to_be32(IPA_ENDPOINT_QMAP_METADATA_MASK);
756 iowrite32(val, ipa->reg_virt + offset);
759 static void ipa_endpoint_init_mode(struct ipa_endpoint *endpoint)
761 struct ipa *ipa = endpoint->ipa;
762 const struct ipa_reg *reg;
766 if (!endpoint->toward_ipa)
767 return; /* Register not valid for RX endpoints */
769 reg = ipa_reg(ipa, ENDP_INIT_MODE);
770 if (endpoint->config.dma_mode) {
771 enum ipa_endpoint_name name = endpoint->config.dma_endpoint;
772 u32 dma_endpoint_id = ipa->name_map[name]->endpoint_id;
774 val = ipa_reg_encode(reg, ENDP_MODE, IPA_DMA);
775 val |= ipa_reg_encode(reg, DEST_PIPE_INDEX, dma_endpoint_id);
777 val = ipa_reg_encode(reg, ENDP_MODE, IPA_BASIC);
779 /* All other bits unspecified (and 0) */
781 offset = ipa_reg_n_offset(reg, endpoint->endpoint_id);
782 iowrite32(val, ipa->reg_virt + offset);
785 /* For IPA v4.5+, times are expressed using Qtime. The AP uses one of two
786 * pulse generators (0 and 1) to measure elapsed time. In ipa_qtime_config()
787 * they're configured to have granularity 100 usec and 1 msec, respectively.
789 * The return value is the positive or negative Qtime value to use to
790 * express the (microsecond) time provided. A positive return value
791 * means pulse generator 0 can be used; otherwise use pulse generator 1.
793 static int ipa_qtime_val(u32 microseconds, u32 max)
797 /* Use 100 microsecond granularity if possible */
798 val = DIV_ROUND_CLOSEST(microseconds, 100);
802 /* Have to use pulse generator 1 (millisecond granularity) */
803 val = DIV_ROUND_CLOSEST(microseconds, 1000);
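/* Example (illustrative): with a 5-bit limit field (max 31), 2500
 * microseconds yields 25 and pulse generator 0 is used; 20000
 * microseconds overflows 100-microsecond granularity (200 > 31), so
 * the millisecond path is taken and -20 is returned.
 */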
809 /* Encode the aggregation timer limit (microseconds) based on IPA version */
810 static u32 aggr_time_limit_encode(struct ipa *ipa, const struct ipa_reg *reg,
817 return 0; /* Nothing to compute if time limit is 0 */
819 max = ipa_reg_field_max(reg, TIME_LIMIT);
820 if (ipa->version >= IPA_VERSION_4_5) {
824 /* Compute the Qtime limit value to use */
825 ret = ipa_qtime_val(microseconds, max);
828 gran_sel = ipa_reg_bit(reg, AGGR_GRAN_SEL);
834 return gran_sel | ipa_reg_encode(reg, TIME_LIMIT, val);
837 /* We program aggregation granularity in ipa_hardware_config() */
838 val = DIV_ROUND_CLOSEST(microseconds, IPA_AGGR_GRANULARITY);
839 WARN(val > max, "aggr_time_limit too large (%u > %u usec)\n",
840 microseconds, max * IPA_AGGR_GRANULARITY);
842 return ipa_reg_encode(reg, TIME_LIMIT, val);
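/* For instance, assuming IPA_AGGR_GRANULARITY is 500 microseconds, a
 * 1000 microsecond limit encodes as 2 granularity ticks here; on IPA
 * v4.5+ the same limit is expressed as 10 Qtime units of 100
 * microseconds with AGGR_GRAN_SEL left clear.
 */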
845 static void ipa_endpoint_init_aggr(struct ipa_endpoint *endpoint)
847 u32 endpoint_id = endpoint->endpoint_id;
848 struct ipa *ipa = endpoint->ipa;
849 const struct ipa_reg *reg;
852 reg = ipa_reg(ipa, ENDP_INIT_AGGR);
853 if (endpoint->config.aggregation) {
854 if (!endpoint->toward_ipa) {
855 const struct ipa_endpoint_rx *rx_config;
859 rx_config = &endpoint->config.rx;
860 val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_AGGR);
861 val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_GENERIC);
863 buffer_size = rx_config->buffer_size;
864 limit = ipa_aggr_size_kb(buffer_size - NET_SKB_PAD,
865 rx_config->aggr_hard_limit);
866 val |= ipa_reg_encode(reg, BYTE_LIMIT, limit);
868 limit = rx_config->aggr_time_limit;
869 val |= aggr_time_limit_encode(ipa, reg, limit);
871 /* AGGR_PKT_LIMIT is 0 (unlimited) */
873 if (rx_config->aggr_close_eof)
874 val |= ipa_reg_bit(reg, SW_EOF_ACTIVE);
876 val |= ipa_reg_encode(reg, AGGR_EN, IPA_ENABLE_DEAGGR);
877 val |= ipa_reg_encode(reg, AGGR_TYPE, IPA_QCMAP);
878 /* other fields ignored */
880 /* AGGR_FORCE_CLOSE is 0 */
881 /* AGGR_GRAN_SEL is 0 for IPA v4.5 */
883 val |= ipa_reg_encode(reg, AGGR_EN, IPA_BYPASS_AGGR);
884 /* other fields ignored */
887 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
890 /* The head-of-line blocking timer is defined as a tick count. For
891 * IPA version 4.5 the tick count is based on the Qtimer, which is
892 * derived from the 19.2 MHz SoC XO clock. For older IPA versions
893 * each tick represents 128 cycles of the IPA core clock.
895 * Return the encoded form of the timeout period provided, suitable for
896 * writing to the ENDP_INIT_HOL_BLOCK_TIMER register.
898 static u32 hol_block_timer_encode(struct ipa *ipa, const struct ipa_reg *reg,
909 return 0; /* Nothing to compute if timer period is 0 */
911 if (ipa->version >= IPA_VERSION_4_5) {
912 u32 max = ipa_reg_field_max(reg, TIMER_LIMIT);
916 /* Compute the Qtime limit value to use */
917 ret = ipa_qtime_val(microseconds, max);
920 gran_sel = ipa_reg_bit(reg, TIMER_GRAN_SEL);
926 return gran_sel | ipa_reg_encode(reg, TIMER_LIMIT, val);
929 /* Use 64 bit arithmetic to avoid overflow */
930 rate = ipa_core_clock_rate(ipa);
931 ticks = DIV_ROUND_CLOSEST(microseconds * rate, 128 * USEC_PER_SEC);
933 /* We still need the result to fit into the field */
934 WARN_ON(ticks > ipa_reg_field_max(reg, TIMER_BASE_VALUE));
936 /* IPA v3.5.1 through v4.1 just record the tick count */
937 if (ipa->version < IPA_VERSION_4_2)
938 return ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks);
940 /* For IPA v4.2, the tick count is represented by base and
941 * scale fields within the 32-bit timer register, where:
942 * ticks = base << scale;
943 * The best precision is achieved when the base value is as
944 * large as possible. Find the highest set bit in the tick
945 * count, and extract the number of bits in the base field
946 * such that high bit is included.
948 high = fls(ticks); /* 1..32 (or warning above) */
949 width = hweight32(ipa_reg_fmask(reg, TIMER_BASE_VALUE));
950 scale = high > width ? high - width : 0;
952 /* If we're scaling, round up to get a closer result */
953 ticks += 1 << (scale - 1);
954 /* High bit was set, so rounding might have affected it */
955 if (fls(ticks) != high)
959 val = ipa_reg_encode(reg, TIMER_SCALE, scale);
960 val |= ipa_reg_encode(reg, TIMER_BASE_VALUE, (u32)ticks >> scale);
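/* Worked example (assuming a 5-bit base field): ticks = 1000 gives
 * fls() = 10, so scale = 5; rounding adds 16 (giving 1016) without
 * changing the high bit, so base = 1016 >> 5 = 31 and the hardware
 * counts 31 << 5 = 992 ticks, within 1% of the requested period.
 */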
965 /* If microseconds is 0, timeout is immediate */
966 static void ipa_endpoint_init_hol_block_timer(struct ipa_endpoint *endpoint,
969 u32 endpoint_id = endpoint->endpoint_id;
970 struct ipa *ipa = endpoint->ipa;
971 const struct ipa_reg *reg;
974 /* This should only be changed when HOL_BLOCK_EN is disabled */
975 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_TIMER);
976 val = hol_block_timer_encode(ipa, reg, microseconds);
978 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
982 ipa_endpoint_init_hol_block_en(struct ipa_endpoint *endpoint, bool enable)
984 u32 endpoint_id = endpoint->endpoint_id;
985 struct ipa *ipa = endpoint->ipa;
986 const struct ipa_reg *reg;
990 reg = ipa_reg(ipa, ENDP_INIT_HOL_BLOCK_EN);
991 offset = ipa_reg_n_offset(reg, endpoint_id);
992 val = enable ? ipa_reg_bit(reg, HOL_BLOCK_EN) : 0;
994 iowrite32(val, ipa->reg_virt + offset);
996 /* When enabling, the register must be written twice for IPA v4.5+ */
997 if (enable && ipa->version >= IPA_VERSION_4_5)
998 iowrite32(val, ipa->reg_virt + offset);
1001 /* Assumes HOL_BLOCK is in disabled state */
1002 static void ipa_endpoint_init_hol_block_enable(struct ipa_endpoint *endpoint,
1005 ipa_endpoint_init_hol_block_timer(endpoint, microseconds);
1006 ipa_endpoint_init_hol_block_en(endpoint, true);
1009 static void ipa_endpoint_init_hol_block_disable(struct ipa_endpoint *endpoint)
1011 ipa_endpoint_init_hol_block_en(endpoint, false);
1014 void ipa_endpoint_modem_hol_block_clear_all(struct ipa *ipa)
1016 u32 endpoint_id = 0;
1018 while (endpoint_id < ipa->endpoint_count) {
1019 struct ipa_endpoint *endpoint = &ipa->endpoint[endpoint_id++];
1021 if (endpoint->toward_ipa || endpoint->ee_id != GSI_EE_MODEM)
1024 ipa_endpoint_init_hol_block_disable(endpoint);
1025 ipa_endpoint_init_hol_block_enable(endpoint, 0);
1029 static void ipa_endpoint_init_deaggr(struct ipa_endpoint *endpoint)
1031 u32 endpoint_id = endpoint->endpoint_id;
1032 struct ipa *ipa = endpoint->ipa;
1033 const struct ipa_reg *reg;
1036 if (!endpoint->toward_ipa)
1037 return; /* Register not valid for RX endpoints */
1039 reg = ipa_reg(ipa, ENDP_INIT_DEAGGR);
1040 /* DEAGGR_HDR_LEN is 0 */
1041 /* PACKET_OFFSET_VALID is 0 */
1042 /* PACKET_OFFSET_LOCATION is ignored (not valid) */
1043 /* MAX_PACKET_LEN is 0 (not enforced) */
1045 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1048 static void ipa_endpoint_init_rsrc_grp(struct ipa_endpoint *endpoint)
1050 u32 resource_group = endpoint->config.resource_group;
1051 u32 endpoint_id = endpoint->endpoint_id;
1052 struct ipa *ipa = endpoint->ipa;
1053 const struct ipa_reg *reg;
1056 reg = ipa_reg(ipa, ENDP_INIT_RSRC_GRP);
1057 val = ipa_reg_encode(reg, ENDP_RSRC_GRP, resource_group);
1059 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1062 static void ipa_endpoint_init_seq(struct ipa_endpoint *endpoint)
1064 u32 endpoint_id = endpoint->endpoint_id;
1065 struct ipa *ipa = endpoint->ipa;
1066 const struct ipa_reg *reg;
1069 if (!endpoint->toward_ipa)
1070 return; /* Register not valid for RX endpoints */
1072 reg = ipa_reg(ipa, ENDP_INIT_SEQ);
1074 /* Low-order byte configures primary packet processing */
1075 val = ipa_reg_encode(reg, SEQ_TYPE, endpoint->config.tx.seq_type);
1077 /* Second byte (if supported) configures replicated packet processing */
1078 if (ipa->version < IPA_VERSION_4_5)
1079 val |= ipa_reg_encode(reg, SEQ_REP_TYPE,
1080 endpoint->config.tx.seq_rep_type);
1082 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1086 * ipa_endpoint_skb_tx() - Transmit a socket buffer
1087 * @endpoint: Endpoint pointer
1088 * @skb: Socket buffer to send
1090 * Returns: 0 if successful, or a negative error code
1092 int ipa_endpoint_skb_tx(struct ipa_endpoint *endpoint, struct sk_buff *skb)
1094 struct gsi_trans *trans;
1098 /* Make sure source endpoint's TLV FIFO has enough entries to
1099 * hold the linear portion of the skb and all its fragments.
1100 * If not, see if we can linearize it before giving up.
1102 nr_frags = skb_shinfo(skb)->nr_frags;
1103 if (nr_frags > endpoint->skb_frag_max) {
1104 if (skb_linearize(skb))
1109 trans = ipa_endpoint_trans_alloc(endpoint, 1 + nr_frags);
1113 ret = gsi_trans_skb_add(trans, skb);
1115 goto err_trans_free;
1116 trans->data = skb; /* transaction owns skb now */
1118 gsi_trans_commit(trans, !netdev_xmit_more());
1123 gsi_trans_free(trans);
1128 static void ipa_endpoint_status(struct ipa_endpoint *endpoint)
1130 u32 endpoint_id = endpoint->endpoint_id;
1131 struct ipa *ipa = endpoint->ipa;
1132 const struct ipa_reg *reg;
1135 reg = ipa_reg(ipa, ENDP_STATUS);
1136 if (endpoint->config.status_enable) {
1137 val |= ipa_reg_bit(reg, STATUS_EN);
1138 if (endpoint->toward_ipa) {
1139 enum ipa_endpoint_name name;
1140 u32 status_endpoint_id;
1142 name = endpoint->config.tx.status_endpoint;
1143 status_endpoint_id = ipa->name_map[name]->endpoint_id;
1145 val |= ipa_reg_encode(reg, STATUS_ENDP,
1146 status_endpoint_id);
1148 /* STATUS_LOCATION is 0, meaning status element precedes
1149 * packet (not present for IPA v4.5+)
1151 /* STATUS_PKT_SUPPRESS_FMASK is 0 (not present for v4.0+) */
1154 iowrite32(val, ipa->reg_virt + ipa_reg_n_offset(reg, endpoint_id));
1157 static int ipa_endpoint_replenish_one(struct ipa_endpoint *endpoint,
1158 struct gsi_trans *trans)
1166 buffer_size = endpoint->config.rx.buffer_size;
1167 page = dev_alloc_pages(get_order(buffer_size));
1171 /* Offset the buffer to make space for skb headroom */
1172 offset = NET_SKB_PAD;
1173 len = buffer_size - offset;
1175 ret = gsi_trans_page_add(trans, page, len, offset);
1179 trans->data = page; /* transaction owns page now */
1185 * ipa_endpoint_replenish() - Replenish endpoint receive buffers
1186 * @endpoint: Endpoint to be replenished
1188 * The IPA hardware can hold a fixed number of receive buffers for an RX
1189 * endpoint, based on the number of entries in the underlying channel ring
1190 * buffer. If an endpoint's "backlog" is non-zero, it indicates how many
1191 * more receive buffers can be supplied to the hardware. Replenishing for
1192 * an endpoint can be disabled, in which case buffers are not queued to
1195 static void ipa_endpoint_replenish(struct ipa_endpoint *endpoint)
1197 struct gsi_trans *trans;
1199 if (!test_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags))
1202 /* Skip it if it's already active */
1203 if (test_and_set_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags))
1206 while ((trans = ipa_endpoint_trans_alloc(endpoint, 1))) {
1209 if (ipa_endpoint_replenish_one(endpoint, trans))
1210 goto try_again_later;
1213 /* Ring the doorbell if we've got a full batch */
1214 doorbell = !(++endpoint->replenish_count % IPA_REPLENISH_BATCH);
1215 gsi_trans_commit(trans, doorbell);
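/* With IPA_REPLENISH_BATCH of 16, the doorbell rings on every 16th
 * buffer queued rather than after each one.
 */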
1218 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1223 gsi_trans_free(trans);
1224 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1226 /* Whenever a receive buffer transaction completes we'll try to
1227 * replenish again. It's unlikely, but if we fail to supply even
1228 * one buffer, nothing will trigger another replenish attempt.
1229 * If the hardware has no receive buffers queued, schedule work to
1230 * try replenishing again.
1232 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1233 schedule_delayed_work(&endpoint->replenish_work,
1234 msecs_to_jiffies(1));
1237 static void ipa_endpoint_replenish_enable(struct ipa_endpoint *endpoint)
1239 set_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1241 /* Start replenishing if hardware currently has no buffers */
1242 if (gsi_channel_trans_idle(&endpoint->ipa->gsi, endpoint->channel_id))
1243 ipa_endpoint_replenish(endpoint);
1246 static void ipa_endpoint_replenish_disable(struct ipa_endpoint *endpoint)
1248 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1251 static void ipa_endpoint_replenish_work(struct work_struct *work)
1253 struct delayed_work *dwork = to_delayed_work(work);
1254 struct ipa_endpoint *endpoint;
1256 endpoint = container_of(dwork, struct ipa_endpoint, replenish_work);
1258 ipa_endpoint_replenish(endpoint);
1261 static void ipa_endpoint_skb_copy(struct ipa_endpoint *endpoint,
1262 void *data, u32 len, u32 extra)
1264 struct sk_buff *skb;
1266 if (!endpoint->netdev)
1269 skb = __dev_alloc_skb(len, GFP_ATOMIC);
1271 /* Copy the data into the socket buffer and receive it */
1273 memcpy(skb->data, data, len);
1274 skb->truesize += extra;
1277 ipa_modem_skb_rx(endpoint->netdev, skb);
1280 static bool ipa_endpoint_skb_build(struct ipa_endpoint *endpoint,
1281 struct page *page, u32 len)
1283 u32 buffer_size = endpoint->config.rx.buffer_size;
1284 struct sk_buff *skb;
1286 /* Nothing to do if there's no netdev */
1287 if (!endpoint->netdev)
1290 WARN_ON(len > SKB_WITH_OVERHEAD(buffer_size - NET_SKB_PAD));
1292 skb = build_skb(page_address(page), buffer_size);
1294 /* Reserve the headroom and account for the data */
1295 skb_reserve(skb, NET_SKB_PAD);
1299 /* Receive the buffer (or record drop if unable to build it) */
1300 ipa_modem_skb_rx(endpoint->netdev, skb);
1305 /* The format of a packet status element is the same for several status
1306 * types (opcodes). Other types aren't currently supported.
1308 static bool ipa_status_format_packet(enum ipa_status_opcode opcode)
1311 case IPA_STATUS_OPCODE_PACKET:
1312 case IPA_STATUS_OPCODE_DROPPED_PACKET:
1313 case IPA_STATUS_OPCODE_SUSPENDED_PACKET:
1314 case IPA_STATUS_OPCODE_PACKET_2ND_PASS:
1321 static bool ipa_endpoint_status_skip(struct ipa_endpoint *endpoint,
1322 const struct ipa_status *status)
1326 if (!ipa_status_format_packet(status->opcode))
1328 if (!status->pkt_len)
1330 endpoint_id = u8_get_bits(status->endp_dst_idx,
1331 IPA_STATUS_DST_IDX_FMASK);
1332 if (endpoint_id != endpoint->endpoint_id)
1335 return false; /* Don't skip this packet, process it */
1338 static bool ipa_endpoint_status_tag(struct ipa_endpoint *endpoint,
1339 const struct ipa_status *status)
1341 struct ipa_endpoint *command_endpoint;
1342 struct ipa *ipa = endpoint->ipa;
1345 if (!le16_get_bits(status->mask, IPA_STATUS_MASK_TAG_VALID_FMASK))
1346 return false; /* No valid tag */
1348 /* The status contains a valid tag. We know the packet was sent to
1349 * this endpoint (already verified by ipa_endpoint_status_skip()).
1350 * If the packet came from the AP->command TX endpoint we know
1351 * this packet was sent as part of the pipeline clear process.
1353 endpoint_id = u8_get_bits(status->endp_src_idx,
1354 IPA_STATUS_SRC_IDX_FMASK);
1355 command_endpoint = ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX];
1356 if (endpoint_id == command_endpoint->endpoint_id) {
1357 complete(&ipa->completion);
1359 dev_err(&ipa->pdev->dev,
1360 "unexpected tagged packet from endpoint %u\n",
1367 /* Return whether the status indicates the packet should be dropped */
1368 static bool ipa_endpoint_status_drop(struct ipa_endpoint *endpoint,
1369 const struct ipa_status *status)
1373 /* If the status indicates a tagged transfer, we'll drop the packet */
1374 if (ipa_endpoint_status_tag(endpoint, status))
1377 /* Deaggregation exceptions we drop; all other types we consume */
1378 if (status->exception)
1379 return status->exception == IPA_STATUS_EXCEPTION_DEAGGR;
1381 /* Drop the packet if it fails to match a routing rule; otherwise no */
1382 val = le32_get_bits(status->flags1, IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
1384 return val == field_max(IPA_STATUS_FLAGS1_RT_RULE_ID_FMASK);
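/* An all-ones rule ID is what the hardware reports when no routing
 * rule matched, so such packets are dropped.
 */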
1387 static void ipa_endpoint_status_parse(struct ipa_endpoint *endpoint,
1388 struct page *page, u32 total_len)
1390 u32 buffer_size = endpoint->config.rx.buffer_size;
1391 void *data = page_address(page) + NET_SKB_PAD;
1392 u32 unused = buffer_size - total_len;
1393 u32 resid = total_len;
1396 const struct ipa_status *status = data;
1400 if (resid < sizeof(*status)) {
1401 dev_err(&endpoint->ipa->pdev->dev,
1402 "short message (%u bytes < %zu byte status)\n",
1403 resid, sizeof(*status));
1407 /* Skip over status packets that lack packet data */
1408 if (ipa_endpoint_status_skip(endpoint, status)) {
1409 data += sizeof(*status);
1410 resid -= sizeof(*status);
1414 /* Compute the amount of buffer space consumed by the packet,
1415 * including the status element. If the hardware is configured
1416 * to pad packet data to an aligned boundary, account for that.
1417 * And if checksum offload is enabled a trailer containing
1418 * computed checksum information will be appended.
1420 align = endpoint->config.rx.pad_align ? : 1;
1421 len = le16_to_cpu(status->pkt_len);
1422 len = sizeof(*status) + ALIGN(len, align);
1423 if (endpoint->config.checksum)
1424 len += sizeof(struct rmnet_map_dl_csum_trailer);
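/* Example (illustrative): for a status element describing a 1400-byte
 * packet with no pad alignment and checksum offload enabled, len is
 * sizeof(*status) + 1400 plus the 8-byte checksum trailer.
 */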
1426 if (!ipa_endpoint_status_drop(endpoint, status)) {
1431 /* Client receives only packet data (no status) */
1432 data2 = data + sizeof(*status);
1433 len2 = le16_to_cpu(status->pkt_len);
1435 /* Have the true size reflect the extra unused space in
1436 * the original receive buffer. Distribute the "cost"
1437 * proportionately across all aggregated packets in the
1440 extra = DIV_ROUND_CLOSEST(unused * len, total_len);
1441 ipa_endpoint_skb_copy(endpoint, data2, len2, extra);
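/* For example, if an 8192-byte buffer holds 7000 bytes of aggregated
 * data (1192 bytes unused), a packet consuming 1500 of those bytes is
 * charged roughly 1192 * 1500 / 7000 (about 255) extra truesize bytes.
 */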
1444 /* Consume status and the full packet it describes */
1450 void ipa_endpoint_trans_complete(struct ipa_endpoint *endpoint,
1451 struct gsi_trans *trans)
1455 if (endpoint->toward_ipa)
1458 if (trans->cancelled)
1461 /* Parse or build a socket buffer using the actual received length */
1463 if (endpoint->config.status_enable)
1464 ipa_endpoint_status_parse(endpoint, page, trans->len);
1465 else if (ipa_endpoint_skb_build(endpoint, page, trans->len))
1466 trans->data = NULL; /* Pages have been consumed */
1468 ipa_endpoint_replenish(endpoint);
1471 void ipa_endpoint_trans_release(struct ipa_endpoint *endpoint,
1472 struct gsi_trans *trans)
1474 if (endpoint->toward_ipa) {
1475 struct ipa *ipa = endpoint->ipa;
1477 /* Nothing to do for command transactions */
1478 if (endpoint != ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]) {
1479 struct sk_buff *skb = trans->data;
1482 dev_kfree_skb_any(skb);
1485 struct page *page = trans->data;
1492 void ipa_endpoint_default_route_set(struct ipa *ipa, u32 endpoint_id)
1494 const struct ipa_reg *reg;
1497 reg = ipa_reg(ipa, ROUTE);
1498 /* ROUTE_DIS is 0 */
1499 val = ipa_reg_encode(reg, ROUTE_DEF_PIPE, endpoint_id);
1500 val |= ipa_reg_bit(reg, ROUTE_DEF_HDR_TABLE);
1501 /* ROUTE_DEF_HDR_OFST is 0 */
1502 val |= ipa_reg_encode(reg, ROUTE_FRAG_DEF_PIPE, endpoint_id);
1503 val |= ipa_reg_bit(reg, ROUTE_DEF_RETAIN_HDR);
1505 iowrite32(val, ipa->reg_virt + ipa_reg_offset(reg));
1508 void ipa_endpoint_default_route_clear(struct ipa *ipa)
1510 ipa_endpoint_default_route_set(ipa, 0);
1514 * ipa_endpoint_reset_rx_aggr() - Reset RX endpoint with aggregation active
1515 * @endpoint: Endpoint to be reset
1517 * If aggregation is active on an RX endpoint when a reset is performed
1518 * on its underlying GSI channel, a special sequence of actions must be
1519 * taken to ensure the IPA pipeline is properly cleared.
1521 * Return: 0 if successful, or a negative error code
1523 static int ipa_endpoint_reset_rx_aggr(struct ipa_endpoint *endpoint)
1525 struct device *dev = &endpoint->ipa->pdev->dev;
1526 struct ipa *ipa = endpoint->ipa;
1527 struct gsi *gsi = &ipa->gsi;
1528 bool suspended = false;
1535 virt = kzalloc(len, GFP_KERNEL);
1539 addr = dma_map_single(dev, virt, len, DMA_FROM_DEVICE);
1540 if (dma_mapping_error(dev, addr)) {
1545 /* Force close aggregation before issuing the reset */
1546 ipa_endpoint_force_close(endpoint);
1548 /* Reset and reconfigure the channel with the doorbell engine
1549 * disabled. Then poll until we know aggregation is no longer
1550 * active. We'll re-enable the doorbell (if appropriate) when
1551 * we reset again below.
1553 gsi_channel_reset(gsi, endpoint->channel_id, false);
1555 /* Make sure the channel isn't suspended */
1556 suspended = ipa_endpoint_program_suspend(endpoint, false);
1558 /* Start channel and do a 1 byte read */
1559 ret = gsi_channel_start(gsi, endpoint->channel_id);
1561 goto out_suspend_again;
1563 ret = gsi_trans_read_byte(gsi, endpoint->channel_id, addr);
1565 goto err_endpoint_stop;
1567 /* Wait for aggregation to be closed on the channel */
1568 retries = IPA_ENDPOINT_RESET_AGGR_RETRY_MAX;
1570 if (!ipa_endpoint_aggr_active(endpoint))
1572 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1573 } while (retries--);
1575 /* Check one last time */
1576 if (ipa_endpoint_aggr_active(endpoint))
1577 dev_err(dev, "endpoint %u still active during reset\n",
1578 endpoint->endpoint_id);
1580 gsi_trans_read_byte_done(gsi, endpoint->channel_id);
1582 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1584 goto out_suspend_again;
1586 /* Finally, reset and reconfigure the channel again (re-enabling
1587 * the doorbell engine if appropriate). Sleep for 1 millisecond to
1588 * complete the channel reset sequence. Finish by suspending the
1589 * channel again (if necessary).
1591 gsi_channel_reset(gsi, endpoint->channel_id, true);
1593 usleep_range(USEC_PER_MSEC, 2 * USEC_PER_MSEC);
1595 goto out_suspend_again;
1598 (void)gsi_channel_stop(gsi, endpoint->channel_id);
1601 (void)ipa_endpoint_program_suspend(endpoint, true);
1602 dma_unmap_single(dev, addr, len, DMA_FROM_DEVICE);
1609 static void ipa_endpoint_reset(struct ipa_endpoint *endpoint)
1611 u32 channel_id = endpoint->channel_id;
1612 struct ipa *ipa = endpoint->ipa;
1616 /* On IPA v3.5.1, if an RX endpoint is reset while aggregation
1617 * is active, we need to handle things specially to recover.
1618 * All other cases just need to reset the underlying GSI channel.
1620 special = ipa->version < IPA_VERSION_4_0 && !endpoint->toward_ipa &&
1621 endpoint->config.aggregation;
1622 if (special && ipa_endpoint_aggr_active(endpoint))
1623 ret = ipa_endpoint_reset_rx_aggr(endpoint);
1625 gsi_channel_reset(&ipa->gsi, channel_id, true);
1628 dev_err(&ipa->pdev->dev,
1629 "error %d resetting channel %u for endpoint %u\n",
1630 ret, endpoint->channel_id, endpoint->endpoint_id);
1633 static void ipa_endpoint_program(struct ipa_endpoint *endpoint)
1635 if (endpoint->toward_ipa) {
1636 /* Newer versions of IPA use GSI channel flow control
1637 * instead of endpoint DELAY mode to prevent sending data.
1638 * Flow control is disabled for newly-allocated channels,
1639 * and we can assume flow control is not (ever) enabled
1640 * for AP TX channels.
1642 if (endpoint->ipa->version < IPA_VERSION_4_2)
1643 ipa_endpoint_program_delay(endpoint, false);
1645 /* Ensure suspend mode is off on all AP RX endpoints */
1646 (void)ipa_endpoint_program_suspend(endpoint, false);
1648 ipa_endpoint_init_cfg(endpoint);
1649 ipa_endpoint_init_nat(endpoint);
1650 ipa_endpoint_init_hdr(endpoint);
1651 ipa_endpoint_init_hdr_ext(endpoint);
1652 ipa_endpoint_init_hdr_metadata_mask(endpoint);
1653 ipa_endpoint_init_mode(endpoint);
1654 ipa_endpoint_init_aggr(endpoint);
1655 if (!endpoint->toward_ipa) {
1656 if (endpoint->config.rx.holb_drop)
1657 ipa_endpoint_init_hol_block_enable(endpoint, 0);
1659 ipa_endpoint_init_hol_block_disable(endpoint);
1661 ipa_endpoint_init_deaggr(endpoint);
1662 ipa_endpoint_init_rsrc_grp(endpoint);
1663 ipa_endpoint_init_seq(endpoint);
1664 ipa_endpoint_status(endpoint);
1667 int ipa_endpoint_enable_one(struct ipa_endpoint *endpoint)
1669 u32 endpoint_id = endpoint->endpoint_id;
1670 struct ipa *ipa = endpoint->ipa;
1671 struct gsi *gsi = &ipa->gsi;
1674 ret = gsi_channel_start(gsi, endpoint->channel_id);
1676 dev_err(&ipa->pdev->dev,
1677 "error %d starting %cX channel %u for endpoint %u\n",
1678 ret, endpoint->toward_ipa ? 'T' : 'R',
1679 endpoint->channel_id, endpoint_id);
1683 if (!endpoint->toward_ipa) {
1684 ipa_interrupt_suspend_enable(ipa->interrupt, endpoint_id);
1685 ipa_endpoint_replenish_enable(endpoint);
1688 __set_bit(endpoint_id, ipa->enabled);
1693 void ipa_endpoint_disable_one(struct ipa_endpoint *endpoint)
1695 u32 endpoint_id = endpoint->endpoint_id;
1696 struct ipa *ipa = endpoint->ipa;
1697 struct gsi *gsi = &ipa->gsi;
1700 if (!test_bit(endpoint_id, ipa->enabled))
1703 __clear_bit(endpoint_id, endpoint->ipa->enabled);
1705 if (!endpoint->toward_ipa) {
1706 ipa_endpoint_replenish_disable(endpoint);
1707 ipa_interrupt_suspend_disable(ipa->interrupt, endpoint_id);
1710 /* Note that if stop fails, the channel's state is not well-defined */
1711 ret = gsi_channel_stop(gsi, endpoint->channel_id);
1713 dev_err(&ipa->pdev->dev,
1714 "error %d attempting to stop endpoint %u\n", ret,
1718 void ipa_endpoint_suspend_one(struct ipa_endpoint *endpoint)
1720 struct device *dev = &endpoint->ipa->pdev->dev;
1721 struct gsi *gsi = &endpoint->ipa->gsi;
1724 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1727 if (!endpoint->toward_ipa) {
1728 ipa_endpoint_replenish_disable(endpoint);
1729 (void)ipa_endpoint_program_suspend(endpoint, true);
1732 ret = gsi_channel_suspend(gsi, endpoint->channel_id);
1734 dev_err(dev, "error %d suspending channel %u\n", ret,
1735 endpoint->channel_id);
1738 void ipa_endpoint_resume_one(struct ipa_endpoint *endpoint)
1740 struct device *dev = &endpoint->ipa->pdev->dev;
1741 struct gsi *gsi = &endpoint->ipa->gsi;
1744 if (!test_bit(endpoint->endpoint_id, endpoint->ipa->enabled))
1747 if (!endpoint->toward_ipa)
1748 (void)ipa_endpoint_program_suspend(endpoint, false);
1750 ret = gsi_channel_resume(gsi, endpoint->channel_id);
1752 dev_err(dev, "error %d resuming channel %u\n", ret,
1753 endpoint->channel_id);
1754 else if (!endpoint->toward_ipa)
1755 ipa_endpoint_replenish_enable(endpoint);
1758 void ipa_endpoint_suspend(struct ipa *ipa)
1760 if (!ipa->setup_complete)
1763 if (ipa->modem_netdev)
1764 ipa_modem_suspend(ipa->modem_netdev);
1766 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1767 ipa_endpoint_suspend_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1770 void ipa_endpoint_resume(struct ipa *ipa)
1772 if (!ipa->setup_complete)
1775 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_COMMAND_TX]);
1776 ipa_endpoint_resume_one(ipa->name_map[IPA_ENDPOINT_AP_LAN_RX]);
1778 if (ipa->modem_netdev)
1779 ipa_modem_resume(ipa->modem_netdev);
1782 static void ipa_endpoint_setup_one(struct ipa_endpoint *endpoint)
1784 struct gsi *gsi = &endpoint->ipa->gsi;
1785 u32 channel_id = endpoint->channel_id;
1787 /* Only AP endpoints get set up */
1788 if (endpoint->ee_id != GSI_EE_AP)
1791 endpoint->skb_frag_max = gsi->channel[channel_id].trans_tre_max - 1;
1792 if (!endpoint->toward_ipa) {
1793 /* RX transactions require a single TRE, so the maximum
1794 * backlog is the same as the maximum outstanding TREs.
1796 clear_bit(IPA_REPLENISH_ENABLED, endpoint->replenish_flags);
1797 clear_bit(IPA_REPLENISH_ACTIVE, endpoint->replenish_flags);
1798 INIT_DELAYED_WORK(&endpoint->replenish_work,
1799 ipa_endpoint_replenish_work);
1802 ipa_endpoint_program(endpoint);
1804 __set_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1807 static void ipa_endpoint_teardown_one(struct ipa_endpoint *endpoint)
1809 __clear_bit(endpoint->endpoint_id, endpoint->ipa->set_up);
1811 if (!endpoint->toward_ipa)
1812 cancel_delayed_work_sync(&endpoint->replenish_work);
1814 ipa_endpoint_reset(endpoint);
1817 void ipa_endpoint_setup(struct ipa *ipa)
1821 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1822 ipa_endpoint_setup_one(&ipa->endpoint[endpoint_id]);
1825 void ipa_endpoint_teardown(struct ipa *ipa)
1829 for_each_set_bit(endpoint_id, ipa->set_up, ipa->endpoint_count)
1830 ipa_endpoint_teardown_one(&ipa->endpoint[endpoint_id]);
1833 void ipa_endpoint_deconfig(struct ipa *ipa)
1835 ipa->available_count = 0;
1836 bitmap_free(ipa->available);
1837 ipa->available = NULL;
1840 int ipa_endpoint_config(struct ipa *ipa)
1842 struct device *dev = &ipa->pdev->dev;
1843 const struct ipa_reg *reg;
1851 /* Prior to IPA v3.5, the FLAVOR_0 register was not supported.
1852 * Furthermore, the endpoints were not grouped such that TX
1853 * endpoint numbers started with 0 and RX endpoints had numbers
1854 * higher than all TX endpoints, so we can't do the simple
1855 * direction check used for newer hardware below.
1857 * For hardware that doesn't support the FLAVOR_0 register,
1858 * just set the available mask to support any endpoint, and
1859 * assume the configuration is valid.
1861 if (ipa->version < IPA_VERSION_3_5) {
1862 ipa->available = bitmap_zalloc(IPA_ENDPOINT_MAX, GFP_KERNEL);
1863 if (!ipa->available)
1865 ipa->available_count = IPA_ENDPOINT_MAX;
1867 bitmap_set(ipa->available, 0, IPA_ENDPOINT_MAX);
1872 /* Find out about the endpoints supplied by the hardware, and ensure
1873 * the highest one doesn't exceed the number supported by software.
1875 reg = ipa_reg(ipa, FLAVOR_0);
1876 val = ioread32(ipa->reg_virt + ipa_reg_offset(reg));
1878 /* Our RX is an IPA producer; our TX is an IPA consumer. */
1879 tx_count = ipa_reg_decode(reg, MAX_CONS_PIPES, val);
1880 rx_count = ipa_reg_decode(reg, MAX_PROD_PIPES, val);
1881 rx_base = ipa_reg_decode(reg, PROD_LOWEST, val);
1883 limit = rx_base + rx_count;
1884 if (limit > IPA_ENDPOINT_MAX) {
1885 dev_err(dev, "too many endpoints, %u > %u\n",
1886 limit, IPA_ENDPOINT_MAX);
1890 /* Allocate and initialize the available endpoint bitmap */
1891 ipa->available = bitmap_zalloc(limit, GFP_KERNEL);
1892 if (!ipa->available)
1894 ipa->available_count = limit;
1896 /* Mark all supported RX and TX endpoints as available */
1897 bitmap_set(ipa->available, 0, tx_count);
1898 bitmap_set(ipa->available, rx_base, rx_count);
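/* For example, FLAVOR_0 values of tx_count = 10, rx_base = 16 and
 * rx_count = 12 mark endpoints 0-9 and 16-27 available, with a limit
 * of 28.
 */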
1900 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count) {
1901 struct ipa_endpoint *endpoint;
1903 if (endpoint_id >= limit) {
1904 dev_err(dev, "invalid endpoint id, %u > %u\n",
1905 endpoint_id, limit - 1);
1906 goto err_free_bitmap;
1909 if (!test_bit(endpoint_id, ipa->available)) {
1910 dev_err(dev, "unavailable endpoint id %u\n",
1912 goto err_free_bitmap;
1915 /* Make sure it's pointing in the right direction */
1916 endpoint = &ipa->endpoint[endpoint_id];
1917 if (endpoint->toward_ipa) {
1918 if (endpoint_id < tx_count)
1920 } else if (endpoint_id >= rx_base) {
1924 dev_err(dev, "endpoint id %u wrong direction\n", endpoint_id);
1925 goto err_free_bitmap;
1931 ipa_endpoint_deconfig(ipa);
1936 static void ipa_endpoint_init_one(struct ipa *ipa, enum ipa_endpoint_name name,
1937 const struct ipa_gsi_endpoint_data *data)
1939 struct ipa_endpoint *endpoint;
1941 endpoint = &ipa->endpoint[data->endpoint_id];
1943 if (data->ee_id == GSI_EE_AP)
1944 ipa->channel_map[data->channel_id] = endpoint;
1945 ipa->name_map[name] = endpoint;
1947 endpoint->ipa = ipa;
1948 endpoint->ee_id = data->ee_id;
1949 endpoint->channel_id = data->channel_id;
1950 endpoint->endpoint_id = data->endpoint_id;
1951 endpoint->toward_ipa = data->toward_ipa;
1952 endpoint->config = data->endpoint.config;
1954 __set_bit(endpoint->endpoint_id, ipa->defined);
1957 static void ipa_endpoint_exit_one(struct ipa_endpoint *endpoint)
1959 __clear_bit(endpoint->endpoint_id, endpoint->ipa->defined);
1961 memset(endpoint, 0, sizeof(*endpoint));
1964 void ipa_endpoint_exit(struct ipa *ipa)
1970 for_each_set_bit(endpoint_id, ipa->defined, ipa->endpoint_count)
1971 ipa_endpoint_exit_one(&ipa->endpoint[endpoint_id]);
1973 bitmap_free(ipa->enabled);
1974 ipa->enabled = NULL;
1975 bitmap_free(ipa->set_up);
1977 bitmap_free(ipa->defined);
1978 ipa->defined = NULL;
1980 memset(ipa->name_map, 0, sizeof(ipa->name_map));
1981 memset(ipa->channel_map, 0, sizeof(ipa->channel_map));
1984 /* Set up endpoints from config data and record the set that supports filtering; returns 0 or a negative error code */
1985 int ipa_endpoint_init(struct ipa *ipa, u32 count,
1986 const struct ipa_gsi_endpoint_data *data)
1988 enum ipa_endpoint_name name;
1991 BUILD_BUG_ON(!IPA_REPLENISH_BATCH);
1993 /* Number of endpoints is one more than the maximum ID */
1994 ipa->endpoint_count = ipa_endpoint_max(ipa, count, data) + 1;
1995 if (!ipa->endpoint_count)
1998 /* Initialize endpoint state bitmaps */
1999 ipa->defined = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2003 ipa->set_up = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2005 goto err_free_defined;
2007 ipa->enabled = bitmap_zalloc(ipa->endpoint_count, GFP_KERNEL);
2009 goto err_free_set_up;
2012 for (name = 0; name < count; name++, data++) {
2013 if (ipa_gsi_endpoint_data_empty(data))
2014 continue; /* Skip over empty slots */
2016 ipa_endpoint_init_one(ipa, name, data);
2018 if (data->endpoint.filter_support)
2019 filtered |= BIT(data->endpoint_id);
2020 if (data->ee_id == GSI_EE_MODEM && data->toward_ipa)
2021 ipa->modem_tx_count++;
2024 /* Make sure the set of filtered endpoints is valid */
2025 if (!ipa_filtered_valid(ipa, filtered)) {
2026 ipa_endpoint_exit(ipa);
2031 ipa->filtered = filtered;
2036 bitmap_free(ipa->set_up);
2039 bitmap_free(ipa->defined);
2040 ipa->defined = NULL;