1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #ifndef __HISI_SEC_V2_H
5 #define __HISI_SEC_V2_H
8 #include "sec_crypto.h"
10 /* Algorithm resource per hardware SEC queue */
15 dma_addr_t c_ivin_dma;
17 dma_addr_t a_ivin_dma;
19 dma_addr_t out_mac_dma;
22 /* Cipher request of SEC private */
23 struct sec_cipher_req {
24 struct hisi_acc_hw_sgl *c_out;
27 dma_addr_t c_ivin_dma;
28 struct skcipher_request *sk_req;
35 dma_addr_t out_mac_dma;
37 dma_addr_t a_ivin_dma;
38 struct aead_request *aead_req;
41 /* SEC request of Crypto */
44 struct sec_sqe sec_sqe;
45 struct sec_sqe3 sec_sqe3;
48 struct sec_qp_ctx *qp_ctx;
51 * Common parameter of the SEC request.
53 struct hisi_acc_hw_sgl *in;
55 struct sec_cipher_req c_req;
56 struct sec_aead_req aead_req;
57 struct list_head backlog_head;
63 /* Status of the SEC request */
69 * struct sec_req_op - Operations for SEC request
70 * @buf_map: DMA map the SGL buffers of the request
71 * @buf_unmap: DMA unmap the SGL buffers of the request
* @do_transfer: Transfer data (e.g. copy the IV) for the request — confirm against implementation
72 * @bd_fill: Fill the SEC queue BD
73 * @bd_send: Send the SEC BD into the hardware queue
74 * @callback: Call back for the request
75 * @process: Main processing logic of Skcipher
78 int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
79 void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
80 void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
81 int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
82 int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
83 void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
84 int (*process)(struct sec_ctx *ctx, struct sec_req *req);
87 /* SEC auth context */
95 struct crypto_shash *hash_tfm;
96 struct crypto_aead *fallback_aead_tfm;
99 /* SEC cipher context holding the cipher's related data */
100 struct sec_cipher_ctx {
102 dma_addr_t c_key_dma;
110 /* software fallback support */
112 struct crypto_sync_skcipher *fbtfm;
115 /* SEC queue context which defines queue's relatives */
118 struct sec_req *req_list[QM_Q_DEPTH];
120 struct sec_alg_res res[QM_Q_DEPTH];
122 struct mutex req_lock;
123 struct list_head backlog;
124 struct hisi_acc_sgl_pool *c_in_pool;
125 struct hisi_acc_sgl_pool *c_out_pool;
133 /* SEC Crypto TFM context which defines queue, cipher and related resources */
135 struct sec_qp_ctx *qp_ctx;
137 const struct sec_req_op *req_op;
138 struct hisi_qp **qps;
140 /* Half queues for encipher, and half for decipher */
143 /* Threshold for fake busy: once reached, return -EBUSY to the user */
146 /* Current cyclic index to select a queue for encipher */
147 atomic_t enc_qcyclic;
149 /* Current cyclic index to select a queue for decipher */
150 atomic_t dec_qcyclic;
152 enum sec_alg_type alg_type;
154 struct sec_cipher_ctx c_ctx;
155 struct sec_auth_ctx a_ctx;
161 enum sec_debug_file_index {
166 struct sec_debug_file {
167 enum sec_debug_file_index index;
175 atomic64_t send_busy_cnt;
176 atomic64_t recv_busy_cnt;
177 atomic64_t err_bd_cnt;
178 atomic64_t invalid_req_cnt;
179 atomic64_t done_flag_cnt;
184 struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
189 struct sec_debug debug;
/*
 * Public interface of the SEC v2 driver, shared between the device
 * probe code and the crypto algorithm layer.
 */

/* Release the @qp_num hardware queues in @qps (pairs with sec_create_qps()). */
194 void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Allocate the set of hardware queues used by the driver; presumably returns NULL on failure — verify against implementation. */
195 struct hisi_qp **sec_create_qps(void);
/* Register the SEC algorithms with the kernel crypto API; returns 0 or a negative errno per kernel convention. */
196 int sec_register_to_crypto(struct hisi_qm *qm);
/* Unregister the algorithms registered by sec_register_to_crypto(). */
197 void sec_unregister_from_crypto(struct hisi_qm *qm);