1 /* SPDX-License-Identifier: GPL-2.0 */
2 /* Copyright (c) 2019 HiSilicon Limited. */
4 #ifndef __HISI_SEC_V2_H
5 #define __HISI_SEC_V2_H
8 #include "sec_crypto.h"
10 /* Algorithm resource per hardware SEC queue */
15 dma_addr_t c_ivin_dma;
17 dma_addr_t out_mac_dma;
/* Cipher request private to the SEC driver */
21 struct sec_cipher_req {
22 struct hisi_acc_hw_sgl *c_in;
24 struct hisi_acc_hw_sgl *c_out;
27 dma_addr_t c_ivin_dma;
28 struct skcipher_request *sk_req;
35 dma_addr_t out_mac_dma;
36 struct aead_request *aead_req;
39 /* SEC request of Crypto */
41 struct sec_sqe sec_sqe;
43 struct sec_qp_ctx *qp_ctx;
45 struct sec_cipher_req c_req;
46 struct sec_aead_req aead_req;
47 struct list_head backlog_head;
53 /* Status of the SEC request */
59 * struct sec_req_op - Operations for SEC request
60 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Transfer data for the request
62 * @bd_fill: Fill the SEC queue BD
63 * @bd_send: Send the SEC BD into the hardware queue
64 * @callback: Call back for the request
65 * @process: Main processing logic of Skcipher
68 int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
69 void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
70 void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
71 int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
72 int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
73 void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
74 int (*process)(struct sec_ctx *ctx, struct sec_req *req);
77 /* SEC auth context */
84 struct crypto_shash *hash_tfm;
/* SEC cipher context which defines the cipher's related parameters */
88 struct sec_cipher_ctx {
99 /* SEC queue context which defines queue's relatives */
102 struct sec_req *req_list[QM_Q_DEPTH];
104 struct sec_alg_res res[QM_Q_DEPTH];
106 struct mutex req_lock;
107 struct list_head backlog;
108 struct hisi_acc_sgl_pool *c_in_pool;
109 struct hisi_acc_sgl_pool *c_out_pool;
/* SEC Crypto TFM context which defines queue, cipher and other relatives */
119 struct sec_qp_ctx *qp_ctx;
121 const struct sec_req_op *req_op;
122 struct hisi_qp **qps;
124 /* Half queues for encipher, and half for decipher */
/* Threshold for fake busy: once reached, return -EBUSY to the user */
/* Current cyclic index to select a queue for encipher */
131 atomic_t enc_qcyclic;
/* Current cyclic index to select a queue for decipher */
134 atomic_t dec_qcyclic;
136 enum sec_alg_type alg_type;
138 struct sec_cipher_ctx c_ctx;
139 struct sec_auth_ctx a_ctx;
149 enum sec_debug_file_index {
154 struct sec_debug_file {
155 enum sec_debug_file_index index;
163 atomic64_t send_busy_cnt;
164 atomic64_t recv_busy_cnt;
165 atomic64_t err_bd_cnt;
166 atomic64_t invalid_req_cnt;
167 atomic64_t done_flag_cnt;
172 struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
177 struct sec_debug debug;
/* Release the hardware queue pairs previously obtained for the driver */
void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
/* Allocate the driver's hardware queue pairs; presumably returns NULL on failure — confirm against the definition */
struct hisi_qp **sec_create_qps(void);
/* Register the SEC algorithm implementations with the kernel Crypto API; likely returns 0 or a negative errno — confirm against the definition */
int sec_register_to_crypto(struct hisi_qm *qm);
/* Unregister the SEC algorithm implementations from the kernel Crypto API */
void sec_unregister_from_crypto(struct hisi_qm *qm);