@@ -189,38 +189,53 @@ static bool hclge_is_special_opcode(u16 opcode)
 	return false;
 }

-static int hclge_cmd_convert_err_code(u16 desc_ret)
+struct errcode {
+	u32 imp_errcode;
+	int common_errno;
+};
+
+static void hclge_cmd_copy_desc(struct hclge_hw *hw, struct hclge_desc *desc,
+				int num)
 {
-	switch (desc_ret) {
-	case HCLGE_CMD_EXEC_SUCCESS:
-		return 0;
-	case HCLGE_CMD_NO_AUTH:
-		return -EPERM;
-	case HCLGE_CMD_NOT_SUPPORTED:
-		return -EOPNOTSUPP;
-	case HCLGE_CMD_QUEUE_FULL:
-		return -EXFULL;
-	case HCLGE_CMD_NEXT_ERR:
-		return -ENOSR;
-	case HCLGE_CMD_UNEXE_ERR:
-		return -ENOTBLK;
-	case HCLGE_CMD_PARA_ERR:
-		return -EINVAL;
-	case HCLGE_CMD_RESULT_ERR:
-		return -ERANGE;
-	case HCLGE_CMD_TIMEOUT:
-		return -ETIME;
-	case HCLGE_CMD_HILINK_ERR:
-		return -ENOLINK;
-	case HCLGE_CMD_QUEUE_ILLEGAL:
-		return -ENXIO;
-	case HCLGE_CMD_INVALID:
-		return -EBADR;
-	default:
-		return -EIO;
+	struct hclge_desc *desc_to_use;
+	int handle = 0;
+
+	while (handle < num) {
+		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
+		*desc_to_use = desc[handle];
+		(hw->cmq.csq.next_to_use)++;
+		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
+			hw->cmq.csq.next_to_use = 0;
+		handle++;
 	}
 }

+static int hclge_cmd_convert_err_code(u16 desc_ret)
+{
+	struct errcode hclge_cmd_errcode[] = {
+		{HCLGE_CMD_EXEC_SUCCESS, 0},
+		{HCLGE_CMD_NO_AUTH, -EPERM},
+		{HCLGE_CMD_NOT_SUPPORTED, -EOPNOTSUPP},
+		{HCLGE_CMD_QUEUE_FULL, -EXFULL},
+		{HCLGE_CMD_NEXT_ERR, -ENOSR},
+		{HCLGE_CMD_UNEXE_ERR, -ENOTBLK},
+		{HCLGE_CMD_PARA_ERR, -EINVAL},
+		{HCLGE_CMD_RESULT_ERR, -ERANGE},
+		{HCLGE_CMD_TIMEOUT, -ETIME},
+		{HCLGE_CMD_HILINK_ERR, -ENOLINK},
+		{HCLGE_CMD_QUEUE_ILLEGAL, -ENXIO},
+		{HCLGE_CMD_INVALID, -EBADR},
+	};
+	u32 errcode_count = ARRAY_SIZE(hclge_cmd_errcode);
+	u32 i;
+
+	for (i = 0; i < errcode_count; i++)
+		if (hclge_cmd_errcode[i].imp_errcode == desc_ret)
+			return hclge_cmd_errcode[i].common_errno;
+
+	return -EIO;
+}
+
 static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 				  int num, int ntc)
 {
@@ -244,6 +259,44 @@ static int hclge_cmd_check_retval(struct hclge_hw *hw, struct hclge_desc *desc,
 	return hclge_cmd_convert_err_code(desc_ret);
 }

+static int hclge_cmd_check_result(struct hclge_hw *hw, struct hclge_desc *desc,
+				  int num, int ntc)
+{
+	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
+	bool is_completed = false;
+	u32 timeout = 0;
+	int handle, ret;
+
+	/**
+	 * If the command is sync, wait for the firmware to write back,
+	 * if multi descriptors to be sent, use the first one to check
+	 */
+	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
+		do {
+			if (hclge_cmd_csq_done(hw)) {
+				is_completed = true;
+				break;
+			}
+			udelay(1);
+			timeout++;
+		} while (timeout < hw->cmq.tx_timeout);
+	}
+
+	if (!is_completed)
+		ret = -EBADE;
+	else
+		ret = hclge_cmd_check_retval(hw, desc, num, ntc);
+
+	/* Clean the command send queue */
+	handle = hclge_cmd_csq_clean(hw);
+	if (handle < 0)
+		ret = handle;
+	else if (handle != num)
+		dev_warn(&hdev->pdev->dev,
+			 "cleaned %d, need to clean %d\n", handle, num);
+	return ret;
+}
+
 /**
  * hclge_cmd_send - send command to command queue
  * @hw: pointer to the hw struct
@@ -257,11 +310,7 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 {
 	struct hclge_dev *hdev = container_of(hw, struct hclge_dev, hw);
 	struct hclge_cmq_ring *csq = &hw->cmq.csq;
-	struct hclge_desc *desc_to_use;
-	bool complete = false;
-	u32 timeout = 0;
-	int handle = 0;
-	int retval;
+	int ret;
 	int ntc;

 	spin_lock_bh(&hw->cmq.csq.lock);
@@ -285,49 +334,17 @@ int hclge_cmd_send(struct hclge_hw *hw, struct hclge_desc *desc, int num)
 	 * which will be use for hardware to write back
 	 */
 	ntc = hw->cmq.csq.next_to_use;
-	while (handle < num) {
-		desc_to_use = &hw->cmq.csq.desc[hw->cmq.csq.next_to_use];
-		*desc_to_use = desc[handle];
-		(hw->cmq.csq.next_to_use)++;
-		if (hw->cmq.csq.next_to_use >= hw->cmq.csq.desc_num)
-			hw->cmq.csq.next_to_use = 0;
-		handle++;
-	}
+
+	hclge_cmd_copy_desc(hw, desc, num);

 	/* Write to hardware */
 	hclge_write_dev(hw, HCLGE_NIC_CSQ_TAIL_REG, hw->cmq.csq.next_to_use);

-	/**
-	 * If the command is sync, wait for the firmware to write back,
-	 * if multi descriptors to be sent, use the first one to check
-	 */
-	if (HCLGE_SEND_SYNC(le16_to_cpu(desc->flag))) {
-		do {
-			if (hclge_cmd_csq_done(hw)) {
-				complete = true;
-				break;
-			}
-			udelay(1);
-			timeout++;
-		} while (timeout < hw->cmq.tx_timeout);
-	}
-
-	if (!complete)
-		retval = -EBADE;
-	else
-		retval = hclge_cmd_check_retval(hw, desc, num, ntc);
-
-	/* Clean the command send queue */
-	handle = hclge_cmd_csq_clean(hw);
-	if (handle < 0)
-		retval = handle;
-	else if (handle != num)
-		dev_warn(&hdev->pdev->dev,
-			 "cleaned %d, need to clean %d\n", handle, num);
+	ret = hclge_cmd_check_result(hw, desc, num, ntc);

 	spin_unlock_bh(&hw->cmq.csq.lock);

-	return retval;
+	return ret;
 }

 static void hclge_set_default_capability(struct hclge_dev *hdev)
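
For context, the table-driven error conversion introduced in hclge_cmd_convert_err_code() above can be exercised on its own. Below is a minimal user-space sketch of the same pattern; the firmware codes, the convert_err_code() name, and the local ARRAY_SIZE macro are illustrative stand-ins, not the driver's HCLGE_CMD_* definitions.

/* Stand-alone sketch of mapping firmware (IMP) return codes to -errno
 * values through a lookup table, mirroring the patch above.
 */
#include <errno.h>
#include <stdio.h>

#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]))

struct errcode {
	unsigned int imp_errcode;	/* code reported by the firmware */
	int common_errno;		/* matching kernel-style -errno value */
};

/* hypothetical firmware return codes used only for this sketch */
enum { CMD_EXEC_SUCCESS = 0, CMD_NO_AUTH = 1, CMD_NOT_SUPPORTED = 2 };

static int convert_err_code(unsigned int desc_ret)
{
	static const struct errcode errcodes[] = {
		{CMD_EXEC_SUCCESS, 0},
		{CMD_NO_AUTH, -EPERM},
		{CMD_NOT_SUPPORTED, -EOPNOTSUPP},
	};
	unsigned int i;

	/* linear scan; unknown codes fall back to -EIO, as in the patch */
	for (i = 0; i < ARRAY_SIZE(errcodes); i++)
		if (errcodes[i].imp_errcode == desc_ret)
			return errcodes[i].common_errno;

	return -EIO;
}

int main(void)
{
	/* prints "0 -1 -5" on Linux, where EPERM == 1 and EIO == 5 */
	printf("%d %d %d\n", convert_err_code(CMD_EXEC_SUCCESS),
	       convert_err_code(CMD_NO_AUTH), convert_err_code(0xff));
	return 0;
}

Compared with the original switch statement, the table keeps each firmware-code/errno pair on a single line and makes adding a new code a one-line change, at the cost of a short linear scan on the (rare) error path.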