struct cpucp_packet *pkt;
        dma_addr_t pkt_dma_addr;
        struct hl_bd *sent_bd;
-       u32 tmp, expected_ack_val, pi;
+       struct asic_fixed_properties *prop = &hdev->asic_prop;
+       u32 tmp, expected_ack_val, pi, opcode;
        int rc;
 
        pkt = hl_cpu_accessible_dma_pool_alloc(hdev, len, &pkt_dma_addr);
 
        rc = (tmp & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
        if (rc) {
-               dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n",
-                       rc, (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT);
+               opcode = (tmp & CPUCP_PKT_CTL_OPCODE_MASK) >> CPUCP_PKT_CTL_OPCODE_SHIFT;
+
+               if (!prop->supports_advanced_cpucp_rc) {
+                       dev_dbg(hdev->dev, "F/W ERROR %d for CPU packet %d\n", rc, opcode);
+                       goto scrub_descriptor;
+               }
+
+               switch (rc) {
+               case cpucp_packet_invalid:
+                       dev_err(hdev->dev,
+                               "CPU packet %d is not supported by F/W\n", opcode);
+                       break;
+               case cpucp_packet_fault:
+                       dev_err(hdev->dev,
+                               "F/W failed processing CPU packet %d\n", opcode);
+                       break;
+               case cpucp_packet_invalid_pkt:
+                       dev_dbg(hdev->dev,
+                               "CPU packet %d is not supported by F/W\n", opcode);
+                       break;
+               case cpucp_packet_invalid_params:
+                       dev_err(hdev->dev,
+                               "F/W reports invalid parameters for CPU packet %d\n", opcode);
+                       break;
+
+               default:
+                       dev_err(hdev->dev,
+                               "Unknown F/W ERROR %d for CPU packet %d\n", rc, opcode);
+               }
 
                /* propagate the return code from the f/w to the callers who want to check it */
                if (result)
                        *result = rc;
        }
 
+scrub_descriptor:
        /* Scrub previous buffer descriptor 'ctl' field which contains the
         * previous PI value written during packet submission.
         * We must do this or else F/W can read an old value upon queue wraparound.
 
  * @set_max_power_on_device_init: true if need to set max power in F/W on device init.
  * @supports_user_set_page_size: true if user can set the allocation page size.
  * @dma_mask: the dma mask to be set for this device
+ * @supports_advanced_cpucp_rc: true if the F/W reports the extended cpucp return codes.
  */
 struct asic_fixed_properties {
        struct hw_queue_properties      *hw_queues_props;
        u8                              set_max_power_on_device_init;
        u8                              supports_user_set_page_size;
        u8                              dma_mask;
+       u8                              supports_advanced_cpucp_rc;
 };
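
Each ASIC opts in by setting the new capability flag when it fills in its fixed properties. A minimal sketch of that, assuming a per-ASIC property-init helper (the function name below is illustrative and not part of this patch):

static void example_set_cpucp_rc_property(struct hl_device *hdev)
{
        struct asic_fixed_properties *prop = &hdev->asic_prop;

        /* F/W on this ASIC reports the extended cpucp return codes */
        prop->supports_advanced_cpucp_rc = 1;
}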
 
 /**
 
        CPUCP_LED2_INDEX
 };
 
+/*
+ * enum cpucp_packet_rc - Return codes reported by the F/W for a CPU packet
+ * @cpucp_packet_success: the packet was processed successfully.
+ * @cpucp_packet_invalid: legacy generic failure code, kept to support the
+ *                        Goya and Gaudi platforms.
+ * @cpucp_packet_fault: a processing error occurred, e.g. failure to acquire
+ *                      a device binding or a semaphore.
+ * @cpucp_packet_invalid_pkt: the cpucp packet is not supported. Reported
+ *                            from Greco onwards.
+ * @cpucp_packet_invalid_params: a parameter check failed, e.g. invalid buffer
+ *                               length or attribute value. Reported from
+ *                               Greco onwards.
+ * @cpucp_packet_rc_max: number of return codes; must remain last.
+ */
 enum cpucp_packet_rc {
        cpucp_packet_success,
        cpucp_packet_invalid,
-       cpucp_packet_fault
+       cpucp_packet_fault,
+       cpucp_packet_invalid_pkt,
+       cpucp_packet_invalid_params,
+       cpucp_packet_rc_max
 };
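
On the driver side, a caller that cares about which F/W code was returned can inspect the value propagated through *result in the error path above. A rough sketch, assuming the send_cpu_message ASIC callback and an already-built cpucp packet (the surrounding context is illustrative only, not part of this patch):

        u64 result;
        int rc;

        rc = hdev->asic_funcs->send_cpu_message(hdev, (u32 *) &pkt,
                                                sizeof(pkt), 0, &result);
        if (rc) {
                /* With extended-rc capable F/W, 'result' holds the cpucp_packet_rc value */
                if (result == cpucp_packet_invalid_pkt)
                        dev_dbg(hdev->dev, "Packet not supported by this F/W\n");
                return rc;
        }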
 
 /*